xref: /freebsd/sys/powerpc/booke/locore.S (revision 52c2bb75163559a6e2866ad374a7de67a4ea1273)
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

#ifdef __powerpc64__
#define GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected and invalidate the other entries
 *  - Create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
 *  - Switch to the TLB1[1] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
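
/*
 * Why the detour through AS=1: the TLB1 entry found by tlb1_find_current is
 * the one translating our instruction fetches, so it cannot safely be
 * replaced in place.  MSR[IS|DS] select address space 1, where only the
 * temporary entry (TS=1) matches; running there lets us tear down the
 * original AS=0 entry and install the final TLB1[1] mapping before
 * returning to AS=0 via rfi.
 */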

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31	/* Extract the PVR version field (core type) */

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * E500mc and E5500 do not have the HID1 register, so skip HID1 setup on
 * these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */
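
/*
 * rfi loads SRR0 into the program counter and SRR1 into the MSR, so
 * execution resumes at label 3 below with MSR[IS]=MSR[DS]=1, i.e. both
 * instruction and data accesses are now translated through the AS=1
 * temporary entry set up above.
 */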

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	bl	zero_mas8
	isync
	tlbwe
	isync
	msync
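
	/*
	 * The entry just written maps the 64MB region starting at
	 * VM_MIN_KERNEL_ADDRESS (MAS2[EPN]) onto the 64MB-aligned physical
	 * load address (MAS3[RPN]) with TS=0 and supervisor RWX permissions;
	 * MAS7, which holds the upper physical address bits, is cleared.
	 */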

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf    %r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	addis	%r1,%r2,TOC_REF(tmpstack)@ha
	ld	%r1,TOC_REF(tmpstack)@l(%r1)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
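	/*
	 * elf_reloc_self() applies the kernel's own ELF relocations: %r3
	 * holds the runtime address of _DYNAMIC and %r4 the relocation base
	 * (load offset), both computed position-independently above for the
	 * 32-bit and 64-bit cases.
	 */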
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)
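	/*
	 * booke_init() returns the top of thread0's kernel stack in %r3; the
	 * zero stored at 0(%r1) terminates the stack frame back-chain before
	 * we enter machine-independent code.
	 */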

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0
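	/*
	 * bp_trace and bp_kernload live inside the boot page so that an AP
	 * can reach them through its initial boot-page mapping.  bp_kernload
	 * is expected to be filled in with the kernel's physical load address
	 * by the BSP's CPU release code before the AP is started.
	 */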

/*
 * Initial configuration
 */
1:
	mflr    %r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl 5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3
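	/*
	 * %r3 was truncated to the physical base of the boot page and %r4 is
	 * the offset of bp_kernload within it, so the lwzx above fetched the
	 * kernel physical load address stored there by the BSP.
	 */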

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	bl	zero_mas8
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
 * beyond, so it's safe to access directly all locations the kernel was
 * linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	addis	%r1,%r2,TOC_REF(tmpstack)@ha
	ld	%r1,TOC_REF(tmpstack)@l(%r1)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3, returns the entry number in r29.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
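/*
 * The lookup is done with tlbsx: MAS6[SPID0] is loaded with the current
 * PID0 and the effective address to search for is passed in r3; the
 * matching entry number comes back in MAS0[ESEL].  IPROT is then set on
 * that entry so the translation of the currently executing code cannot be
 * invalidated while the rest of TLB1 is reworked.
 */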
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
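/*
 * Creates a temporary copy of the current TLB1 entry in the next ESEL
 * slot, identical except that MAS1[TS]=1 (AS=1) and MAS1[TID]=0, so the
 * same addresses keep translating once MSR[IS|DS] are set while the
 * original AS=0 entry is torn down and rebuilt.
 */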
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	bl	zero_mas8
	mtlr	%r3
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidating all entries except the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS8 conditional zeroing.
 */
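/*
 * MAS8 is implemented only on the later cores checked for below (e500mc,
 * e5500); on earlier e500 parts the register is absent, so it is cleared
 * only after a PVR check.
 */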
.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in the tlb_ap_prep() function. Next,
	 * the AP loads its contents into the TLB1 hardware in
	 * pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to pad out the
	 * remainder.  Upon release from holdoff, the CPU fetches the last
	 * word of the boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
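	/* Wait for the flash invalidate to complete (hardware clears DCFI) */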
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr   %r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
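/*
 * BUCSR[BBFI] flash-invalidates the branch buffer before BUCSR[BPEN]
 * enables prediction, so any stale predictor state is discarded first.
 */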
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>