/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

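/* Size of the temporary stack used until the kernel stack is set up. */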
#define TMPSTACKSZ	16384

#ifdef __powerpc64__
#define GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif
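
/*
 * The macros above let the 32-bit and 64-bit paths share code: e.g.
 * "LOAD %r4, 0(%r3)" assembles to ld on powerpc64 and to lwz on 32-bit.
 * CALLSIZE is the minimum stack frame (48 bytes in the 64-bit ELFv1 ABI,
 * 8 bytes in the 32-bit ABI) and REDZONE is the ABI-guaranteed scratch
 * area below the stack pointer (288 bytes in ELFv1, none in 32-bit).
 */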

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create a temp entry in the second AS (make sure it doesn't collide
 *    with the final entry)
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[0]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to the kernel load address
 *  - Switch to the TLB1[0] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
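
/*
 * Book-E TLB entries are staged in the MAS registers and committed with
 * tlbwe: MAS0 selects the TLB and entry (TLBSEL/ESEL), MAS1 holds V,
 * IPROT, TID, TS and TSIZE, MAS2 holds the EPN and WIMGE attributes,
 * and MAS3 holds the RPN and the permission bits.  The sequences below
 * build entries by writing these registers in turn.
 */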

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31	/* Extract the processor version */

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc, e5500 and e6500 do not have the HID1 register, so skip
 * HID1 setup on these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

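	/*
	 * The rfi below acts as the context switch: it reloads the PC from
	 * SRR0 and the MSR from SRR1, so execution resumes at label 3 with
	 * MSR[IS|DS] set, i.e. in address space 1 under the temp entry.
	 */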
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
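/*
 * The entry built here is a single 64MB page: MAS1 selects TSIZE = 64M,
 * MAS2 sets EPN = VM_MIN_KERNEL_ADDRESS with the M (memory coherence)
 * attribute, and MAS3 takes the physical load address, rounded down to
 * a 64MB boundary, as the RPN.
 */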
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15	/* Insert entry number into MAS0[ESEL] */
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[0] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38		/* Offset within the 64MB window */
	clrrdi	%r3, %r3, 12		/* Page-align the kernel VA base */
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19	/* Page-align the kernel VA base */
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
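	/*
	 * The .llong below holds the link-time offset from itself to
	 * __tocbase + 0x8000; bl/mflr yields its run-time address, and
	 * adding the two produces the run-time TOC pointer.  The 0x8000
	 * bias keeps TOC_REF() offsets within signed 16-bit reach.
	 */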
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2)	/* First TOC entry is the TOC base */
	subf	%r31,%r31,%r2		/* Subtract from the real TOC base to get the load offset */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
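/*
 * got[0] holds the link-time address of _DYNAMIC; subtracting it from
 * the run-time address of _DYNAMIC computed here yields the load offset
 * (relocbase) that elf_reloc_self() applies to the dynamic relocations.
 */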
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0
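
/*
 * bp_trace and bp_kernload live inside the boot page so an AP can reach
 * them before its final translation is set up; the BSP stores the
 * kernel's physical load address in bp_kernload before the APs are
 * released from holdoff.
 */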

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31	/* Extract the processor version */

	/* Default to the E500 HID0 settings */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15	/* Insert entry number into MAS0[ESEL] */
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
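	/*
	 * The two ADDR() words below hold the link-time addresses of
	 * bp_kernload and __boot_page; their difference is the offset of
	 * bp_kernload within the page, which is then applied to the
	 * run-time (4K-aligned) boot page address obtained via bl/mflr.
	 */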
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl	5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS
 * and beyond, so we can directly access all locations the kernel was
 * linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
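/*
 * tlbivax with the INVALL bit set broadcasts an invalidate-all for the
 * selected TLB; the trailing tlbsync/msync pair waits until every
 * processor has completed the invalidation.
 */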
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r29.
 *
 * FIXME: the hidden assumption is that we are now running in AS=0, but we
 * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
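/*
 * tlbsx searches the TLB for the effective address in r3 using the
 * search context (PID, SAS) staged in MAS6; on a hit it loads MAS0-MAS3
 * from the matching entry, so MAS0[ESEL] names the entry we run from.
 */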
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last one in TLB1.
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* Set MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15	/* Set MAS1[TID] */
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1 and invalidates all entries, skipping the one that
 * currently maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing: MAS7 (the upper RPN bits) is not
 * present on the e500v1, and MAS8 exists only on the hypervisor-capable
 * e500mc and e5500, so each is cleared only where the core has it.
 */
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in the tlb_ap_prep() function. Next,
	 * the AP loads its contents into the TLB1 hardware in
	 * pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to pad out the
	 * remainder.  Upon release from holdoff the CPU fetches the last
	 * word of the boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
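/*
 * Each sequence follows the same pattern: read L1CSR0/L1CSR1, set or
 * clear the relevant bits, synchronize, write the register back, and,
 * for the invalidate sequences, poll until the hardware clears the
 * flash-invalidate bit.
 */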
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
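/*
 * BUCSR[BBFI] flash-invalidates the branch buffer before BUCSR[BPEN]
 * enables prediction, so no stale predictions survive the enable.
 */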
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>
939