xref: /freebsd/sys/powerpc/booke/locore.S (revision 094fc1ed0f2627525c7b0342efcbad5be7a8546a)
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

#ifdef __powerpc64__
#define GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#endif
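
/*
 * The macros above abstract the 32-bit/64-bit differences: word-sized
 * load/store/compare mnemonics, the stack frame (CALLSIZE) and red zone
 * sizes dictated by the respective ABIs, and the register holding the
 * current thread (r13 on powerpc64, r2 on 32-bit powerpc).  On powerpc64
 * the TOC base is additionally kept in SPRG8, so it can be recovered
 * with GET_TOCBASE after any context change.
 */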

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create a temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - Switch to the TLB1[1] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
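
/*
 * Note: until the relocation below is done, the kernel may be running at
 * an address other than the one it was linked at, so all early code
 * computes addresses position-independently using the "bl 1f; 1: mflr rN"
 * idiom: the branch-and-link deposits the address of the following
 * instruction in LR, and nearby symbols are then found by adding
 * link-time-relative offsets stored alongside the code.
 */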

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc, e5500 and e6500 do not have the HID1 register, so skip
 * HID1 setup on these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */
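
/*
 * The rfi above is the usual Book-E way to change translation context:
 * SRR0/SRR1 hold the target PC and MSR, and rfi installs both at once,
 * so execution resumes in AS=1 through the temporary TLB entry without
 * ever fetching from an unmapped address.
 */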

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, KERNBASE)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync
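
/*
 * For reference, the MAS registers staged above play these roles on
 * e500-class MMUs: MAS0 selects the TLB and entry (ESEL), MAS1 holds
 * valid/IPROT/TID/TS/TSIZE, MAS2 the EPN and WIMGE attributes, MAS3 the
 * RPN and permission bits, and MAS7 (where implemented) the upper
 * physical address bits; tlbwe commits the staged values to the entry.
 */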

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2
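
	/*
	 * SPRG8 caches the TOC base so that the GET_TOCBASE macro (and
	 * the trap code) can rematerialize r2 no matter what the
	 * interrupted context kept there.
	 */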

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf    %r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
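
/*
 * With _DYNAMIC (r3) and the relocation base (r4) computed above,
 * elf_reloc_self() processes the kernel's own dynamic relocations so the
 * rest of the kernel can run at the actual load address.
 */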
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)
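
	/*
	 * booke_init() returned the top of thread0's kernel stack;
	 * storing zero at 0(r1) terminates the stack back-chain for
	 * debuggers and stack unwinders.
	 */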

	/* Machine-independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0
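
/*
 * Both words above live inside the boot page and are expected to be
 * filled in before an AP is released: bp_trace collects early progress
 * markers (via r31 below) and bp_kernload should hold the kernel's
 * physical load address, stored there by the BSP during AP bring-up.
 */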

/*
 * Initial configuration
 */
1:
	mflr    %r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, KERNBASE)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl 5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3
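
	/*
	 * The two ADDR words after "bl 5f" hold the link-time addresses
	 * of bp_kernload and __boot_page; their difference is the offset
	 * of bp_kernload within the 4K boot page, and mflr rounded down
	 * to a page boundary gives the page's current base, so the lwzx
	 * above fetches bp_kernload wherever the boot page resides.
	 */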

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf    %r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is that we are now running in AS=0, but we
 * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
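	/*
	 * tlbsx searches for the EA in r3, qualified by the PID/AS
	 * values staged in MAS6; on a hit the matching entry is copied
	 * back into the MAS registers for inspection.
	 */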
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
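/*
 * MAS7 holds the upper bits of the physical address and is not
 * implemented on the original e500v1, while MAS8 carries virtualization
 * attributes and exists only on e500mc-class cores; hence the PVR
 * checks before touching either register.
 */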
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in tlb_ap_prep().  Next, the AP
	 * loads its contents into the TLB1 hardware in pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to pad the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr   %r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
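/*
 * BUCSR_BBFI flash-invalidates the branch buffer before prediction is
 * enabled via BUCSR_BPEN, so no stale predictions survive the switch.
 */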
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync

	lis	%r8, 2f@h
	ori	%r8, %r8, 2f@l
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>