/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
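
/*
 * Background note (general Book-E MMU behaviour, not specific to this
 * file): TLB entries are staged through the MAS registers before tlbwe
 * commits them.  MAS0 selects the array and entry (TLBSEL/ESEL), MAS1
 * holds valid/IPROT/TID/TS/TSIZE, MAS2 holds the EPN and WIMGE
 * attributes, and MAS3 holds the RPN and permission bits.  The
 * sequences below fill them in roughly that order.
 */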

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc, e5500 and e6500 do not have the HID1 register, so skip
 * HID1 setup on these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
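/*
 * The "bl 1f; 1: mflr" pair below is the usual PC-discovery idiom: bl
 * deposits the address of the following instruction into LR, so mflr
 * yields our current execution address without any relocation having
 * been performed yet.
 */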
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
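	/*
	 * Worked example: rlwinm with SH=0, MB=0, ME=5 keeps only bits
	 * 0-5 (IBM numbering, MSB first), i.e. ANDs with 0xfc000000.
	 * Since 64MB == 2^26, this rounds the current address down to a
	 * 64MB boundary, recovering the physical load base.
	 */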
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)
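/*
 * A note on the idiom above: ".long tmpstack-." embeds a link-time
 * PC-relative offset in the instruction stream, and "bl 1f" leaves the
 * address of that word in LR, so word address + offset yields the
 * run-time location of tmpstack no matter where we were loaded.
 */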

/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self
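/*
 * Sketch of the computation above: got[0] holds the link-time address
 * of _DYNAMIC, while the PC-relative arithmetic produced its run-time
 * address in %r3, so %r4 = run-time - link-time is the displacement
 * (relocbase) that elf_reloc_self() applies to each relocation.
 */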

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0
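
/*
 * Background note (not stated here, but implied by the AP bootstrap
 * flow): bp_trace and bp_kernload live inside the boot page because,
 * until the final mapping is installed, this 4K page is the only
 * memory the AP can reliably address; bp_kernload is expected to be
 * filled in by the BSP before the APs are released from holdoff.
 */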

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
	bl	5f
	.long	bp_kernload
	.long	__boot_page
5:	mflr	%r3
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3
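	/*
	 * Walking through the math above: %r3 is rounded down to the 4K
	 * boot page base, %r4 = &bp_kernload - &__boot_page is the
	 * link-time offset of bp_kernload within the page, so the lwzx
	 * fetches bp_kernload from its run-time location.
	 */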

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr
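/*
 * Usage sketch (as done earlier in this file): "li %r3, 0" followed by
 * "bl tlb_inval_all" invalidates every TLB0 entry; passing 1 in r3
 * targets TLB1 instead.
 */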

/*
 * Expects the address to look up in r3; returns the entry number in r29.
 *
 * FIXME: the hidden assumption is that we are now running in AS=0, but we
 * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
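	/*
	 * On a hit, tlbsx loads the matching entry into MAS0-MAS3
	 * (qualified by MAS6[SPID0/SAS]), which is where the entry
	 * number extracted below comes from.
	 */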
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
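	/* bl clobbers LR, so preserve it across the MAS7/MAS8 helpers */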
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidating all entries, skipping the one that
 * currently maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
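/*
 * Background note (core documentation, not from this file): MAS7 holds
 * the upper physical address bits and is not implemented on e500v1,
 * while MAS8 is a virtualization control register present only on the
 * e500mc/e5500 class cores; hence the PVR checks below before touching
 * either register.
 */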
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in tlb_ap_prep(). Next, the AP
	 * loads its contents into the TLB1 hardware in
	 * pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to pad out the
	 * remainder. Upon release from holdoff, the CPU fetches the last
	 * word of the boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync

	lis	%r8, 2f@h
	ori	%r8, %r8, 2f@l
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	.long	begin
GLOBAL(__endkernel)
	.long	end
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>
883