/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384
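/*
 * Size of the temporary boot stack; its backing store (tmpstack) is
 * reserved in the .data section at the end of this file.
 */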

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - Switch to the TLB1[1] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync
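	/*
	 * The isync makes the MSR update context-synchronizing, so the new
	 * state (interrupts off, AS=0 translation, debug exceptions kept)
	 * is in effect before the next instruction executes.
	 */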

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31
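	/*
	 * PVR[0:15] holds the processor version; the rotate-and-mask above
	 * moves it into the low halfword of r3 for the core-type compares
	 * that follow.
	 */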

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc and e5500 do not have the HID1 register, so skip HID1 setup
 * on those cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
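/*
 * The bl/mflr pair below is the usual trick for discovering the current PC:
 * bl to the very next instruction deposits that instruction's address in LR,
 * and mflr copies it into r3.
 */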
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */
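	/*
	 * rfi loads the PC from SRR0 and the MSR from SRR1, so execution
	 * resumes at label 3 with MSR[IS|DS]=1, i.e. instruction and data
	 * accesses now translate through the AS=1 temp entry.
	 */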

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync
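	/*
	 * tlbwe commits MAS0-MAS3 (and MAS7/MAS8 where present) to the
	 * selected TLB1 entry; the trailing isync/msync pair is required
	 * before the new translation may be relied upon.
	 */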

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
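/*
 * The ".long tmpstack-." word below holds the link-time offset from that
 * word to tmpstack.  bl puts the word's run-time address in LR, so the
 * load-plus-add yields tmpstack's run-time address regardless of where the
 * kernel was actually loaded.
 */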
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self
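	/*
	 * elf_reloc_self(r3 = &_DYNAMIC, r4 = relocbase) applies the
	 * relative relocations so the kernel can run at its actual load
	 * address.  relocbase is the difference between the run-time and
	 * link-time addresses of _DYNAMIC.
	 */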

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)
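	/*
	 * booke_init returned the top of thread0's kernel stack in r3.
	 * Storing a zero back-chain word at 0(r1) terminates the stack
	 * frame chain for debuggers and unwinders.
	 */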

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0
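/*
 * bp_trace and bp_kernload live inside the boot page itself, so an AP can
 * reach them position-independently before its MMU is set up.  The BSP
 * stores the kernel's physical load address in bp_kernload before releasing
 * the APs from holdoff.
 */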

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry that is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
	bl	5f
	.long	bp_kernload
	.long	__boot_page
5:	mflr	%r3
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3
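	/*
	 * r3 was rounded down to the 4KB boot page base (the page is 4KB
	 * aligned) and r4 holds bp_kernload's offset within the page, so
	 * the lwzx above fetches the kernel's physical load address.
	 */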

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3
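	/*
	 * SPRG0 holds the per-CPU (pcpu) pointer on FreeBSD/powerpc; the
	 * BSP pointed ap_pcpu at this AP's pcpu area before releasing it.
	 */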

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
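	/*
	 * tlbivax encodes its operation in the effective address: bit 0x8
	 * selects the TLB array (built from r3 above) and bit 0x4 requests
	 * invalidate-all; tlbsync/msync then make the invalidation visible.
	 */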
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r29.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
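	/*
	 * The two bl calls above clobber LR, so it was saved in r3 and
	 * restored, since this routine itself returns with blr.
	 */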
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1 and invalidates all entries, skipping the one that
 * currently maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
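/*
 * MAS7 (the upper RPN bits) is not implemented on the e500v1, and MAS8
 * exists only on the e500mc and e5500, so each helper checks the PVR and
 * skips the mtspr on cores that lack the register.  Both clobber r20.
 */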
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in tlb_ap_prep(); the AP then loads
	 * its contents into the TLB1 hardware in pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we must pad out the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page and branches back to __boot_page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
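/*
 * L1CSR0_DCFI (and L1CSR1_ICFI below) kick off a flash invalidate and
 * self-clear when the invalidation completes, which is what the polling
 * loops wait for; the msync/isync pair before each mtspr is part of the
 * documented sequence.
 */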
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr   %r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
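/*
 * BUCSR_BBFI flash-invalidates the branch buffer first; only then is
 * prediction enabled with BUCSR_BPEN.
 */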
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync
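	/*
	 * icbtls below fetches and locks a line into the I-cache;
	 * L1CSR1[ICUL] ("unable to lock") is checked after each attempt
	 * to verify that the lock took effect.
	 */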

	lis	%r8, 2f@h
	ori	%r8, %r8, 2f@l
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	.long   begin
GLOBAL(__endkernel)
	.long   end
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>
