xref: /freebsd/sys/powerpc/booke/locore.S (revision 39ee7a7a6bdd1557b1c3532abf60d139798ac88b)
1/*-
2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29#include "assym.s"
30
31#include "opt_hwpmc_hooks.h"
32
33#include <machine/asm.h>
34#include <machine/hid.h>
35#include <machine/param.h>
36#include <machine/spr.h>
37#include <machine/pte.h>
38#include <machine/trap.h>
39#include <machine/vmparam.h>
40#include <machine/tlb.h>
41
42#define TMPSTACKSZ	16384
43
	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:
54
/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Registers contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore registers use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep the loader-supplied arguments in r30/r31 so they survive the
 * helper calls below; they are passed to booke_init() at the end.
 */
	mr	%r30, %r3
	mr	%r31, %r4
99
/*
 * Initial cleanup: establish a known MSR state and program the
 * core-specific HID registers.
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	/* Processor version (high half of PVR) selects the HID defaults. */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* Assume E500 defaults for HID0 ... */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500, overriding the defaults if matched. */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * E500mc and E5500 do not have HID1 register, so skip HID1 setup on
 * this core.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	/*
	 * Skip the TLB1 re-mapping below when no metadata pointer was
	 * provided — presumably we are then already running at the linked
	 * address; TODO confirm against loader behavior.
	 */
	cmpwi	%r30, 0
	beq	done_mapping
151
/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3			/* r3 = our current PC */
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	addi	%r3, %r29, 1		/* temp slot = current entry + 1 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20		/* 5 insns past label 2 = after the rfi */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15	/* MAS0[ESEL] = entry */
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19	/* strip MAS2 flag bits, leaving KERNBASE */
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36		/* 9 insns past label 4 = after the rfi */
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:
237
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.	/* PC-relative offset to tmpstack */
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)	/* store NULL at the base of the new stack */

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b
286
287
#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f			/* branch over the boot-page data below */

	.globl	bp_ntlb1s
bp_ntlb1s:
	.long	0			/* number of records in bp_tlb1 */

	.globl	bp_tlb1
bp_tlb1:
	.space	4 * 3 * 16		/* 16 records x 3 words (MAS1/MAS2/MAS3) */

	.globl	bp_tlb1_end
bp_tlb1_end:
308
/*
 * Initial configuration
 */
1:	mflr	%r31		/* r31 holds the address of bp_ntlb1s */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync
/*
 * E500mc and E5500 do not have HID1 register, so skip HID1 setup on
 * this core.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all
357
/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	lwz	%r3, 0(%r31)		/* temp slot = bp_ntlb1s (first free entry) */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20		/* 5 insns past label 3 = after the rfi */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry
387
/*
 * Setup final mapping in TLB1[1] and switch to it.
 *
 * Replays every TLB1 record the BSP published in bp_tlb1, then computes
 * the virtual address of the instruction after the rfi and jumps there
 * with AS=0.
 */
	lwz	%r6, 0(%r31)		/* r6 = bp_ntlb1s (record count) */
	addi	%r5, %r31, 4		/* r5 = &bp_tlb1[0] */
	li	%r4, 0			/* r4 = entry index */

4:	lis	%r3, MAS0_TLBSEL1@h
	rlwimi	%r3, %r4, 16, 12, 15	/* MAS0[ESEL] = index */
	mtspr	SPR_MAS0, %r3
	isync
	lwz	%r3, 0(%r5)		/* saved MAS1 */
	mtspr	SPR_MAS1, %r3
	isync
	lwz	%r3, 4(%r5)		/* saved MAS2 */
	mtspr	SPR_MAS2, %r3
	isync
	lwz	%r3, 8(%r5)		/* saved MAS3 */
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync
	addi	%r5, %r5, 12		/* advance to next 3-word record */
	addi	%r4, %r4, 1
	cmpw	%r4, %r6
	blt	4b

	/* Switch to the final mapping */
	bl	5f
	.long __boot_page-.
5:	mflr	%r5
	lwz	%r3,0(%r5)		/* BUGFIX: was 0(%r3); r5 = &offset word */
	add	%r5,%r5,%r3		/* __boot_page in r5 */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32		/* 8 insns past label 6 = after the rfi */
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
431
/*
 * At this point we're running at virtual addresses KERNBASE and beyond so
 * it's allowed to directly access all locations the kernel was linked
 * against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.	/* PC-relative offset to tmpstack */
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.		/* PC-relative offset to ap_pcpu */
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)	/* dereference ap_pcpu */
	mtsprg0	%r3		/* SPRG0 = this AP's pcpu pointer */

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */
481
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL (0 = TLB0, 1 = TLB1); clobbered
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr
497
/*
 * Look up the TLB1 entry translating the address passed in r3: expects
 * address to look up in r3, returns entry number in r29.  Also sets
 * IPROT on that entry so it survives later invalidations.
 *
 * r17	scratched
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17		/* search with SPID = PID0 (SAS = 0) */
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 20, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr
522
/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL (entry number to invalidate)
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre				/* read the entry ... */
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe				/* ... and write it back invalid */
	isync
	msync
	blr
542
/*
 * Clone the current TLB1 translation into a second entry, identical except
 * for TS=1 (AS=1) and TID=0, so the original can be replaced while we keep
 * executing from the temporary one.
 *
 * r3		entry of temp translation
 * r29		entry of current translation
 * r28		returns temp entry passed in r3
 * r4-r5	scratched
 * r20		scratched (via zero_mas7/zero_mas8)
 */
tlb1_temp_mapping_as1:
	mr	%r28, %r3

	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/* Prepare and write temp entry */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* set MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15	/* clear MAS1[TID] */
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3			/* preserve LR across helper calls */
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr
580
/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry number (preserved)
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	/*
	 * Note: a dead "mr %r6, %r3" used to sit here — %r3 was overwritten
	 * by the very next instruction and %r6 never read, so it only
	 * violated the r3-r5 clobber contract.  Removed.
	 */
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15	/* MAS0[ESEL] = r4 */
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr
611
/*
 * MAS7 and MAS8 conditional zeroing.
 *
 * r20	scratched
 */
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f			/* E500v1: no MAS7, nothing to do */

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr
627
.globl zero_mas8
zero_mas8:
	/* MAS8 is zeroed only on e500mc and e5500; r20 scratched. */
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr				/* other cores: nothing to do */
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
643
#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff CPU fetches the last word of the boot
	 * page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page		/* the reset-vector word itself */
#endif /* SMP */
655
656/************************************************************************/
657/* locore subroutines */
658/************************************************************************/
659
/*
 * void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidation(s) should NOT be propagated to other
 * CPUs.
 *
 * r3	tid
 * r4	number of TLB0 ways
 * r5	number of entries per way
 * r6-r10	scratched
 *
 * XXX: why isn't this in C?
 */
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31	/* extract MAS1[TID] */

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr
730
/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache (flash-invalidate + lock-flash-clear) */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0		/* spin until DCFI clears */
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr
747
ENTRY(dcache_disable)
	/* Disable d-cache: clear L1CSR0[DCE] */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
759
ENTRY(dcache_enable)
	/* Enable d-cache: set L1CSR0[DCPE|DCE] */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
770
ENTRY(icache_inval)
	/* Invalidate i-cache (flash-invalidate + lock-flash-clear) */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1		/* spin until ICFI clears */
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr
782
ENTRY(icache_disable)
	/* Disable i-cache: clear L1CSR1[ICE] */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
793
ENTRY(icache_enable)
	/* Enable i-cache: set L1CSR1[ICPE|ICE] */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
803
/*
 * int setfault()
 *
 * Similar to setjmp to setup for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below,
 * or the (currently used) C code optimized, so it doesn't use any non-volatile
 * registers.
 *
 * r3	fault buffer to fill in; returns 0 (FALSE)
 * r0, r4 scratched
 *
 * NOTE(review): a dead "mfsprg0 %r4" was removed — its result was
 * overwritten by the very next instruction.  The TD_PCB offset applied to
 * %r2 assumes %r2 == curthread — confirm against the trap code.
 */
	.globl	setfault
setfault:
	mflr	%r0
	lwz	%r4, TD_PCB(%r2)	/* curthread's PCB */
	stw	%r3, PCB_ONFAULT(%r4)	/* arm the onfault handler */
	mfcr	%r4
	stw	%r0, 0(%r3)		/* save LR */
	stw	%r1, 4(%r3)		/* save SP */
	stw	%r2, 8(%r3)
	stw	%r4, 12(%r3)		/* save CR */
	stmw	%r13, 16(%r3)		/* store [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr
826
/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	.long   begin		/* kernel start address */
GLOBAL(__endkernel)
	.long   end		/* kernel end address */
	.align	4
tmpstack:
	.space	TMPSTACKSZ	/* early boot stack; SP starts near the top */
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE
847
848#include <powerpc/booke/trap_subr.S>
849