/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#ifdef _CALL_ELF
.abiversion _CALL_ELF
#endif

#define TMPSTACKSZ	16384

#ifdef __powerpc64__
#define GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif

#ifdef __powerpc64__
	/* Placate lld by creating a kboot stub. */
        .section ".text.kboot", "x", @progbits
        b __start
#endif

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions about the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
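
/*
 * Note on the two-step switch above: tearing down and rebuilding the AS=0
 * TLB1 entries while still fetching instructions through one of them would
 * leave no safe mapping to run from.  Switching to a scratch copy of the
 * current mapping in AS=1 first gives the code freedom to clean up AS=0;
 * only then is the final kernel mapping installed and switched to.
 */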

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
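	/*
	 * The PVR VERSION field (upper 16 bits) identifies the core
	 * (e500v1/v2, e500mc, e5500, ...) and is used below to pick the
	 * matching HID0/HID1 default values.
	 */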
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc, e5500 and e6500 do not have the HID1 register, so skip
 * HID1 setup on these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
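	/*
	 * The address-space switch is done with rfi: SRR0 holds the target
	 * PC and SRR1 the target MSR.  Setting MSR[IS|DS] in SRR1 moves
	 * instruction and data fetches to address space 1, where the
	 * temporary mapping created below lives.
	 */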
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
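	/*
	 * The entry is programmed through the MAS registers: MAS0 selects
	 * TLB1 and the entry slot, MAS1 carries VALID/IPROT and the 64MB
	 * size, MAS2 the effective address and WIMGE attributes, MAS3/MAS7
	 * the physical address and permissions.  tlbwe then commits it.
	 */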
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
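	/*
	 * r3 still holds the MAS2 value (VM_MIN_KERNEL_ADDRESS plus the
	 * WIMGE flag bits): strip the flag bits and add the current PC's
	 * offset within the 64MB window to form the virtual address at
	 * which execution continues under the new mapping.
	 */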
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
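	/*
	 * The "bl 1f" / ".llong <symbol> - ." pairs below are a common
	 * position-independent idiom: the bl leaves the address of the
	 * data word in LR, and adding the stored displacement to that
	 * address yields the symbol's run-time address regardless of
	 * where the kernel was actually loaded.
	 */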
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2
	nop

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf    %r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	bl	1f
	.llong	tmpstack + TMPSTACKSZ - 96 - .
1:	mflr	%r3
	ld	%r1,0(%r3)
	add	%r1,%r1,%r3
/*
 * Relocate kernel
 */
	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	/*
	 * The boot page is a special page of memory used during AP bringup.
	 * Before the AP comes out of reset, the physical 4K page holding this
	 * code is arranged to be mapped at 0xfffff000 by use of
	 * platform-dependent registers.
	 *
	 * Alternatively, this page may be executed using an ePAPR-standardized
	 * method -- writing to the address specified in "cpu-release-addr".
	 *
	 * In either case, execution begins at the last instruction of the
	 * page, which is a branch back to the start of the page.
	 *
	 * The code in the page must do initial MMU setup and normalize the
	 * TLBs for regular operation in the correct address space before
	 * reading outside the page.
	 *
	 * This implementation accomplishes this by:
	 * 1) Wiping TLB0 and all TLB1 entries but the one currently in use.
	 * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching
	 *    to it with rfi. This entry must NOT be in TLB1 slot 0.
	 *    (This is needed to give the code freedom to clean up AS=0.)
	 * 3) Removing the initial TLB1 entry, leaving us with a single valid
	 *    TLB1 entry, NOT in slot 0.
	 * 4) Installing an AS=0 entry in TLB1 slot 0 mapping the 64MB kernel
	 *    segment at its final virtual address. A second rfi is done to
	 *    switch to the final address space. At this point we can finally
	 *    access the rest of the kernel segment safely.
	 * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in
	 *    a consistent (but minimal) state.
	 * 6) Set up TOC, stack, and pcpu registers.
	 * 7) Now that we can finally call C code, call pmap_bootstrap_ap(),
	 *    which finishes copying in the shared TLB1 entries.
	 *
	 * At this point, the MMU is fully set up, and we can proceed with
	 * running the actual AP bootstrap code.
	 *
	 * Pieces of this code are also used for the UP kernel, but in this
	 * case the sections specific to boot page functionality are dropped
	 * by the preprocessor.
	 */
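	/*
	 * The page begins with a branch over three data words (bp_trace,
	 * bp_kernload, bp_virtaddr).  The "bl 1f" below leaves the link
	 * register pointing at bp_trace, which is how the code at 1:
	 * locates these values before any other mapping is available.
	 */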
#ifdef __powerpc64__
	nop			/* PPC64 alignment word. 64-bit target. */
#endif
	bl	1f		/* 32-bit target. */

	.globl	bp_trace
bp_trace:
	ADDR(0)			/* Trace pointer (%r31). */

	.globl	bp_kernload
bp_kernload:
	.llong 0		/* Kern phys. load address. */

	.globl	bp_virtaddr
bp_virtaddr:
	ADDR(0)			/* Virt. address of __boot_page. */

/*
 * Initial configuration
 */
1:
	mflr    %r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h	/* Ensure we're in 64-bit after RFI */
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
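	/*
	 * LR still points just past the "bl tlb1_inval_entry" above, i.e.
	 * inside the boot page, so rounding it down to a page boundary
	 * gives the address of __boot_page and lets us reach the bp_*
	 * data words.  bp_kernload is a 64-bit physical address: the low
	 * word goes into MAS3[RPN] and the high word into MAS7.
	 */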
5:
	mflr	%r3
#ifdef __powerpc64__
	clrrdi	%r3, %r3, PAGE_SHIFT	/* trunc_page(%r3) */
#else
	clrrwi	%r3, %r3, PAGE_SHIFT	/* trunc_page(%r3) */
#endif
	/* Load lower half of the kernel loadaddr. */
	lwz	%r4, (bp_kernload - __boot_page + 4)(%r3)
	LOAD	%r5, (bp_virtaddr - __boot_page)(%r3)

	/* Set RPN and protection */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4
	isync
	lwz	%r4, (bp_kernload - __boot_page)(%r3)
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this a virtual address */
	addi	%r3, %r3, (7f - 6b)	/* And figure out return address. */
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
 * beyond, so we can directly access all locations the kernel was linked
 * against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	addis	%r1,%r2,TOC_REF(tmpstack)@ha
	ld	%r1,TOC_REF(tmpstack)@l(%r1)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r29.
 *
 * FIXME: the hidden assumption is that we are currently running in AS=0;
 * we should instead retrieve the actual AS from MSR[IS|DS] and put it in
 * MAS6[SAS].
 */
tlb1_find_current:
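	/*
	 * tlbsx searches the TLBs for the effective address in r3 using
	 * the PID/AS search values programmed into MAS6; on a hit it loads
	 * MAS0/MAS1 with the matching entry, from which ESEL is extracted
	 * below.
	 */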
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
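	/*
	 * Copy the entry that currently maps us (the tlbre below loads its
	 * MAS values), then rewrite it into the next TLB1 slot with TS=1
	 * and TID=0, so the same addresses remain valid in address space 1.
	 */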
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last one in TLB1.
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	mtlr	%r3
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1 and invalidates all entries, skipping the one that
 * currently maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table holds the BSP TLB1 entries marked with the
	 * _TLB_ENTRY_SHARED flag during AP bootstrap.  The BSP fills in
	 * the table in tlb_ap_prep(); the AP then loads its contents into
	 * the TLB1 hardware in pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we pad the remainder.  Upon
	 * release from holdoff, the CPU fetches the last word of the boot
	 * page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
	/*
	 * This is the end of the boot page.
	 * During AP startup, the previous instruction is at 0xfffffffc
	 * virtual (i.e. the reset vector.)
	 */
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr
END(dcache_inval)

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
END(dcache_disable)

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
END(dcache_enable)

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr
END(icache_inval)

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
END(icache_disable)

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
END(icache_enable)

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr   %r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr
END(l2cache_inval)

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr
END(l2cache_enable)

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
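	/*
	 * Flash-invalidate the branch buffer (BBFI) first, then set BPEN
	 * to enable branch prediction.
	 */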
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr
END(bpred_enable)

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	/* Note: The spr number is patched at runtime */
	mfspr	%r3, 0
	blr
END(get_spr)

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
#ifdef SMP
TOC_ENTRY(bp_kernload)
#endif
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>
