xref: /freebsd/sys/powerpc/booke/locore.S (revision ec0e626bafb335b30c499d06066997f54b10c092)
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (make sure it doesn't collide with
 *    the final kernel entry)
 *  - switch to the temp mapping
 *  - map 64MB of RAM in the final TLB1 entry
 *  - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the final TLB1 mapping
 *  - invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
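
/*
 * Quick reference for the MAS registers as they are used below (e500 MMU):
 *	MAS0	: TLBSEL (which TLB), ESEL (entry index within TLB1)
 *	MAS1	: VALID, IPROT, TID, TS (address space), TSIZE
 *	MAS2	: EPN (effective page number) and WIMGE attributes
 *	MAS3	: RPN (real page number) and access permissions
 * tlbre/tlbwe read/write the entry selected by MAS0, tlbsx searches by EA.
 */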

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code: grab the current PC with a
 * bl/mflr pair (we may be running at any physical address) and look it up.
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	addi	%r3, %r29, 1
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
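	/*
	 * 20 bytes = the 5 instructions from label 2 through the rfi, so
	 * SRR0 points at the instruction following the rfi and execution
	 * resumes there with MSR[IS|DS] set (AS=1).
	 */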
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1 and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1 mapping */
	bl	4f
4:	mflr	%r4
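	/*
	 * Compute the virtual address at which to resume: take our offset
	 * within the (16MB-aligned) load area, add it to KERNBASE (r3 still
	 * holds the MAS2 value, so its attribute bits are masked off first),
	 * and step past the 9 instructions (36 bytes) from label 4 through
	 * the rfi so SRR0 lands on the instruction that follows it.
	 */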
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
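	/*
	 * The .long below holds the link-time offset from itself to tmpstack;
	 * bl/mflr recovers the word's run-time address, so adding the stored
	 * delta yields tmpstack's run-time address wherever we were loaded.
	 */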
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
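	/*
	 * The two .long words store link-time self-relative offsets to
	 * _DYNAMIC and the GOT.  Adding each to its own run-time address
	 * yields the run-time _DYNAMIC and GOT addresses; subtracting the
	 * link-time _DYNAMIC address recorded in the GOT then gives the
	 * relocation base handed to elf_reloc_self().
	 */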
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
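	/* Store a NULL back-chain word so stack traces terminate at this frame. */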
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f
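
	/*
	 * Data shared with the BSP: bp_ntlb1s holds the number of TLB1
	 * entries the AP should install, and bp_tlb1 has room for 16 entries
	 * of three words each (MAS1, MAS2, MAS3), filled in before this core
	 * is released from holdoff.
	 */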

	.globl	bp_ntlb1s
bp_ntlb1s:
	.long	0

	.globl	bp_tlb1
bp_tlb1:
	.space	4 * 3 * 16

	.globl	bp_tlb1_end
bp_tlb1_end:

/*
 * Initial configuration
 */
1:	mflr	%r31		/* r31 holds the address of bp_ntlb1s */

	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
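	/*
	 * Use bp_ntlb1s as the temp entry index, i.e. the first slot not
	 * taken by the entries we are asked to replicate below.
	 */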
	lwz	%r3, 0(%r31)
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mappings in TLB1 and switch to them
 */
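	/*
	 * r6 = number of entries to install (bp_ntlb1s), r5 = pointer into
	 * the bp_tlb1 array (MAS1/MAS2/MAS3 per entry), r4 = TLB1 index.
	 */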
	lwz	%r6, 0(%r31)
	addi	%r5, %r31, 4
	li	%r4, 0

4:	lis	%r3, MAS0_TLBSEL1@h
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync
	lwz	%r3, 0(%r5)
	mtspr	SPR_MAS1, %r3
	isync
	lwz	%r3, 4(%r5)
	mtspr	SPR_MAS2, %r3
	isync
	lwz	%r3, 8(%r5)
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync
	addi	%r5, %r5, 12
	addi	%r4, %r4, 1
	cmpw	%r4, %r6
	blt	4b

	/* Switch to the final mapping */
	bl	5f
	.long __boot_page-.
5:	mflr	%r5
	lwz	%r3,0(%r5)
	add	%r5,%r5,%r3		/* __boot_page in r5 */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
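	/*
	 * 32 bytes = the 8 instructions from label 6 through the rfi, so
	 * SRR0 points at the instruction following the rfi at its final
	 * (kernel virtual) address.
	 */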
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
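	/*
	 * ap_pcpu-. is a self-relative offset: the first lwz/add recovers the
	 * run-time address of ap_pcpu, and the second lwz fetches the pcpu
	 * pointer prepared for this core, which is then installed in SPRG0.
	 */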
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r29.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
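	/*
	 * tlbsx searches for the EA in r3 using the PID/AS in MAS6; on a hit
	 * MAS0..MAS3 are loaded with the matching entry's contents.
	 */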
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 20, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Copies the current translation into a temporary AS=1 entry.
 *
 * r3		entry of temp translation
 * r29		entry of current translation
 * r28		on return, holds the temp entry number passed in r3
 * r4-r5	scratched
 */
tlb1_temp_mapping_as1:
	mr	%r28, %r3

	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/* Prepare and write temp entry */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r28, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r6	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidation(s) should NOT be propagated to other
 * CPUs.
 *
 * XXX: why isn't this in C?
 */
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

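	/*
	 * TLB0 is set-associative: MAS0[ESEL] picks the way and MAS2[EPN]
	 * the set, so walk every (way, entry) pair, read it with tlbre, and
	 * clear MAS1[V] whenever the entry's TID matches the one in r3.
	 */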
	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the assembly form or
 * the (currently used) C version, which is optimized so that it doesn't use
 * any non-volatile registers.
 */
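/*
 * On entry r3 points to the caller's fault-recovery buffer; it is saved in
 * pcb_onfault so the trap handler can transfer control back there on a fault.
 */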
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, TD_PCB(%r2)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	.long   begin
GLOBAL(__endkernel)
	.long   end
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>