/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at a 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (distinct from the final entry)
 *  - switch to the temp mapping
 *  - map 64MB of RAM in TLB1[0]
 *  - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the TLB1[0] mapping
 *  - invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
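
/*
 * A rough C-like sketch of the sequence below (illustrative only, not
 * assembled; names mirror the local subroutines further down, and
 * tlb1_write() stands in for the MAS0..MAS3/tlbwe sequence):
 *
 *	cur = tlb1_find_current();		// -> r29
 *	tlb1_inval_all_but_current(cur);
 *	tmp = tlb1_temp_mapping_as1(cur + 1);	// same translation, TS=1 -> r28
 *	rfi();					// continue in AS=1
 *	tlb1_inval_entry(cur);
 *	tlb1_write(0, KERNBASE, loadaddr, SIZE_64M, TS0);
 *	rfi();					// continue at KERNBASE, AS=0
 *	tlb1_inval_entry(tmp);
 */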

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
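/*
 * The "bl 2f; mflr" pair below fetches the current instruction address;
 * adding 20 (five 4-byte instructions: mflr, addi, mtspr, mtspr, rfi)
 * makes SRR0 point just past the rfi, so the rfi resumes at the next
 * instruction with MSR[IS|DS] set, i.e. executing from AS=1.
 */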
	addi	%r3, %r29, 1
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
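/*
 * A TLB1 entry is programmed through the MAS registers and committed with
 * tlbwe: MAS0 selects the array and entry (ESEL), MAS1 holds the valid and
 * protect bits, TSIZE and TS, MAS2 the effective page number and WIMGE
 * attributes, MAS3 the real page number and access permissions.
 */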
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[0] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
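/*
 * Position-independent address computation: "bl 1f" leaves the address of
 * the ".long" in LR, and the ".long" holds the link-time offset of tmpstack
 * from itself ("tmpstack-."), so the PC-relative addition yields the
 * run-time address of tmpstack no matter where the kernel was loaded.
 */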
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
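/*
 * got[0] holds the link-time address of _DYNAMIC, so subtracting it from
 * the run-time address computed here gives the load offset; roughly,
 * elf_reloc_self(&_DYNAMIC, &_DYNAMIC - got[0]).  Note the GOT offset word
 * is relative to the second ".long" but gets added to the address of the
 * first, leaving %r4 four bytes short; the 4(%r4) displacement in the
 * final load compensates.
 */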
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_ntlb1s
bp_ntlb1s:
	.long	0

	.globl	bp_tlb1
bp_tlb1:
	.space	4 * 3 * 16

	.globl	bp_tlb1_end
bp_tlb1_end:
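
/*
 * The BSP fills this block in before releasing the APs: bp_ntlb1s is the
 * number of valid records and bp_tlb1 holds up to 16 records of three
 * words each (the MAS1, MAS2 and MAS3 values for one TLB1 entry), which
 * the loop further down replays verbatim.
 */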

/*
 * Initial configuration
 */
1:	mflr	%r31		/* r31 holds the address of bp_ntlb1s */

	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	lwz	%r3, 0(%r31)
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1 and switch to it
 */
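/*
 * Replay the bp_tlb1 records: r6 holds the record count (bp_ntlb1s),
 * r5 walks the 12-byte MAS1/MAS2/MAS3 records, r4 counts TLB1 entries.
 */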
	lwz	%r6, 0(%r31)
	addi	%r5, %r31, 4
	li	%r4, 0

4:	lis	%r3, MAS0_TLBSEL1@h
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync
	lwz	%r3, 0(%r5)
	mtspr	SPR_MAS1, %r3
	isync
	lwz	%r3, 4(%r5)
	mtspr	SPR_MAS2, %r3
	isync
	lwz	%r3, 8(%r5)
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync
	addi	%r5, %r5, 12
	addi	%r4, %r4, 1
	cmpw	%r4, %r6
	blt	4b

	/* Switch to the final mapping */
	bl	5f
	.long __boot_page-.
5:	mflr	%r5
	lwz	%r3,0(%r5)		/* offset of __boot_page from the .long */
	add	%r5,%r5,%r3		/* __boot_page in r5 */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
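/*
 * tlbivax encodes the operation in its effective-address operand: the
 * rlwinm below places TLBSEL in bits 3-4 (mask 0x18) and the ori sets the
 * invalidate-all flag (0x4), matching the e500 tlbivax EA format.
 */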
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3, returns the entry number in r29.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
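/*
 * tlbsx searches the TLBs for the effective address in r3 using the search
 * context from MAS6 (SPID/SAS); on a hit the matching entry is loaded into
 * the MAS registers, and MAS0[ESEL] identifies its slot.
 */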
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 20, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r3		entry of temp translation
 * r29		entry of current translation
 * r28		on return, holds the temp entry passed in r3
 * r4-r5	scratched
 */
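/*
 * tlbre below copies the current entry's MAS words, then MAS1 is rewritten:
 * TS=1 (valid in AS1 only), TID=0 (global) and V|IPROT set.  The result is
 * an identical translation reachable from AS=1, which lets the AS=0 entry
 * be replaced while we keep executing.
 */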
tlb1_temp_mapping_as1:
	mr	%r28, %r3

	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/* Prepare and write temp entry */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r28, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r6	scratched
 */
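/*
 * In rough C (illustrative only):
 *
 *	n = TLB1CFG & TLBCFG_NENTRY_MASK;
 *	for (i = 0; i < n; i++)
 *		if (i != current)
 *			tlb1[i].mas1 &= ~(MAS1_VALID | MAS1_IPROT);
 */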
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidation(s) should NOT be propagated to other
 * CPUs.
 *
 * XXX: why isn't this in C?
 */
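/*
 * TLB0 is set-associative: MAS0[ESEL] selects the way and MAS2[EPN] selects
 * the entry within the way, so visiting every entry means iterating both.
 * In rough C (illustrative only):
 *
 *	for (way = 0; way < tlb0_ways; way++)
 *		for (i = 0; i < tlb0_entries_per_way; i++)
 *			if (valid(way, i) && tid_of(way, i) == tid)
 *				clear_valid(way, i);
 */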
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
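/*
 * The DCFI/ICFI flash-invalidate bits clear themselves once the
 * invalidation completes, hence the polling loops after setting them.
 */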
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp(3): set up for handling faults on accesses to user
 * memory.  Any routine using this may only call bcopy, either the assembly
 * form or the (currently used) optimized C version, since neither uses any
 * non-volatile registers.
 */
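/*
 * The buffer passed in r3 is laid out by the stores below as: word 0 = LR,
 * word 1 = r1, word 2 = r2, words 3..21 = r13-r31.  setfault() itself
 * returns 0 (FALSE); if a fault is taken while PCB_ONFAULT is non-NULL,
 * the trap code unwinds to this saved state and resumes with a non-zero
 * return value instead of panicking.
 */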
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, TD_PCB(%r2)	/* %r2 holds curthread */
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r13, 12(%r3)		/* store r13 .. r31 */
	li	%r3, 0			/* return FALSE */
	blr
733
734/************************************************************************/
735/* Data section								*/
736/************************************************************************/
737	.data
738	.align 3
739GLOBAL(__startkernel)
740	.long   begin
741GLOBAL(__endkernel)
742	.long   end
743	.align	4
744tmpstack:
745	.space	TMPSTACKSZ
746tmpstackbound:
747	.space 10240	/* XXX: this really should not be necessary */
748
749/*
750 * Compiled KERNBASE locations
751 */
752	.globl	kernbase
753	.set	kernbase, KERNBASE
754
755#include <powerpc/booke/trap_subr.S>
756