xref: /freebsd/sys/powerpc/aim/locore64.S (revision 71625ec9ad2a9bc8c09784fbd23b759830e0ee5f)
1c3e289e1SNathan Whitehorn
2c3e289e1SNathan Whitehorn/*-
3ca496abdSNathan Whitehorn * Copyright (C) 2010-2016 Nathan Whitehorn
4c3e289e1SNathan Whitehorn * All rights reserved.
5c3e289e1SNathan Whitehorn *
6c3e289e1SNathan Whitehorn * Redistribution and use in source and binary forms, with or without
7c3e289e1SNathan Whitehorn * modification, are permitted provided that the following conditions
8c3e289e1SNathan Whitehorn * are met:
9c3e289e1SNathan Whitehorn * 1. Redistributions of source code must retain the above copyright
10c3e289e1SNathan Whitehorn *    notice, this list of conditions and the following disclaimer.
11c3e289e1SNathan Whitehorn * 2. Redistributions in binary form must reproduce the above copyright
12c3e289e1SNathan Whitehorn *    notice, this list of conditions and the following disclaimer in the
13c3e289e1SNathan Whitehorn *    documentation and/or other materials provided with the distribution.
14c3e289e1SNathan Whitehorn *
15ca496abdSNathan Whitehorn * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16c3e289e1SNathan Whitehorn * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17c3e289e1SNathan Whitehorn * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18c3e289e1SNathan Whitehorn * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19c3e289e1SNathan Whitehorn * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20c3e289e1SNathan Whitehorn * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21c3e289e1SNathan Whitehorn * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22c3e289e1SNathan Whitehorn * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23c3e289e1SNathan Whitehorn * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
24c3e289e1SNathan Whitehorn * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25c3e289e1SNathan Whitehorn */
26c3e289e1SNathan Whitehorn
27fc2a8776SEd Maste#include "assym.inc"
28c3e289e1SNathan Whitehorn
29c3e289e1SNathan Whitehorn#include <sys/syscall.h>
30c3e289e1SNathan Whitehorn
31c3e289e1SNathan Whitehorn#include <machine/trap.h>
32c3e289e1SNathan Whitehorn#include <machine/param.h>
33c3e289e1SNathan Whitehorn#include <machine/spr.h>
34c3e289e1SNathan Whitehorn#include <machine/asm.h>
358f69e36dSJustin Hibbits#include <machine/vmparam.h>
36c3e289e1SNathan Whitehorn
377c259020SNathan Whitehorn#ifdef _CALL_ELF
387c259020SNathan Whitehorn.abiversion _CALL_ELF
397c259020SNathan Whitehorn#endif
407c259020SNathan Whitehorn
/* Glue for linker script */
.globl  kernbase
.set    kernbase, KERNBASE

/*
 * Globals
 */
	.data
	.align 3			/* 8-byte alignment for the .llong cells below */
GLOBAL(__startkernel)
	.llong	begin			/* link-time start of kernel image */
GLOBAL(__endkernel)
	.llong	end			/* link-time end of kernel image */
GLOBAL(can_wakeup)
	.llong	0x0			/* flag polled by APs; starts cleared */

	.align	4			/* 16-byte align the temporary stack */
#define	TMPSTKSZ	16384		/* 16K temporary stack */
GLOBAL(tmpstk)
	.space	TMPSTKSZ

/* Make these reachable through the TOC for C/asm code that loads via r2 */
TOC_ENTRY(tmpstk)
TOC_ENTRY(can_wakeup)
649cecb88cSNathan Whitehorn
/*
 * Dedicated stack for debugger trap handling, only present when the
 * kernel debugger (KDB) is compiled in.
 */
#ifdef KDB
#define TRAPSTKSZ       8192            /* 8k trap stack */
GLOBAL(trapstk)
        .space        TRAPSTKSZ
TOC_ENTRY(trapstk)
#endif

7261740482SLeandro Lupori
/*
 * Entry point for bootloaders that do not fully implement ELF and start
 * at the beginning of the image (kexec, notably). In its own section so
 * that it ends up before any linker-generated call stubs and actually at
 * the beginning of the image. kexec on some systems also enters at
 * (start of image) + 0x60, so put a spin loop there.
 *
 * Layout is offset-critical: the platform layer reads the spin semaphore
 * at image offset 0x40 and kexec APs enter at offset 0x60, so the
 * ". = kbootentry + N" location directives below must not move.
 */
	.section ".text.kboot", "x", @progbits
kbootentry:
#ifdef __LITTLE_ENDIAN__
	RETURN_TO_NATIVE_ENDIAN
#endif
	b __start
. = kbootentry + 0x40	/* Magic address used in platform layer */
	.global smp_spin_sem	/* NOTE(review): exported name; the label
				 * actually placed here is ap_kexec_spin_sem —
				 * presumably aliased elsewhere; confirm */
ap_kexec_spin_sem:
	.long   -1		/* -1 = no AP released yet */
. = kbootentry + 0x60	/* Entry point for kexec APs */
ap_kexec_start:		/* At 0x60 past start, copied to 0x60 by kexec */
	/* r3 set to CPU ID by kexec */

	/* Invalidate icache for low-memory copy and jump there */
	li	%r0,0x80
	dcbst	0,%r0			/* push data cache line at 0x80 to memory */
	sync
	icbi	0,%r0			/* invalidate stale icache line there */
	isync
	ba	0x80			/* Absolute branch to next inst */

. = kbootentry + 0x80			/* Aligned to cache line */
1:	or	31,31,31		/* yield */
	sync
	lwz	%r1,0x40(0)		/* Spin on ap_kexec_spin_sem */
	cmpw	%r1,%r3			/* Until it equals our CPU ID */
	bne	1b

	/* Released */
	or	2,2,2			/* unyield */

	/* Make sure that it will be software reset. Clear SRR1 */
	li	%r1,0
	mtsrr1	%r1
	ba	EXC_RST			/* enter via the system-reset vector */
11670f65499SNathan Whitehorn
/*
 * Now start the real text section
 */

	.text
	.globl	btext
btext:				/* conventional "beginning of text" marker */
124c3e289e1SNathan Whitehorn
/*
 * Main kernel entry point.
 *
 * Calling convention:
 * r3: Flattened Device Tree pointer (or zero)
 * r4: ignored
 * r5: OF client interface pointer (or zero)
 * r6: Loader metadata pointer (or zero)
 * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata
 */
	.text
_NAKED_ENTRY(__start)

#ifdef	__LITTLE_ENDIAN__
	RETURN_TO_NATIVE_ENDIAN
#endif
	/* Set 64-bit mode if not yet set before branching to C */
	mfmsr	%r20
	li	%r21,1
	insrdi	%r20,%r21,1,0		/* set MSR bit 0 (SF, 64-bit mode) */
	mtmsrd	%r20
	isync
	nop	/* Make this block a multiple of 8 bytes */

	/*
	 * Set up the TOC pointer.  The bl/mflr pair materializes the address
	 * of the inline .llong word, which holds a PC-relative offset to the
	 * TOC; adding the two yields the run-time TOC pointer in r2.
	 */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2			/* r2 = address of the .llong above */
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2		/* r2 = run-time TOC pointer */

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf    %r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer (same PC-relative .llong trick) */
	bl	1f
	.llong	tmpstk + TMPSTKSZ - 96 - .
1:	mflr	%r30
	ld	%r1,0(%r30)
	add	%r1,%r1,%r30
	nop

	/* Relocate kernel: save entry args r3-r7 across elf_reloc_self */
	std	%r3,48(%r1)
	std	%r4,56(%r1)
	std	%r5,64(%r1)
	std	%r6,72(%r1)
	std	%r7,80(%r1)

	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3		/* r3 = run-time address of _DYNAMIC */
	mr	%r4,%r31		/* r4 = load offset computed above */
	bl	elf_reloc_self
	nop
	/* Restore the saved entry arguments */
	ld	%r3,48(%r1)
	ld	%r4,56(%r1)
	ld	%r5,64(%r1)
	ld	%r6,72(%r1)
	ld	%r7,80(%r1)

	/* Begin CPU init */
	mr	%r4,%r2 /* Replace ignored r4 with tocbase for trap handlers */
	bl	powerpc_init
	nop

	/* Set stack pointer to new value and branch to mi_startup */
	mr	%r1, %r3		/* powerpc_init returned the new stack */
	li	%r3, 0
	std	%r3, 0(%r1)		/* terminate the stack back-chain */
	bl	mi_startup
	nop

	/* Unreachable */
	b	.
_END(__start)
207c3e289e1SNathan Whitehorn
ASENTRY_NOPROF(__restartkernel_virtual)
	/*
	 * When coming in via this entry point, we need to alter the SLB to
	 * shadow the segment register emulation entries in DMAP space.
	 * We need to do this dance because we are running with virtual-mode
	 * OpenFirmware and have not yet taken over the MMU.
	 *
	 * Assumptions:
	 * 1) The kernel is currently identity-mapped.
	 * 2) We are currently executing at an address compatible with
	 *    real mode.
	 * 3) The first 16 SLB entries are emulating SRs.
	 * 4) The rest of the SLB is not in use.
	 * 5) OpenFirmware is not manipulating the SLB at runtime.
	 * 6) We are running on 64-bit AIM.
	 *
	 * Tested on a G5.
	 */
	mfmsr	%r14
	/* Switch to real mode because we are about to mess with the SLB. */
	andi.	%r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l
	mtmsr	%r14
	isync
	/* Prepare variables for later use. */
	li	%r14, 0			/* r14 = SLB index, counts 0..15 */
	li	%r18, 0
	oris	%r18, %r18, 0xc000
	sldi	%r18, %r18, 32		/* r18: 0xc000000000000000 */
1:
	/*
	 * Loop over the first 16 SLB entries.
	 * Offset the SLBE into the DMAP, add 16 to the index, and write
	 * it back to the SLB.
	 */
	/* XXX add more safety checks */
	slbmfev	%r15, %r14		/* r15 = SLB entry's VSID word */
	slbmfee	%r16, %r14		/* r16 = SLB entry's ESID word */
	or	%r16, %r16, %r14	/* index is 0-15 */
	ori	%r16, %r16, 0x10	/* add 16 to index. */
	or	%r16, %r16, %r18	/* SLBE DMAP offset */
	rldicr	%r17, %r16, 0, 37	/* Invalidation SLBE */

	isync
	slbie	%r17			/* drop any stale entry for this ESID */
	/* isync */
	slbmte	%r15, %r16		/* install the shadowed DMAP entry */
	isync
	addi	%r14, %r14, 1
	cmpdi	%r14, 16
	blt	1b

	/*
	 * Now that we are set up with a temporary direct map, we can
	 * continue with __restartkernel. Translation will be switched
	 * back on at the rfid, at which point we will be executing from
	 * the temporary direct map we just installed, until the kernel
	 * takes over responsibility for the MMU.
	 */
	bl	__restartkernel
	nop
ASEND(__restartkernel_virtual)
2699411e24dSBrandon Bergren
ASENTRY_NOPROF(__restartkernel)
	/*
	 * Re-enter the kernel at __start with a caller-chosen MSR and load
	 * offset, using an rfid so the PC and MSR change atomically.
	 *
	 * r3-r7: arguments to go to __start
	 * r8: offset from current kernel address to apply
	 * r9: MSR to set when (atomically) jumping to __start + r8
	 */
	mtsrr1	%r9			/* SRR1 = target MSR for rfid */
	bl	1f
1:	mflr	%r25			/* r25 = current address of label 1 */
	add	%r25,%r8,%r25		/* apply the relocation offset */
	addi	%r25,%r25,2f-1b		/* advance to label 2 in the new image */
	mtsrr0	%r25			/* SRR0 = target PC for rfid */
	rfid				/* jump with new MSR in effect */
2:	bl	__start
	nop
ASEND(__restartkernel)
2868864f359SNathan Whitehorn
287c3e289e1SNathan Whitehorn#include <powerpc/aim/trap_subr64.S>
288