/* $FreeBSD$ */

/*-
 * Copyright (C) 2010-2016 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27
#include "assym.inc"

#include <sys/syscall.h>

#include <machine/trap.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/asm.h>
#include <machine/vmparam.h>

/*
 * Tag the object with the ELF ABI version (ELFv1 or ELFv2) when the
 * toolchain defines _CALL_ELF, so linked objects agree on the calling
 * convention.
 */
#ifdef _CALL_ELF
.abiversion _CALL_ELF
#endif

/* Glue for linker script */
.globl  kernbase
.set    kernbase, KERNBASE
45
/*
 * Globals
 */
	.data
	.align 3
GLOBAL(__startkernel)
	.llong	begin			/* kernel image start (linker-provided symbol) */
GLOBAL(__endkernel)
	.llong	end			/* kernel image end (linker-provided symbol) */
GLOBAL(can_wakeup)
	.llong	0x0			/* NOTE(review): presumably gates AP wakeup — confirm in MP startup code */

	.align	4
#define	TMPSTKSZ	16384		/* 16K temporary stack */
GLOBAL(tmpstk)
	.space	TMPSTKSZ

TOC_ENTRY(tmpstk)
TOC_ENTRY(can_wakeup)

#ifdef KDB
#define TRAPSTKSZ       8192            /* 8k trap stack */
GLOBAL(trapstk)
        .space        TRAPSTKSZ
TOC_ENTRY(trapstk)
#endif
72
73
/*
 * Entry point for bootloaders that do not fully implement ELF and start
 * at the beginning of the image (kexec, notably). In its own section so
 * that it ends up before any linker-generated call stubs and actually at
 * the beginning of the image. kexec on some systems also enters at
 * (start of image) + 0x60, so put a spin loop there.
 *
 * Layout (the ". =" directives below pin these offsets; do not reorder):
 *   +0x00  kbootentry        — BSP entry, branches to __start
 *   +0x40  ap_kexec_spin_sem — word the APs spin on (released per-CPU ID)
 *   +0x60  ap_kexec_start    — AP entry point used by kexec
 *   +0x80  spin loop         — cache-line-aligned copy target
 */
	.section ".text.kboot", "x", @progbits
kbootentry:
#ifdef __LITTLE_ENDIAN__
	RETURN_TO_NATIVE_ENDIAN
#endif
	b __start
. = kbootentry + 0x40	/* Magic address used in platform layer */
	.global smp_spin_sem
ap_kexec_spin_sem:
	.long   -1		/* -1 = no AP released yet */
. = kbootentry + 0x60	/* Entry point for kexec APs */
ap_kexec_start:		/* At 0x60 past start, copied to 0x60 by kexec */
	/* r3 set to CPU ID by kexec */

	/* Invalidate icache for low-memory copy and jump there */
	li	%r0,0x80
	dcbst	0,%r0		/* Flush the data-cache line at 0x80 ... */
	sync
	icbi	0,%r0		/* ... and discard any stale icache copy */
	isync
	ba	0x80			/* Absolute branch to next inst */

. = kbootentry + 0x80			/* Aligned to cache line */
1:	or	31,31,31		/* yield */
	sync
	lwz	%r1,0x40(0)		/* Spin on ap_kexec_spin_sem */
	cmpw	%r1,%r3			/* Until it equals our CPU ID */
	bne	1b

	/* Released */
	or	2,2,2			/* unyield */

	/* Make sure that it will be software reset. Clear SRR1 */
	li	%r1,0
	mtsrr1	%r1
	ba	EXC_RST
117
/*
 * Now start the real text section
 */

	.text
	.globl	btext
btext:

/*
 * Main kernel entry point.
 *
 * Calling convention:
 * r3: Flattened Device Tree pointer (or zero)
 * r4: ignored
 * r5: OF client interface pointer (or zero)
 * r6: Loader metadata pointer (or zero)
 * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata
 *
 * Sequence: force 64-bit MSR, compute the TOC pointer and load offset
 * PC-relatively (the kernel may be running at a different address than
 * it was linked at), self-relocate via elf_reloc_self(), then call
 * powerpc_init() and mi_startup(). Never returns.
 */
	.text
_NAKED_ENTRY(__start)

#ifdef	__LITTLE_ENDIAN__
	RETURN_TO_NATIVE_ENDIAN
#endif
	/* Set 64-bit mode if not yet set before branching to C */
	mfmsr	%r20
	li	%r21,1
	insrdi	%r20,%r21,1,0		/* Set MSR[SF] (64-bit mode) */
	mtmsrd	%r20
	isync
	nop	/* Make this block a multiple of 8 bytes */

	/*
	 * Set up the TOC pointer: read the embedded PC-relative offset
	 * via a bl/mflr pair, since we don't know our load address yet.
	 */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf    %r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer (temporary stack, same PC-rel trick) */
	bl	1f
	.llong	tmpstk + TMPSTKSZ - 96 - .
1:	mflr	%r30
	ld	%r1,0(%r30)
	add	%r1,%r1,%r30
	nop

	/* Relocate kernel: save the five entry arguments on the stack ... */
	std	%r3,48(%r1)
	std	%r4,56(%r1)
	std	%r5,64(%r1)
	std	%r6,72(%r1)
	std	%r7,80(%r1)

	/* ... call elf_reloc_self(_DYNAMIC, base) ... */
	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
	bl	elf_reloc_self
	nop
	/* ... and restore the arguments afterwards. */
	ld	%r3,48(%r1)
	ld	%r4,56(%r1)
	ld	%r5,64(%r1)
	ld	%r6,72(%r1)
	ld	%r7,80(%r1)

	/* Begin CPU init */
	mr	%r4,%r2 /* Replace ignored r4 with tocbase for trap handlers */
	bl	powerpc_init
	nop

	/* Set stack pointer to new value and branch to mi_startup */
	mr	%r1, %r3		/* powerpc_init returned the new stack */
	li	%r3, 0
	std	%r3, 0(%r1)		/* NULL back-chain terminates the stack */
	bl	mi_startup
	nop

	/* Unreachable */
	b	.
_END(__start)
208
ASENTRY_NOPROF(__restartkernel_virtual)
	/*
	 * When coming in via this entry point, we need to alter the SLB to
	 * shadow the segment register emulation entries in DMAP space.
	 * We need to do this dance because we are running with virtual-mode
	 * OpenFirmware and have not yet taken over the MMU.
	 *
	 * Assumptions:
	 * 1) The kernel is currently identity-mapped.
	 * 2) We are currently executing at an address compatible with
	 *    real mode.
	 * 3) The first 16 SLB entries are emulating SRs.
	 * 4) The rest of the SLB is not in use.
	 * 5) OpenFirmware is not manipulating the SLB at runtime.
	 * 6) We are running on 64-bit AIM.
	 *
	 * Tested on a G5.
	 */
	mfmsr	%r14
	/* Switch to real mode because we are about to mess with the SLB. */
	andi.	%r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l
	mtmsr	%r14
	isync
	/* Prepare variables for later use. */
	li	%r14, 0			/* r14: SLB entry index, 0..15 */
	li	%r18, 0
	oris	%r18, %r18, 0xc000
	sldi	%r18, %r18, 32		/* r18: 0xc000000000000000 (DMAP base) */
1:
	/*
	 * Loop over the first 16 SLB entries.
	 * Offset the SLBE into the DMAP, add 16 to the index, and write
	 * it back to the SLB.
	 */
	/* XXX add more safety checks */
	slbmfev	%r15, %r14		/* r15: VSID half of entry r14 */
	slbmfee	%r16, %r14		/* r16: ESID half of entry r14 */
	or	%r16, %r16, %r14	/* index is 0-15 */
	ori	%r16, %r16, 0x10	/* add 16 to index. */
	or	%r16, %r16, %r18	/* SLBE DMAP offset */
	rldicr	%r17, %r16, 0, 37	/* Invalidation SLBE */

	isync
	slbie	%r17
	/* isync */
	slbmte	%r15, %r16
	isync
	addi	%r14, %r14, 1
	cmpdi	%r14, 16
	blt	1b

	/*
	 * Now that we are set up with a temporary direct map, we can
	 * continue with __restartkernel. Translation will be switched
	 * back on at the rfid, at which point we will be executing from
	 * the temporary direct map we just installed, until the kernel
	 * takes over responsibility for the MMU.
	 */
	bl	__restartkernel
	nop
ASEND(__restartkernel_virtual)
270
ASENTRY_NOPROF(__restartkernel)
	/*
	 * Atomically switch MSR and jump to the relocated __start:
	 * stage the new MSR in SRR1 and the relocated target in SRR0,
	 * then rfid applies both at once.
	 *
	 * r3-r7: arguments to go to __start
	 * r8: offset from current kernel address to apply
	 * r9: MSR to set when (atomically) jumping to __start + r8
	 */
	mtsrr1	%r9
	bl	1f
1:	mflr	%r25			/* r25 = current address of label 1 */
	add	%r25,%r8,%r25		/* apply relocation offset */
	addi	%r25,%r25,2f-1b		/* point at label 2 in the new image */
	mtsrr0	%r25
	rfid
2:	bl	__start
	nop
ASEND(__restartkernel)

#include <powerpc/aim/trap_subr64.S>