/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 *	Trampoline.S	Derived from Setup.S by Linus Torvalds
 *
 *	4 Jan 1997 Michael Chastain: changed to gnu as.
 *	15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 *	Entry: CS:IP points to the start of our code; we are in real
 *	mode with no stack, but we have the rest of the trampoline
 *	page to make our stack, and everything else is a mystery.
 *
 *	On entry to trampoline_start, the processor is in real mode
 *	with 16-bit addressing and 16-bit data.  CS has some value
 *	and IP is zero.  Thus, data addresses need to be absolute
 *	(no relocation) and are taken with regard to r_base.
 *
 *	With the addition of trampoline_level4_pgt this code can
 *	now enter a 64bit kernel that lives at arbitrary 64bit
 *	physical addresses.
 *
 *	If you work on this file, check the object module with objdump
 *	--full-contents --reloc to make sure there are no relocation
 *	entries.
 */
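
/*
 * For example (object path is illustrative and depends on the build
 * tree):
 *
 *	objdump --full-contents --reloc arch/x86/realmode/rm/trampoline_64.o
 */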

#include <linux/linkage.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
#include <asm/realmode.h>
#include "realmode.h"

	.text
	.code16

.macro LOCK_AND_LOAD_REALMODE_ESP lock_pa=0
	/*
	 * Make sure only one CPU fiddles with the realmode stack
	 */
.Llock_rm\@:
	.if \lock_pa
	lock btsl	$0, pa_tr_lock
	.else
	lock btsl	$0, tr_lock
	.endif
	jnc		2f
	pause
	jmp		.Llock_rm\@
2:
	# Setup stack
	movl	$rm_stack_end, %esp
.endm
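
/*
 * "lock btsl $0, tr_lock" atomically tests and sets bit 0 of the lock
 * word: if CF is clear afterwards, the bit was previously zero and this
 * CPU now owns the realmode stack; otherwise another CPU holds it, so
 * spin with PAUSE and retry.  The bit is cleared again by the kernel
 * once the AP has moved off the realmode stack (outside this file).
 */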

	.balign	PAGE_SIZE
SYM_CODE_START(trampoline_start)
	cli			# We should be safe anyway
	wbinvd

	LJMPW_RM(1f)
1:
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	LOCK_AND_LOAD_REALMODE_ESP

	call	verify_cpu		# Verify the cpu supports long mode
	testl	%eax, %eax		# Check for return code
	jnz	no_longmode

.Lswitch_to_protected:
	/*
	 * In a kernel loaded at a non-default location, the GDT can live
	 * beyond 16MB, and a plain lgdt cannot load such an address: with
	 * the real-mode default 16-bit operand size, lgdt loads only a
	 * 24-bit base address.  Use lgdtl instead to force a 32-bit
	 * operand size.
	 */

	lidtl	tr_idt	# load IDT with base 0, limit 0
	lgdtl	tr_gdt	# load GDT with whatever is appropriate

	movw	$__KERNEL_DS, %dx	# Data segment descriptor

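	/*
	 * CR0_STATE (asm/processor-flags.h) includes X86_CR0_PG, so mask
	 * paging off here: the ljmpl below must land in 32-bit protected
	 * mode with paging still disabled.
	 */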
	# Enable protected mode
	movl	$(CR0_STATE & ~X86_CR0_PG), %eax
	movl	%eax, %cr0		# into protected mode

	# flush prefetch and jump to startup_32
	ljmpl	$__KERNEL32_CS, $pa_startup_32

no_longmode:
	hlt
	jmp no_longmode
SYM_CODE_END(trampoline_start)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* SEV-ES supports non-zero IP for entry points - no alignment needed */
SYM_CODE_START(sev_es_trampoline_start)
	cli			# We should be safe anyway

	LJMPW_RM(1f)
1:
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	LOCK_AND_LOAD_REALMODE_ESP

	jmp	.Lswitch_to_protected
SYM_CODE_END(sev_es_trampoline_start)
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

#include "../kernel/verify_cpu.S"
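
/*
 * Note: verify_cpu returns its result in %eax (zero means long mode is
 * supported); the 16-bit code above branches to no_longmode on any
 * non-zero value.
 */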

	.section ".text32","ax"
	.code32
	.balign 4
SYM_CODE_START(startup_32)
	movl	%edx, %ss
	addl	$pa_real_mode_base, %esp
	movl	%edx, %ds
	movl	%edx, %es
	movl	%edx, %fs
	movl	%edx, %gs
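
	/*
	 * %ss:%esp were inherited from the 16-bit code: %esp still held a
	 * realmode-segment-relative offset, which the addl of
	 * pa_real_mode_base above turned into a flat linear address.
	 */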

	/*
	 * Check for memory encryption support.  This is a safety net in
	 * case the BIOS hasn't done the necessary step of setting the bit
	 * in the MSR for this AP.  If SME is active and we've gotten this
	 * far, then it is safe for us to set the MSR bit and continue.
	 * If we don't, we'll eventually crash trying to execute encrypted
	 * instructions.
	 */
	btl	$TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
	jnc	.Ldone
	movl	$MSR_AMD64_SYSCFG, %ecx
	rdmsr
	bts	$MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
	jc	.Ldone

	/*
	 * Memory encryption is enabled but the SME enable bit for this
	 * CPU has not been set.  It is safe to set it, so do so.
	 */
	wrmsr
.Ldone:

	movl	pa_tr_cr4, %eax
	movl	%eax, %cr4		# Enable PAE mode

	# Set up the trampoline 4-level page tables
	movl	$pa_trampoline_pgd, %eax
	movl	%eax, %cr3

	# Set up EFER
	movl	$MSR_EFER, %ecx
	rdmsr
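	/*
	 * rdmsr/wrmsr move the MSR value through %edx:%eax (high:low 32
	 * bits), so the 64-bit tr_efer value is compared and, if need be,
	 * written back in two 32-bit halves below.
	 */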
	/*
	 * Skip writing to EFER if the register already has the desired
	 * value (to avoid a #VE in TDX guests).
	 */
	cmp	pa_tr_efer, %eax
	jne	.Lwrite_efer
	cmp	pa_tr_efer + 4, %edx
	je	.Ldone_efer
.Lwrite_efer:
	movl	pa_tr_efer, %eax
	movl	pa_tr_efer + 4, %edx
	wrmsr

.Ldone_efer:
	# Enable paging and in turn activate Long Mode.
	movl	$CR0_STATE, %eax
	movl	%eax, %cr0

	/*
	 * At this point we're in long mode, but in 32-bit compatibility
	 * mode with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1).  Now we want to jump into 64-bit mode; to do
	 * that we use the new gdt/idt, whose __KERNEL_CS has CS.L = 1.
	 */
	ljmpl	$__KERNEL_CS, $pa_startup_64
SYM_CODE_END(startup_32)

SYM_CODE_START(pa_trampoline_compat)
	/*
	 * In compatibility mode.  Prep ESP and DX for startup_32, then disable
	 * paging and complete the switch to legacy 32-bit mode.
	 */
	LOCK_AND_LOAD_REALMODE_ESP lock_pa=1
	movw	$__KERNEL_DS, %dx

	movl	$(CR0_STATE & ~X86_CR0_PG), %eax
	movl	%eax, %cr0
	ljmpl	$__KERNEL32_CS, $pa_startup_32
SYM_CODE_END(pa_trampoline_compat)

	.section ".text64","ax"
	.code64
	.balign 4
SYM_CODE_START(startup_64)
	# Now jump into the kernel using virtual addresses
	jmpq	*tr_start(%rip)
SYM_CODE_END(startup_64)
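
/*
 * tr_start is not set anywhere in this file: the kernel stores the
 * 64-bit entry point in trampoline_header before waking an AP (for SMP
 * boot this is typically the physical address of secondary_startup_64;
 * see setup_real_mode()).
 */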

SYM_CODE_START(trampoline_start64)
	/*
	 * APs start here on a direct transfer from 64-bit BIOS with identity
	 * mapped page tables.  Load the kernel's GDT in order to gear down to
	 * 32-bit mode (to handle 4-level vs. 5-level paging), and to (re)load
	 * segment registers.  Load the zero IDT so any fault triggers a
	 * shutdown instead of jumping back into BIOS.
	 */
	lidt	tr_idt(%rip)
	lgdt	tr_gdt64(%rip)

	ljmpl	*tr_compat(%rip)
SYM_CODE_END(trampoline_start64)

	.section ".rodata","a"
	# Duplicate the global descriptor table
	# so the kernel can live anywhere
	.balign	16
SYM_DATA_START(tr_gdt)
	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
	.long	pa_tr_gdt
	.short	0
	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
	.quad	0x00af9b000000ffff	# __KERNEL_CS
	.quad	0x00cf93000000ffff	# __KERNEL_DS
SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)
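
/*
 * Layout note: the 6-byte lgdtl operand (limit + 32-bit base) plus two
 * bytes of padding occupy the GDT's own null-descriptor slot, and
 * pa_tr_gdt points back at tr_gdt itself.  Assuming the standard
 * segment-descriptor layout, the three entries decode as:
 *
 *	0x00cf9b000000ffff: base 0, 4GB limit, present, DPL 0,
 *			    execute/read code, D=1 (32-bit)
 *	0x00af9b000000ffff: as above, but D=0 and L=1 (64-bit code)
 *	0x00cf93000000ffff: base 0, 4GB limit, present, DPL 0,
 *			    read/write data
 */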

SYM_DATA_START(tr_gdt64)
	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
	.long	pa_tr_gdt
	.long	0
SYM_DATA_END(tr_gdt64)

SYM_DATA_START(tr_compat)
	.long	pa_trampoline_compat
	.short	__KERNEL32_CS
SYM_DATA_END(tr_compat)
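
/*
 * tr_gdt64 is the 10-byte operand for the 64-bit lgdt in
 * trampoline_start64: a 16-bit limit followed by a 64-bit base
 * (pa_tr_gdt, zero-extended by the trailing .long 0).  tr_compat is the
 * 16:32 far pointer consumed by "ljmpl *tr_compat(%rip)" there.
 */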

	.bss
	.balign	PAGE_SIZE
SYM_DATA(trampoline_pgd, .space PAGE_SIZE)

	.balign	8
SYM_DATA_START(trampoline_header)
	SYM_DATA_LOCAL(tr_start,	.space 8)
	SYM_DATA(tr_efer,		.space 8)
	SYM_DATA(tr_cr4,		.space 4)
	SYM_DATA(tr_flags,		.space 4)
	SYM_DATA(tr_lock,		.space 4)
SYM_DATA_END(trampoline_header)
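
/*
 * This layout must stay in sync with struct trampoline_header in
 * asm/realmode.h (included above); that is how the kernel proper fills
 * in tr_start, tr_efer, tr_cr4 and friends before waking an AP.
 */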

#include "trampoline_common.S"