xref: /linux/arch/x86/boot/startup/sev-startup.c (revision 18ea89eae404d119ced26d80ac3e62255ce15409)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>

#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/sev-internal.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid.h>
#include <asm/cmdline.h>

/* For early boot hypervisor communication in SEV-ES enabled guests */
struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
u64 sev_hv_features __ro_after_init;

/* Secrets page physical address from the CC blob */
u64 sev_secrets_pa __ro_after_init;

/* For early boot SVSM communication */
struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);

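/* Per-CPU pointer to the SVSM Calling Area, plus its physical address */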
DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
DEFINE_PER_CPU(u64, svsm_caa_pa);

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active        = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int insn_bytes;

	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
	if (insn_bytes == 0) {
		/* Nothing could be copied */
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	} else if (insn_bytes == -EINVAL) {
		/* Effective RIP could not be calculated */
		ctxt->fi.vector     = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		ctxt->fi.cr2        = 0;
		return ES_EXCEPTION;
	}

	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
		return ES_DECODE_FAILED;

	if (ctxt->insn.immediate.got)
		return ES_OK;
	else
		return ES_DECODE_FAILED;
}

static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int res, ret;

	res = vc_fetch_insn_kernel(ctxt, buffer);
	if (res) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;
	else
		return ES_OK;
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	if (user_mode(ctxt->regs))
		return __vc_decode_user_insn(ctxt);
	else
		return __vc_decode_kern_insn(ctxt);
}

static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;

	/*
	 * This function uses __put_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __put_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __put_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_to_user() here because
	 * vc_write_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whichever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *target = (u8 __user *)dst;

		memcpy(&d1, buf, 1);
		if (__put_user(d1, target))
			goto fault;
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *target = (u16 __user *)dst;

		memcpy(&d2, buf, 2);
		if (__put_user(d2, target))
			goto fault;
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *target = (u32 __user *)dst;

		memcpy(&d4, buf, 4);
		if (__put_user(d4, target))
			goto fault;
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *target = (u64 __user *)dst;

		memcpy(&d8, buf, 8);
		if (__put_user(d8, target))
			goto fault;
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;

	/*
	 * This function uses __get_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __get_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __get_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_from_user() here because
	 * vc_read_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whichever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *s = (u8 __user *)src;

		if (__get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *s = (u16 __user *)src;

		if (__get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *s = (u32 __user *)src;

		if (__get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *s = (u64 __user *)src;

		if (__get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}

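/*
 * Translate the virtual address of an MMIO access to a physical address by
 * walking the page tables of the current CR3. A missing mapping is reported
 * back as a #PF, and encrypted mappings are rejected because emulated MMIO
 * to/from encrypted memory is not supported.
 */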
static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	phys_addr_t pa;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.cr2        = vaddr;
		ctxt->fi.error_code = 0;

		if (user_mode(ctxt->regs))
			ctxt->fi.error_code |= X86_PF_USER;

		return ES_EXCEPTION;
	}

	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
		/* Emulated MMIO to/from encrypted memory not supported */
		return ES_UNSUPPORTED;

	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
	pa |= va & ~page_level_mask(level);

	*paddr = pa;

	return ES_OK;
}

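/*
 * Emulate the I/O permission check the CPU would normally perform for IN/OUT
 * from user space: without an I/O bitmap, or with any bit set for the
 * accessed port range, the access is denied with a #GP.
 */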
static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
{
	BUG_ON(size > 4);

	if (user_mode(ctxt->regs)) {
		struct thread_struct *t = &current->thread;
		struct io_bitmap *iobm = t->io_bitmap;
		size_t idx;

		if (!iobm)
			goto fault;

		for (idx = port; idx < port + size; ++idx) {
			if (test_bit(idx, iobm->bitmap))
				goto fault;
		}
	}

	return ES_OK;

fault:
	ctxt->fi.vector = X86_TRAP_GP;
	ctxt->fi.error_code = 0;

	return ES_EXCEPTION;
}

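/*
 * Forward an exception raised during instruction emulation to the native
 * handler for the vector recorded in ctxt->fi, as if the emulated
 * instruction had faulted directly.
 */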
static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
	long error_code = ctxt->fi.error_code;
	int trapnr = ctxt->fi.vector;

	ctxt->regs->orig_ax = ctxt->fi.error_code;

	switch (trapnr) {
	case X86_TRAP_GP:
		exc_general_protection(ctxt->regs, error_code);
		break;
	case X86_TRAP_UD:
		exc_invalid_op(ctxt->regs);
		break;
	case X86_TRAP_PF:
		write_cr2(ctxt->fi.cr2);
		exc_page_fault(ctxt->regs, error_code);
		break;
	case X86_TRAP_AC:
		exc_alignment_check(ctxt->regs, error_code);
		break;
	default:
		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
		BUG();
	}
}

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}

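/*
 * Issue an SVSM protocol call, picking the best available channel: the
 * per-CPU GHCB once those are initialized, the early boot GHCB before that,
 * and the GHCB MSR protocol as the fallback. The call is retried for as long
 * as the SVSM asks for it with -EAGAIN.
 */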
int svsm_perform_call_protocol(struct svsm_call *call)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	int ret;

	/*
	 * This can be called very early in the boot, use native functions in
	 * order to avoid paravirt issues.
	 */
	flags = native_local_irq_save();

	if (sev_cfg.ghcbs_initialized)
		ghcb = __sev_get_ghcb(&state);
	else if (boot_ghcb)
		ghcb = boot_ghcb;
	else
		ghcb = NULL;

	do {
		ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
			   : svsm_perform_msr_protocol(call);
	} while (ret == -EAGAIN);

	if (sev_cfg.ghcbs_initialized)
		__sev_put_ghcb(&state);

	native_local_irq_restore(flags);

	return ret;
}

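/*
 * Transition a range of 4K pages between the private and shared state by
 * issuing one page-state-change request per page over the GHCB MSR protocol,
 * terminating the guest if the hypervisor rejects a request.
 */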
void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
		      unsigned long npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;

	vaddr = vaddr & PAGE_MASK;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		/* Page validation must be rescinded before changing to shared */
		if (op == SNP_PAGE_STATE_SHARED)
			pvalidate_4k_page(vaddr, paddr, false);

		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
			goto e_term;

		if (GHCB_MSR_PSC_RESP_VAL(val))
			goto e_term;

		/* Page validation must be performed after changing to private */
		if (op == SNP_PAGE_STATE_PRIVATE)
			pvalidate_4k_page(vaddr, paddr, true);

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}

void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}

/* Writes to the SVSM CAA MSR are ignored */
static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
{
	if (write)
		return ES_OK;

	regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
	regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));

	return ES_OK;
}

/*
 * TSC related accesses should not exit to the hypervisor when a guest is
 * executing with Secure TSC enabled, so special handling is required for
 * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
 */
static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
{
	u64 tsc;

	/*
	 * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
	 * Terminate the SNP guest when the interception is enabled.
	 */
	if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
		return ES_VMM_ERROR;

	/*
	 * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
	 *         to return undefined values, so ignore all writes.
	 *
	 * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use
	 *        the value returned by rdtsc_ordered().
	 */
	if (write) {
		WARN_ONCE(1, "TSC MSR writes are verboten!\n");
		return ES_OK;
	}

	tsc = rdtsc_ordered();
	regs->ax = lower_32_bits(tsc);
	regs->dx = upper_32_bits(tsc);

	return ES_OK;
}

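/*
 * Handle an intercepted RDMSR/WRMSR. The second opcode byte distinguishes
 * the two (0F 30 is WRMSR, 0F 32 is RDMSR). MSRs that must not leave the
 * guest (SVSM CAA, Secure TSC) are handled locally; everything else is
 * forwarded to the hypervisor through the GHCB.
 */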
static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	enum es_result ret;
	bool write;

	/* Is it a WRMSR? */
	write = ctxt->insn.opcode.bytes[1] == 0x30;

	switch (regs->cx) {
	case MSR_SVSM_CAA:
		return __vc_handle_msr_caa(regs, write);
	case MSR_IA32_TSC:
	case MSR_AMD64_GUEST_TSC_FREQ:
		if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
			return __vc_handle_secure_tsc_msrs(regs, write);
		break;
	default:
		break;
	}

	ghcb_set_rcx(ghcb, regs->cx);
	if (write) {
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rdx(ghcb, regs->dx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);

	if ((ret == ES_OK) && !write) {
		regs->ax = ghcb->save.rax;
		regs->dx = ghcb->save.rdx;
	}

	return ret;
}

static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
	int trapnr = ctxt->fi.vector;

	if (trapnr == X86_TRAP_PF)
		native_write_cr2(ctxt->fi.cr2);

	ctxt->regs->orig_ax = ctxt->fi.error_code;
	do_early_exception(ctxt->regs, trapnr);
}

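/*
 * Return a pointer to the pt_regs slot backing the ModRM r/m operand of the
 * instruction being emulated, or NULL if it cannot be resolved.
 */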
static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}
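
/*
 * Perform a single MMIO read or write on behalf of the guest: translate the
 * memory operand to a physical address, point the GHCB scratch area at the
 * shared buffer that carries the data, and issue the MMIO VMGEXIT.
 */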
static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned int bytes, bool read)
{
	u64 exit_code, exit_info_1, exit_info_2;
	unsigned long ghcb_pa = __pa(ghcb);
	enum es_result res;
	phys_addr_t paddr;
	void __user *ref;

	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
	if (ref == (void __user *)-1L)
		return ES_UNSUPPORTED;

	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;

	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
	if (res != ES_OK) {
		if (res == ES_EXCEPTION && !read)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return res;
	}

	exit_info_1 = paddr;
	/* Can never be greater than 8 */
	exit_info_2 = bytes;

	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
}

/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{
	unsigned long ds_base, es_base;
	unsigned char *src, *dst;
	unsigned char buffer[8];
	enum es_result ret;
	bool rep;
	int off;

	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

	if (ds_base == -1L || es_base == -1L) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	src = ds_base + (unsigned char *)ctxt->regs->si;
	dst = es_base + (unsigned char *)ctxt->regs->di;

	ret = vc_read_mem(ctxt, src, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	ret = vc_write_mem(ctxt, dst, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	if (ctxt->regs->flags & X86_EFLAGS_DF)
		off = -bytes;
	else
		off =  bytes;

	ctxt->regs->si += off;
	ctxt->regs->di += off;

	rep = insn_has_rep_prefix(&ctxt->insn);
	if (rep)
		ctxt->regs->cx -= 1;

	if (!rep || ctxt->regs->cx == 0)
		return ES_OK;
	else
		return ES_RETRY;
}

static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct insn *insn = &ctxt->insn;
	enum insn_mmio_type mmio;
	unsigned int bytes = 0;
	enum es_result ret;
	u8 sign_byte;
	long *reg_data;

	mmio = insn_decode_mmio(insn, &bytes);
	if (mmio == INSN_MMIO_DECODE_FAILED)
		return ES_DECODE_FAILED;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
		if (!reg_data)
			return ES_DECODE_FAILED;
	}

	if (user_mode(ctxt->regs))
		return ES_UNSUPPORTED;

	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(ghcb->shared_buffer, reg_data, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case INSN_MMIO_WRITE_IMM:
		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case INSN_MMIO_READ:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero-extend for 32-bit operation */
		if (bytes == 4)
			*reg_data = 0;

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero extend based on operand size */
		memset(reg_data, 0, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		if (bytes == 1) {
			u8 *val = (u8 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x80) ? 0xff : 0x00;
		} else {
			u16 *val = (u16 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
		}

		/* Sign extend based on operand size */
		memset(reg_data, sign_byte, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case INSN_MMIO_MOVS:
		ret = vc_handle_mmio_movs(ctxt, bytes);
		break;
	default:
		ret = ES_UNSUPPORTED;
		break;
	}

	return ret;
}

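/*
 * DR7 accesses are intercepted, so the last value written is cached in the
 * per-CPU runtime data to satisfy later reads. With the DebugSwap feature
 * the hypervisor is not expected to intercept DR7 at all, so such an
 * intercept is treated as a VMM error.
 */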
static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long val, *reg = vc_insn_get_rm(ctxt);
	enum es_result ret;

	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
		return ES_VMM_ERROR;

	if (!reg)
		return ES_DECODE_FAILED;

	val = *reg;

	/* Upper 32 bits must be written as zeroes */
	if (val >> 32) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	/* Clear out other reserved bits and set bit 10 */
	val = (val & 0xffff23ffL) | BIT(10);

	/* Early non-zero writes to DR7 are not supported */
	if (!data && (val & ~DR7_RESET_VALUE))
		return ES_UNSUPPORTED;

	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
	ghcb_set_rax(ghcb, val);
	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (data)
		data->dr7 = val;

	return ES_OK;
}

static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
					 struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long *reg = vc_insn_get_rm(ctxt);

	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
		return ES_VMM_ERROR;

	if (!reg)
		return ES_DECODE_FAILED;

	if (data)
		*reg = data->dr7;
	else
		*reg = DR7_RESET_VALUE;

	return ES_OK;
}

static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
				       struct es_em_ctxt *ctxt)
{
	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
}

static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rcx(ghcb, ctxt->regs->cx);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;

	return ES_OK;
}

static enum es_result vc_handle_monitor(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Treat it as a NOP and do not leak a physical address to the
	 * hypervisor.
	 */
	return ES_OK;
}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	/* Treat the same as MONITOR/MONITORX */
	return ES_OK;
}

static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rax(ghcb, ctxt->regs->ax);
	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

	if (x86_platform.hyper.sev_es_hcall_prepare)
		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!ghcb_rax_is_valid(ghcb))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;

	/*
	 * Call sev_es_hcall_finish() after regs->ax is already set.
	 * This allows the hypervisor handler to overwrite it again if
	 * necessary.
	 */
	if (x86_platform.hyper.sev_es_hcall_finish &&
	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
		return ES_VMM_ERROR;

	return ES_OK;
}

static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Calling exc_alignment_check() directly does not work, because it
	 * enables IRQs and the GHCB is active. Forward the exception and call
	 * it later from vc_forward_exception().
	 */
	ctxt->fi.vector = X86_TRAP_AC;
	ctxt->fi.error_code = 0;
	return ES_EXCEPTION;
}

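/*
 * Central #VC dispatch: first sanity-check that the opcode bytes at the
 * faulting RIP plausibly match the reported exit code, then hand the
 * exception to the matching emulation routine.
 */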
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
					 struct ghcb *ghcb,
					 unsigned long exit_code)
{
	enum es_result result = vc_check_opcode_bytes(ctxt, exit_code);

	if (result != ES_OK)
		return result;

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		result = vc_handle_dr7_read(ghcb, ctxt);
		break;
	case SVM_EXIT_WRITE_DR7:
		result = vc_handle_dr7_write(ghcb, ctxt);
		break;
	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
		result = vc_handle_trap_ac(ghcb, ctxt);
		break;
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
		break;
	case SVM_EXIT_RDPMC:
		result = vc_handle_rdpmc(ghcb, ctxt);
		break;
	case SVM_EXIT_INVD:
		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
		result = ES_UNSUPPORTED;
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(ghcb, ctxt);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(ghcb, ctxt);
		break;
	case SVM_EXIT_MSR:
		result = vc_handle_msr(ghcb, ctxt);
		break;
	case SVM_EXIT_VMMCALL:
		result = vc_handle_vmmcall(ghcb, ctxt);
		break;
	case SVM_EXIT_WBINVD:
		result = vc_handle_wbinvd(ghcb, ctxt);
		break;
	case SVM_EXIT_MONITOR:
		result = vc_handle_monitor(ghcb, ctxt);
		break;
	case SVM_EXIT_MWAIT:
		result = vc_handle_mwait(ghcb, ctxt);
		break;
	case SVM_EXIT_NPF:
		result = vc_handle_mmio(ghcb, ctxt);
		break;
	default:
		/*
		 * Unexpected #VC exception
		 */
		result = ES_UNSUPPORTED;
	}

	return result;
}

static __always_inline bool is_vc2_stack(unsigned long sp)
{
	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}

static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
{
	unsigned long sp, prev_sp;

	sp      = (unsigned long)regs;
	prev_sp = regs->sp;

	/*
	 * If the code was already executing on the VC2 stack when the #VC
	 * happened, let it proceed to the normal handling routine. This way the
	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
	 */
	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
}

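/*
 * Core of the runtime #VC handler: grab and invalidate the per-CPU GHCB, set
 * up the emulation context, dispatch on the exit code, release the GHCB, and
 * finally either complete the emulated instruction or forward the resulting
 * exception.
 */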
1088a3cbbb47SArd Biesheuvel static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
1089a3cbbb47SArd Biesheuvel {
1090a3cbbb47SArd Biesheuvel 	struct ghcb_state state;
1091a3cbbb47SArd Biesheuvel 	struct es_em_ctxt ctxt;
1092a3cbbb47SArd Biesheuvel 	enum es_result result;
1093a3cbbb47SArd Biesheuvel 	struct ghcb *ghcb;
1094a3cbbb47SArd Biesheuvel 	bool ret = true;
1095a3cbbb47SArd Biesheuvel 
1096a3cbbb47SArd Biesheuvel 	ghcb = __sev_get_ghcb(&state);
1097a3cbbb47SArd Biesheuvel 
1098a3cbbb47SArd Biesheuvel 	vc_ghcb_invalidate(ghcb);
1099a3cbbb47SArd Biesheuvel 	result = vc_init_em_ctxt(&ctxt, regs, error_code);
1100a3cbbb47SArd Biesheuvel 
1101a3cbbb47SArd Biesheuvel 	if (result == ES_OK)
1102a3cbbb47SArd Biesheuvel 		result = vc_handle_exitcode(&ctxt, ghcb, error_code);
1103a3cbbb47SArd Biesheuvel 
1104a3cbbb47SArd Biesheuvel 	__sev_put_ghcb(&state);
1105a3cbbb47SArd Biesheuvel 
1106a3cbbb47SArd Biesheuvel 	/* Done - now check the result */
1107a3cbbb47SArd Biesheuvel 	switch (result) {
1108a3cbbb47SArd Biesheuvel 	case ES_OK:
1109a3cbbb47SArd Biesheuvel 		vc_finish_insn(&ctxt);
1110a3cbbb47SArd Biesheuvel 		break;
1111a3cbbb47SArd Biesheuvel 	case ES_UNSUPPORTED:
1112a3cbbb47SArd Biesheuvel 		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
1113a3cbbb47SArd Biesheuvel 				   error_code, regs->ip);
1114a3cbbb47SArd Biesheuvel 		ret = false;
1115a3cbbb47SArd Biesheuvel 		break;
1116a3cbbb47SArd Biesheuvel 	case ES_VMM_ERROR:
1117a3cbbb47SArd Biesheuvel 		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1118a3cbbb47SArd Biesheuvel 				   error_code, regs->ip);
1119a3cbbb47SArd Biesheuvel 		ret = false;
1120a3cbbb47SArd Biesheuvel 		break;
1121a3cbbb47SArd Biesheuvel 	case ES_DECODE_FAILED:
1122a3cbbb47SArd Biesheuvel 		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1123a3cbbb47SArd Biesheuvel 				   error_code, regs->ip);
1124a3cbbb47SArd Biesheuvel 		ret = false;
1125a3cbbb47SArd Biesheuvel 		break;
1126a3cbbb47SArd Biesheuvel 	case ES_EXCEPTION:
1127a3cbbb47SArd Biesheuvel 		vc_forward_exception(&ctxt);
1128a3cbbb47SArd Biesheuvel 		break;
1129a3cbbb47SArd Biesheuvel 	case ES_RETRY:
1130a3cbbb47SArd Biesheuvel 		/* Nothing to do */
1131a3cbbb47SArd Biesheuvel 		break;
1132a3cbbb47SArd Biesheuvel 	default:
1133a3cbbb47SArd Biesheuvel 		pr_emerg("Unknown result in %s():%d\n", __func__, result);
1134a3cbbb47SArd Biesheuvel 		/*
1135a3cbbb47SArd Biesheuvel 		 * Emulating the instruction which caused the #VC exception
1136a3cbbb47SArd Biesheuvel 		 * failed - it is not safe to continue, so dump debug state via BUG()
1137a3cbbb47SArd Biesheuvel 		 */
1138a3cbbb47SArd Biesheuvel 		BUG();
1139a3cbbb47SArd Biesheuvel 	}
1140a3cbbb47SArd Biesheuvel 
1141a3cbbb47SArd Biesheuvel 	return ret;
1142a3cbbb47SArd Biesheuvel }
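
/*
 * Editor's note - illustrative sketch only: any runtime GHCB user must
 * follow the same get/invalidate/use/put discipline seen above, because
 * the per-CPU GHCB may be handed out as the backup page to an NMI that
 * interrupts this path. The function below is invented for illustration:
 */
#if 0	/* illustration only - not built */
static enum es_result ghcb_protocol_example(struct es_em_ctxt *ctxt)
{
	struct ghcb_state state;
	enum es_result ret;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);	/* may hand out the backup GHCB */
	vc_ghcb_invalidate(ghcb);	/* clear the stale valid-bitmap */

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);

	__sev_put_ghcb(&state);		/* restore the backup if it was used */

	return ret;
}
#endif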
1143a3cbbb47SArd Biesheuvel 
1144a3cbbb47SArd Biesheuvel static __always_inline bool vc_is_db(unsigned long error_code)
1145a3cbbb47SArd Biesheuvel {
1146a3cbbb47SArd Biesheuvel 	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
1147a3cbbb47SArd Biesheuvel }
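
/*
 * Editor's note - for intercepted exceptions the #VC error code is the
 * SVM exception-intercept exit code, i.e. SVM_EXIT_EXCP_BASE plus the
 * exception vector. An intercepted #DB (vector 1, X86_TRAP_DB) therefore
 * arrives as SVM_EXIT_EXCP_BASE + X86_TRAP_DB, which is exactly what
 * vc_is_db() tests for.
 */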
1148a3cbbb47SArd Biesheuvel 
1149a3cbbb47SArd Biesheuvel /*
1150a3cbbb47SArd Biesheuvel  * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
1151a3cbbb47SArd Biesheuvel  * and will panic when an error happens.
1152a3cbbb47SArd Biesheuvel  */
1153a3cbbb47SArd Biesheuvel DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
1154a3cbbb47SArd Biesheuvel {
1155a3cbbb47SArd Biesheuvel 	irqentry_state_t irq_state;
1156a3cbbb47SArd Biesheuvel 
1157a3cbbb47SArd Biesheuvel 	/*
1158a3cbbb47SArd Biesheuvel 	 * With the current implementation it is always possible to switch to a
1159a3cbbb47SArd Biesheuvel 	 * safe stack because #VC exceptions only happen at known places, like
1160a3cbbb47SArd Biesheuvel 	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
1161a3cbbb47SArd Biesheuvel 	 * also happen with code instrumentation when the hypervisor intercepts
1162a3cbbb47SArd Biesheuvel 	 * #DB, but the critical paths must not be instrumented, so #DB
1163a3cbbb47SArd Biesheuvel 	 * exceptions currently also only happen in safe places.
1164a3cbbb47SArd Biesheuvel 	 *
1165a3cbbb47SArd Biesheuvel 	 * But keep this check here in case the noinstr annotations are
1166a3cbbb47SArd Biesheuvel 	 * violated due to a bug elsewhere.
1167a3cbbb47SArd Biesheuvel 	 */
1168a3cbbb47SArd Biesheuvel 	if (unlikely(vc_from_invalid_context(regs))) {
1169a3cbbb47SArd Biesheuvel 		instrumentation_begin();
1170a3cbbb47SArd Biesheuvel 		panic("Can't handle #VC exception from unsupported context\n");
1171a3cbbb47SArd Biesheuvel 		instrumentation_end();
1172a3cbbb47SArd Biesheuvel 	}
1173a3cbbb47SArd Biesheuvel 
1174a3cbbb47SArd Biesheuvel 	/*
1175a3cbbb47SArd Biesheuvel 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1176a3cbbb47SArd Biesheuvel 	 */
1177a3cbbb47SArd Biesheuvel 	if (vc_is_db(error_code)) {
1178a3cbbb47SArd Biesheuvel 		exc_debug(regs);
1179a3cbbb47SArd Biesheuvel 		return;
1180a3cbbb47SArd Biesheuvel 	}
1181a3cbbb47SArd Biesheuvel 
1182a3cbbb47SArd Biesheuvel 	irq_state = irqentry_nmi_enter(regs);
1183a3cbbb47SArd Biesheuvel 
1184a3cbbb47SArd Biesheuvel 	instrumentation_begin();
1185a3cbbb47SArd Biesheuvel 
1186a3cbbb47SArd Biesheuvel 	if (!vc_raw_handle_exception(regs, error_code)) {
1187a3cbbb47SArd Biesheuvel 		/* Show some debug info */
1188a3cbbb47SArd Biesheuvel 		show_regs(regs);
1189a3cbbb47SArd Biesheuvel 
1190a3cbbb47SArd Biesheuvel 		/* Ask the hypervisor to terminate the guest */
1191a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1192a3cbbb47SArd Biesheuvel 
1193a3cbbb47SArd Biesheuvel 		/* If that fails and we get here - just panic */
1194a3cbbb47SArd Biesheuvel 		panic("Returned from Terminate-Request to Hypervisor\n");
1195a3cbbb47SArd Biesheuvel 	}
1196a3cbbb47SArd Biesheuvel 
1197a3cbbb47SArd Biesheuvel 	instrumentation_end();
1198a3cbbb47SArd Biesheuvel 	irqentry_nmi_exit(regs, irq_state);
1199a3cbbb47SArd Biesheuvel }
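
/*
 * Editor's note - kernel-mode #VC flow above, in short:
 *
 *   exc_vmm_communication(regs, error_code)
 *     -> vc_from_invalid_context()  reject #VC from unsupported context
 *     -> vc_is_db()                 short-circuit #DB to exc_debug()
 *     -> irqentry_nmi_enter()       NMI-like entry for everything else
 *     -> vc_raw_handle_exception()  emulate via the GHCB protocol
 *     -> on failure: show_regs(), terminate request, then panic()
 */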
1200a3cbbb47SArd Biesheuvel 
1201a3cbbb47SArd Biesheuvel /*
1202a3cbbb47SArd Biesheuvel  * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
1203a3cbbb47SArd Biesheuvel  * and will kill the current task with SIGBUS when an error happens.
1204a3cbbb47SArd Biesheuvel  */
1205a3cbbb47SArd Biesheuvel DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
1206a3cbbb47SArd Biesheuvel {
1207a3cbbb47SArd Biesheuvel 	/*
1208a3cbbb47SArd Biesheuvel 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1209a3cbbb47SArd Biesheuvel 	 */
1210a3cbbb47SArd Biesheuvel 	if (vc_is_db(error_code)) {
1211a3cbbb47SArd Biesheuvel 		noist_exc_debug(regs);
1212a3cbbb47SArd Biesheuvel 		return;
1213a3cbbb47SArd Biesheuvel 	}
1214a3cbbb47SArd Biesheuvel 
1215a3cbbb47SArd Biesheuvel 	irqentry_enter_from_user_mode(regs);
1216a3cbbb47SArd Biesheuvel 	instrumentation_begin();
1217a3cbbb47SArd Biesheuvel 
1218a3cbbb47SArd Biesheuvel 	if (!vc_raw_handle_exception(regs, error_code)) {
1219a3cbbb47SArd Biesheuvel 		/*
1220a3cbbb47SArd Biesheuvel 		 * Do not kill the machine if user-space triggered the
1221a3cbbb47SArd Biesheuvel 		 * exception. Send SIGBUS instead and let user-space deal with
1222a3cbbb47SArd Biesheuvel 		 * it.
1223a3cbbb47SArd Biesheuvel 		 */
1224a3cbbb47SArd Biesheuvel 		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
1225a3cbbb47SArd Biesheuvel 	}
1226a3cbbb47SArd Biesheuvel 
1227a3cbbb47SArd Biesheuvel 	instrumentation_end();
1228a3cbbb47SArd Biesheuvel 	irqentry_exit_to_user_mode(regs);
1229a3cbbb47SArd Biesheuvel }
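
/*
 * Editor's note - from user space a failed #VC emulation therefore
 * appears as a catchable SIGBUS with si_code BUS_OBJERR and a NULL
 * fault address. A minimal user-space sketch (hypothetical, for
 * illustration only):
 *
 *	static void bus_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		if (si->si_code == BUS_OBJERR)
 *			_exit(1);	// unsupported #VC in this task
 *	}
 *
 *	struct sigaction sa = {
 *		.sa_sigaction	= bus_handler,
 *		.sa_flags	= SA_SIGINFO,
 *	};
 *	sigaction(SIGBUS, &sa, NULL);
 */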
1230a3cbbb47SArd Biesheuvel 
1231a3cbbb47SArd Biesheuvel bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
1232a3cbbb47SArd Biesheuvel {
1233a3cbbb47SArd Biesheuvel 	unsigned long exit_code = regs->orig_ax;
1234a3cbbb47SArd Biesheuvel 	struct es_em_ctxt ctxt;
1235a3cbbb47SArd Biesheuvel 	enum es_result result;
1236a3cbbb47SArd Biesheuvel 
1237a3cbbb47SArd Biesheuvel 	vc_ghcb_invalidate(boot_ghcb);
1238a3cbbb47SArd Biesheuvel 
1239a3cbbb47SArd Biesheuvel 	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
1240a3cbbb47SArd Biesheuvel 	if (result == ES_OK)
1241a3cbbb47SArd Biesheuvel 		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
1242a3cbbb47SArd Biesheuvel 
1243a3cbbb47SArd Biesheuvel 	/* Done - now check the result */
1244a3cbbb47SArd Biesheuvel 	switch (result) {
1245a3cbbb47SArd Biesheuvel 	case ES_OK:
1246a3cbbb47SArd Biesheuvel 		vc_finish_insn(&ctxt);
1247a3cbbb47SArd Biesheuvel 		break;
1248a3cbbb47SArd Biesheuvel 	case ES_UNSUPPORTED:
1249a3cbbb47SArd Biesheuvel 		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
1250a3cbbb47SArd Biesheuvel 				exit_code, regs->ip);
1251a3cbbb47SArd Biesheuvel 		goto fail;
1252a3cbbb47SArd Biesheuvel 	case ES_VMM_ERROR:
1253a3cbbb47SArd Biesheuvel 		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1254a3cbbb47SArd Biesheuvel 				exit_code, regs->ip);
1255a3cbbb47SArd Biesheuvel 		goto fail;
1256a3cbbb47SArd Biesheuvel 	case ES_DECODE_FAILED:
1257a3cbbb47SArd Biesheuvel 		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1258a3cbbb47SArd Biesheuvel 				exit_code, regs->ip);
1259a3cbbb47SArd Biesheuvel 		goto fail;
1260a3cbbb47SArd Biesheuvel 	case ES_EXCEPTION:
1261a3cbbb47SArd Biesheuvel 		vc_early_forward_exception(&ctxt);
1262a3cbbb47SArd Biesheuvel 		break;
1263a3cbbb47SArd Biesheuvel 	case ES_RETRY:
1264a3cbbb47SArd Biesheuvel 		/* Nothing to do */
1265a3cbbb47SArd Biesheuvel 		break;
1266a3cbbb47SArd Biesheuvel 	default:
1267a3cbbb47SArd Biesheuvel 		BUG();
1268a3cbbb47SArd Biesheuvel 	}
1269a3cbbb47SArd Biesheuvel 
1270a3cbbb47SArd Biesheuvel 	return true;
1271a3cbbb47SArd Biesheuvel 
1272a3cbbb47SArd Biesheuvel fail:
1273a3cbbb47SArd Biesheuvel 	show_regs(regs);
1274a3cbbb47SArd Biesheuvel 
1275a3cbbb47SArd Biesheuvel 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1276a3cbbb47SArd Biesheuvel }
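
/*
 * Editor's note - this early handler mirrors vc_raw_handle_exception()
 * above, with three boot-time differences: it uses the statically
 * reserved boot_ghcb instead of the per-CPU GHCB, it logs through
 * early_printk() because regular printk is not up yet, and on failure
 * it terminates the guest outright - there is no task to kill and
 * nothing to recover this early in boot.
 */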
1277a3cbbb47SArd Biesheuvel 
1278a3cbbb47SArd Biesheuvel /*
1279a3cbbb47SArd Biesheuvel  * Initial setup of SNP relies on information provided by the
1280a3cbbb47SArd Biesheuvel  * Confidential Computing blob, which can be passed to the kernel
1281a3cbbb47SArd Biesheuvel  * in the following ways, depending on how it is booted:
1282a3cbbb47SArd Biesheuvel  *
1283a3cbbb47SArd Biesheuvel  * - when booted via the boot/decompress kernel:
1284a3cbbb47SArd Biesheuvel  *   - via boot_params
1285a3cbbb47SArd Biesheuvel  *
1286a3cbbb47SArd Biesheuvel  * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
1287a3cbbb47SArd Biesheuvel  *   - via a setup_data entry, as defined by the Linux Boot Protocol
1288a3cbbb47SArd Biesheuvel  *
1289a3cbbb47SArd Biesheuvel  * Scan for the blob in that order.
1290a3cbbb47SArd Biesheuvel  */
1291a3cbbb47SArd Biesheuvel static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
1292a3cbbb47SArd Biesheuvel {
1293a3cbbb47SArd Biesheuvel 	struct cc_blob_sev_info *cc_info;
1294a3cbbb47SArd Biesheuvel 
1295a3cbbb47SArd Biesheuvel 	/* The boot kernel would have passed the CC blob via boot_params. */
1296a3cbbb47SArd Biesheuvel 	if (bp->cc_blob_address) {
1297a3cbbb47SArd Biesheuvel 		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
1298a3cbbb47SArd Biesheuvel 		goto found_cc_info;
1299a3cbbb47SArd Biesheuvel 	}
1300a3cbbb47SArd Biesheuvel 
1301a3cbbb47SArd Biesheuvel 	/*
1302a3cbbb47SArd Biesheuvel 	 * If the kernel was booted directly, without the use of the
1303a3cbbb47SArd Biesheuvel 	 * boot/decompression kernel, the CC blob may have been passed via
1304a3cbbb47SArd Biesheuvel 	 * setup_data instead.
1305a3cbbb47SArd Biesheuvel 	 */
1306a3cbbb47SArd Biesheuvel 	cc_info = find_cc_blob_setup_data(bp);
1307a3cbbb47SArd Biesheuvel 	if (!cc_info)
1308a3cbbb47SArd Biesheuvel 		return NULL;
1309a3cbbb47SArd Biesheuvel 
1310a3cbbb47SArd Biesheuvel found_cc_info:
1311a3cbbb47SArd Biesheuvel 	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
1312a3cbbb47SArd Biesheuvel 		snp_abort();
1313a3cbbb47SArd Biesheuvel 
1314a3cbbb47SArd Biesheuvel 	return cc_info;
1315a3cbbb47SArd Biesheuvel }
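
/*
 * Editor's note - illustrative sketch of the setup_data path used above.
 * The real lookup is find_cc_blob_setup_data(); this invented
 * re-implementation only shows the idea, with the entry layout assumed
 * from the boot protocol's SETUP_CC_BLOB definition:
 */
#if 0	/* illustration only - not built */
static struct cc_blob_sev_info *cc_blob_from_setup_data(struct boot_params *bp)
{
	struct setup_data *hdr = (struct setup_data *)(unsigned long)bp->hdr.setup_data;
	struct cc_setup_data {
		struct setup_data header;
		u32 cc_blob_address;
	} *ccd;

	/* Walk the singly linked setup_data list from the boot protocol */
	while (hdr) {
		if (hdr->type == SETUP_CC_BLOB) {
			ccd = (struct cc_setup_data *)hdr;
			return (struct cc_blob_sev_info *)(unsigned long)ccd->cc_blob_address;
		}
		hdr = (struct setup_data *)(unsigned long)hdr->next;
	}

	return NULL;
}
#endif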
1316a3cbbb47SArd Biesheuvel 
1317a3cbbb47SArd Biesheuvel static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
1318a3cbbb47SArd Biesheuvel {
1319a3cbbb47SArd Biesheuvel 	struct svsm_call call = {};
1320a3cbbb47SArd Biesheuvel 	int ret;
1321a3cbbb47SArd Biesheuvel 	u64 pa;
1322a3cbbb47SArd Biesheuvel 
1323a3cbbb47SArd Biesheuvel 	/*
1324a3cbbb47SArd Biesheuvel 	 * Record the SVSM Calling Area address (CAA) if the guest is not
1325a3cbbb47SArd Biesheuvel 	 * running at VMPL0. The CA will be used to communicate with the
1326a3cbbb47SArd Biesheuvel 	 * SVSM to request SVSM services.
1327a3cbbb47SArd Biesheuvel 	 */
1328a3cbbb47SArd Biesheuvel 	if (!svsm_setup_ca(cc_info))
1329a3cbbb47SArd Biesheuvel 		return;
1330a3cbbb47SArd Biesheuvel 
1331a3cbbb47SArd Biesheuvel 	/*
1332a3cbbb47SArd Biesheuvel 	 * It is very early in the boot and the kernel is running identity
1333a3cbbb47SArd Biesheuvel 	 * mapped but without having adjusted the pagetables to where the
1334a3cbbb47SArd Biesheuvel 	 * kernel was loaded (physbase), so get the CA address using
1335a3cbbb47SArd Biesheuvel 	 * RIP-relative addressing.
1336a3cbbb47SArd Biesheuvel 	 */
1337a3cbbb47SArd Biesheuvel 	pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);
1338a3cbbb47SArd Biesheuvel 
1339a3cbbb47SArd Biesheuvel 	/*
1340a3cbbb47SArd Biesheuvel 	 * Switch over to the boot SVSM CA while the current CA is still
1341a3cbbb47SArd Biesheuvel 	 * addressable. There is no GHCB at this point so use the MSR protocol.
1342a3cbbb47SArd Biesheuvel 	 *
1343a3cbbb47SArd Biesheuvel 	 * SVSM_CORE_REMAP_CA call:
1344a3cbbb47SArd Biesheuvel 	 *   RAX = 0 (Protocol=0, CallID=0)
1345a3cbbb47SArd Biesheuvel 	 *   RCX = New CA GPA
1346a3cbbb47SArd Biesheuvel 	 */
1347a3cbbb47SArd Biesheuvel 	call.caa = svsm_get_caa();
1348a3cbbb47SArd Biesheuvel 	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
1349a3cbbb47SArd Biesheuvel 	call.rcx = pa;
1350a3cbbb47SArd Biesheuvel 	ret = svsm_perform_call_protocol(&call);
1351a3cbbb47SArd Biesheuvel 	if (ret)
1352a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
1353a3cbbb47SArd Biesheuvel 
1354681e2901SArd Biesheuvel 	boot_svsm_caa = (struct svsm_ca *)pa;
1355681e2901SArd Biesheuvel 	boot_svsm_caa_pa = pa;
1356a3cbbb47SArd Biesheuvel }
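
/*
 * Editor's note - the SVSM calling convention used above places the
 * protocol number in the upper 32 bits of RAX and the call ID in the
 * lower 32 bits, so SVSM_CORE_CALL(SVSM_CORE_REMAP_CA) encodes
 * Protocol=0/CallID=0, with RCX carrying the new Calling Area GPA.
 * Later SVSM requests follow the same shape, e.g. (shown only as an
 * illustration; argument meanings are defined by the SVSM spec):
 *
 *	call.caa = svsm_get_caa();
 *	call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
 *	...
 *	ret = svsm_perform_call_protocol(&call);
 */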
1357a3cbbb47SArd Biesheuvel 
1358a3cbbb47SArd Biesheuvel bool __head snp_init(struct boot_params *bp)
1359a3cbbb47SArd Biesheuvel {
1360a3cbbb47SArd Biesheuvel 	struct cc_blob_sev_info *cc_info;
1361a3cbbb47SArd Biesheuvel 
1362a3cbbb47SArd Biesheuvel 	if (!bp)
1363a3cbbb47SArd Biesheuvel 		return false;
1364a3cbbb47SArd Biesheuvel 
1365a3cbbb47SArd Biesheuvel 	cc_info = find_cc_blob(bp);
1366a3cbbb47SArd Biesheuvel 	if (!cc_info)
1367a3cbbb47SArd Biesheuvel 		return false;
1368a3cbbb47SArd Biesheuvel 
1369a3cbbb47SArd Biesheuvel 	if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
1370*18ea89eaSTom Lendacky 		sev_secrets_pa = cc_info->secrets_phys;
1371a3cbbb47SArd Biesheuvel 	else
1372a3cbbb47SArd Biesheuvel 		return false;
1373a3cbbb47SArd Biesheuvel 
1374a3cbbb47SArd Biesheuvel 	setup_cpuid_table(cc_info);
1375a3cbbb47SArd Biesheuvel 
1376a3cbbb47SArd Biesheuvel 	svsm_setup(cc_info);
1377a3cbbb47SArd Biesheuvel 
1378a3cbbb47SArd Biesheuvel 	/*
1379a3cbbb47SArd Biesheuvel 	 * The CC blob will be used later to access the secrets page. Cache
1380a3cbbb47SArd Biesheuvel 	 * it here like the boot kernel does.
1381a3cbbb47SArd Biesheuvel 	 */
1382a3cbbb47SArd Biesheuvel 	bp->cc_blob_address = (u32)(unsigned long)cc_info;
1383a3cbbb47SArd Biesheuvel 
1384a3cbbb47SArd Biesheuvel 	return true;
1385a3cbbb47SArd Biesheuvel }
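
/*
 * Editor's note - snp_init() bring-up order above: locate the CC blob,
 * record the secrets page PA (bailing out on a missing or odd-sized
 * secrets area), set up the SEV-SNP CPUID table, switch to the boot
 * SVSM calling area when running under an SVSM, and finally cache the
 * blob address in boot_params for later consumers.
 */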
1386a3cbbb47SArd Biesheuvel 
1387a3cbbb47SArd Biesheuvel void __head __noreturn snp_abort(void)
1388a3cbbb47SArd Biesheuvel {
1389a3cbbb47SArd Biesheuvel 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1390a3cbbb47SArd Biesheuvel }
1391