xref: /linux/arch/x86/boot/startup/sev-startup.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>

#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/sev-internal.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid/api.h>
#include <asm/cmdline.h>

/* For early boot hypervisor communication in SEV-ES enabled guests */
struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because it must be NULL before the
 * BSS section is cleared.
 */
struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
u64 sev_hv_features __ro_after_init;

/* Secrets page physical address from the CC blob */
u64 sev_secrets_pa __ro_after_init;

/* For early boot SVSM communication */
struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);

DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
DEFINE_PER_CPU(u64, svsm_caa_pa);

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active        = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

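/*
 * Release the GHCB obtained via __sev_get_ghcb(): either restore the per-CPU
 * GHCB from the backup copy, or invalidate it and mark it inactive so a stale
 * VMGEXIT cannot be mistaken for a valid request.
 */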
noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}

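/*
 * Issue an SVSM protocol call, retrying while the SVSM returns -EAGAIN.
 * Uses the per-CPU GHCB once the GHCBs are initialized, the boot GHCB before
 * that, and falls back to the MSR protocol when no GHCB is available yet.
 */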
int svsm_perform_call_protocol(struct svsm_call *call)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	int ret;

	/*
	 * This can be called very early in the boot, so use native functions
	 * in order to avoid paravirt issues.
	 */
	flags = native_local_irq_save();

	if (sev_cfg.ghcbs_initialized)
		ghcb = __sev_get_ghcb(&state);
	else if (boot_ghcb)
		ghcb = boot_ghcb;
	else
		ghcb = NULL;

	do {
		ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
			   : svsm_perform_msr_protocol(call);
	} while (ret == -EAGAIN);

	if (sev_cfg.ghcbs_initialized)
		__sev_put_ghcb(&state);

	native_local_irq_restore(flags);

	return ret;
}

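/*
 * Transition @npages 4K pages starting at @paddr to the page state @op using
 * the GHCB MSR protocol, rescinding validation before a page is made shared
 * and validating it after it is made private. Terminates the guest if the
 * hypervisor reports a failure.
 */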
void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
		      unsigned long npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;

	vaddr = vaddr & PAGE_MASK;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		/* Page validation must be rescinded before changing to shared */
		if (op == SNP_PAGE_STATE_SHARED)
			pvalidate_4k_page(vaddr, paddr, false);

		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
			goto e_term;

		if (GHCB_MSR_PSC_RESP_VAL(val))
			goto e_term;

		/* Page validation must be performed after changing to private */
		if (op == SNP_PAGE_STATE_PRIVATE)
			pvalidate_4k_page(vaddr, paddr, true);

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

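/*
 * Early, identity-mapped helper: mark @npages pages at @paddr as private in
 * the RMP table and validate them. No-op unless SEV-SNP is active.
 */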
void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}

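/*
 * Early, identity-mapped helper: rescind validation of @npages pages at
 * @paddr and mark them shared in the RMP table. No-op unless SEV-SNP is
 * active.
 */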
void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}

/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* Boot kernel would have passed the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If the kernel was booted directly, without the use of the
	 * boot/decompression kernel, the CC blob may have been passed via
	 * setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}

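/*
 * If the guest is not running at VMPL0, record the SVSM Calling Area and then
 * ask the SVSM to remap it to the kernel's own boot CA page while the current
 * CA is still addressable. Terminates the guest if the remap call fails.
 */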
static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
{
	struct svsm_call call = {};
	int ret;
	u64 pa;

	/*
	 * Record the SVSM Calling Area address (CAA) if the guest is not
	 * running at VMPL0. The CA will be used to communicate with the
	 * SVSM to perform the SVSM services.
	 */
	if (!svsm_setup_ca(cc_info))
		return;

	/*
	 * It is very early in the boot and the kernel is running identity
	 * mapped but without having adjusted the pagetables to where the
	 * kernel was loaded (physbase), so get the CA address using
	 * RIP-relative addressing.
	 */
	pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);

	/*
	 * Switch over to the boot SVSM CA while the current CA is still
	 * addressable. There is no GHCB at this point so use the MSR protocol.
	 *
	 * SVSM_CORE_REMAP_CA call:
	 *   RAX = 0 (Protocol=0, CallID=0)
	 *   RCX = New CA GPA
	 */
	call.caa = svsm_get_caa();
	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
	call.rcx = pa;
	ret = svsm_perform_call_protocol(&call);
	if (ret)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);

	boot_svsm_caa = (struct svsm_ca *)pa;
	boot_svsm_caa_pa = pa;
}

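/*
 * Locate the Confidential Computing blob, record the secrets page address,
 * set up the SNP CPUID table and the SVSM Calling Area, and cache the blob
 * address in boot_params. Returns false if no usable SNP CC blob was found.
 */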
bool __head snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
		sev_secrets_pa = cc_info->secrets_phys;
	else
		return false;

	setup_cpuid_table(cc_info);

	svsm_setup(cc_info);

	/*
	 * The CC blob will be used later to access the secrets page. Cache
	 * it here like the boot kernel does.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

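/* Terminate the guest, reporting the generic GHCB_SNP_UNSUPPORTED reason. */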
void __head __noreturn snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
369