xref: /linux/arch/x86/coco/sev/core.c (revision 2f924ca36d2f788d40a57ea48825ff51cba4e700)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Memory Encryption Support
4  *
5  * Copyright (C) 2019 SUSE
6  *
7  * Author: Joerg Roedel <jroedel@suse.de>
8  */
9 
10 #define pr_fmt(fmt)	"SEV: " fmt
11 
12 #include <linux/sched/debug.h>	/* For show_regs() */
13 #include <linux/percpu-defs.h>
14 #include <linux/cc_platform.h>
15 #include <linux/printk.h>
16 #include <linux/mm_types.h>
17 #include <linux/set_memory.h>
18 #include <linux/memblock.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/cpumask.h>
22 #include <linux/efi.h>
23 #include <linux/platform_device.h>
24 #include <linux/io.h>
25 #include <linux/psp-sev.h>
26 #include <linux/dmi.h>
27 #include <uapi/linux/sev-guest.h>
28 #include <crypto/gcm.h>
29 
30 #include <asm/init.h>
31 #include <asm/cpu_entry_area.h>
32 #include <asm/stacktrace.h>
33 #include <asm/sev.h>
34 #include <asm/sev-internal.h>
35 #include <asm/insn-eval.h>
36 #include <asm/fpu/xcr.h>
37 #include <asm/processor.h>
38 #include <asm/realmode.h>
39 #include <asm/setup.h>
40 #include <asm/traps.h>
41 #include <asm/svm.h>
42 #include <asm/smp.h>
43 #include <asm/cpu.h>
44 #include <asm/apic.h>
45 #include <asm/cpuid/api.h>
46 #include <asm/cmdline.h>
47 #include <asm/msr.h>
48 
49 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */
50 #define AP_INIT_CS_LIMIT		0xffff
51 #define AP_INIT_DS_LIMIT		0xffff
52 #define AP_INIT_LDTR_LIMIT		0xffff
53 #define AP_INIT_GDTR_LIMIT		0xffff
54 #define AP_INIT_IDTR_LIMIT		0xffff
55 #define AP_INIT_TR_LIMIT		0xffff
56 #define AP_INIT_RFLAGS_DEFAULT		0x2
57 #define AP_INIT_DR6_DEFAULT		0xffff0ff0
58 #define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
59 #define AP_INIT_XCR0_DEFAULT		0x1
60 #define AP_INIT_X87_FTW_DEFAULT		0x5555
61 #define AP_INIT_X87_FCW_DEFAULT		0x0040
62 #define AP_INIT_CR0_DEFAULT		0x60000010
63 #define AP_INIT_MXCSR_DEFAULT		0x1f80
64 
65 static const char * const sev_status_feat_names[] = {
66 	[MSR_AMD64_SEV_ENABLED_BIT]		= "SEV",
67 	[MSR_AMD64_SEV_ES_ENABLED_BIT]		= "SEV-ES",
68 	[MSR_AMD64_SEV_SNP_ENABLED_BIT]		= "SEV-SNP",
69 	[MSR_AMD64_SNP_VTOM_BIT]		= "vTom",
70 	[MSR_AMD64_SNP_REFLECT_VC_BIT]		= "ReflectVC",
71 	[MSR_AMD64_SNP_RESTRICTED_INJ_BIT]	= "RI",
72 	[MSR_AMD64_SNP_ALT_INJ_BIT]		= "AI",
73 	[MSR_AMD64_SNP_DEBUG_SWAP_BIT]		= "DebugSwap",
74 	[MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT]	= "NoHostIBS",
75 	[MSR_AMD64_SNP_BTB_ISOLATION_BIT]	= "BTBIsol",
76 	[MSR_AMD64_SNP_VMPL_SSS_BIT]		= "VmplSSS",
77 	[MSR_AMD64_SNP_SECURE_TSC_BIT]		= "SecureTSC",
78 	[MSR_AMD64_SNP_VMGEXIT_PARAM_BIT]	= "VMGExitParam",
79 	[MSR_AMD64_SNP_IBS_VIRT_BIT]		= "IBSVirt",
80 	[MSR_AMD64_SNP_VMSA_REG_PROT_BIT]	= "VMSARegProt",
81 	[MSR_AMD64_SNP_SMT_PROT_BIT]		= "SMTProt",
82 };
83 
84 /*
85  * For Secure TSC guests, the BSP fetches TSC_INFO using SNP guest messaging and
86  * initializes snp_tsc_scale and snp_tsc_offset. These values are replicated
87  * across the APs' VMSA fields (TSC_SCALE and TSC_OFFSET).
88  */
89 static u64 snp_tsc_scale __ro_after_init;
90 static u64 snp_tsc_offset __ro_after_init;
91 static u64 snp_tsc_freq_khz __ro_after_init;
92 
93 DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
94 DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
95 
96 /*
97  * SVSM-related information:
98  *   When running under an SVSM, the VMPL that Linux is executing at must be
99  *   non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
100  */
101 u8 snp_vmpl __ro_after_init;
102 EXPORT_SYMBOL_GPL(snp_vmpl);
103 
104 static u64 __init get_snp_jump_table_addr(void)
105 {
106 	struct snp_secrets_page *secrets;
107 	void __iomem *mem;
108 	u64 addr;
109 
110 	mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
111 	if (!mem) {
112 		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
113 		return 0;
114 	}
115 
116 	secrets = (__force struct snp_secrets_page *)mem;
117 
118 	addr = secrets->os_area.ap_jump_table_pa;
119 	iounmap(mem);
120 
121 	return addr;
122 }
123 
124 static u64 __init get_jump_table_addr(void)
125 {
126 	struct ghcb_state state;
127 	unsigned long flags;
128 	struct ghcb *ghcb;
129 	u64 ret = 0;
130 
131 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
132 		return get_snp_jump_table_addr();
133 
134 	local_irq_save(flags);
135 
136 	ghcb = __sev_get_ghcb(&state);
137 
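	/*
	 * Request the AP jump table address via the AP_JUMP_TABLE NAE event.
	 * On success the hypervisor returns the address in sw_exit_info_2.
	 */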
138 	vc_ghcb_invalidate(ghcb);
139 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
140 	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
141 	ghcb_set_sw_exit_info_2(ghcb, 0);
142 
143 	sev_es_wr_ghcb_msr(__pa(ghcb));
144 	VMGEXIT();
145 
146 	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
147 	    ghcb_sw_exit_info_2_is_valid(ghcb))
148 		ret = ghcb->save.sw_exit_info_2;
149 
150 	__sev_put_ghcb(&state);
151 
152 	local_irq_restore(flags);
153 
154 	return ret;
155 }
156 
157 static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
158 				    int ret, u64 svsm_ret)
159 {
160 	WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
161 	     pfn, action, page_size, ret, svsm_ret);
162 
163 	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
164 }
165 
166 static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
167 {
168 	unsigned int page_size;
169 	bool action;
170 	u64 pfn;
171 
172 	pfn = pc->entry[pc->cur_index].pfn;
173 	action = pc->entry[pc->cur_index].action;
174 	page_size = pc->entry[pc->cur_index].page_size;
175 
176 	__pval_terminate(pfn, action, page_size, ret, svsm_ret);
177 }
178 
179 static void pval_pages(struct snp_psc_desc *desc)
180 {
181 	struct psc_entry *e;
182 	unsigned long vaddr;
183 	unsigned int size;
184 	unsigned int i;
185 	bool validate;
186 	u64 pfn;
187 	int rc;
188 
189 	for (i = 0; i <= desc->hdr.end_entry; i++) {
190 		e = &desc->entries[i];
191 
192 		pfn = e->gfn;
193 		vaddr = (unsigned long)pfn_to_kaddr(pfn);
194 		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
195 		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
196 
197 		rc = pvalidate(vaddr, size, validate);
198 		if (!rc)
199 			continue;
200 
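		/*
		 * A 2M PVALIDATE failure with SIZEMISMATCH means the range is
		 * mapped as 4K pages in the RMP, so validate each 4K page of
		 * the 2M region individually.
		 */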
201 		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
202 			unsigned long vaddr_end = vaddr + PMD_SIZE;
203 
204 			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
205 				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
206 				if (rc)
207 					__pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
208 			}
209 		} else {
210 			__pval_terminate(pfn, validate, size, rc, 0);
211 		}
212 	}
213 }
214 
215 static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
216 					struct svsm_pvalidate_call *pc)
217 {
218 	struct svsm_pvalidate_entry *pe;
219 
220 	/* Nothing in the CA yet */
221 	pc->num_entries = 0;
222 	pc->cur_index   = 0;
223 
224 	pe = &pc->entry[0];
225 
226 	while (pfn < pfn_end) {
227 		pe->page_size = RMP_PG_SIZE_4K;
228 		pe->action    = action;
229 		pe->ignore_cf = 0;
230 		pe->pfn       = pfn;
231 
232 		pe++;
233 		pfn++;
234 
235 		pc->num_entries++;
236 		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
237 			break;
238 	}
239 
240 	return pfn;
241 }
242 
243 static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
244 				       struct svsm_pvalidate_call *pc)
245 {
246 	struct svsm_pvalidate_entry *pe;
247 	struct psc_entry *e;
248 
249 	/* Nothing in the CA yet */
250 	pc->num_entries = 0;
251 	pc->cur_index   = 0;
252 
253 	pe = &pc->entry[0];
254 	e  = &desc->entries[desc_entry];
255 
256 	while (desc_entry <= desc->hdr.end_entry) {
257 		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
258 		pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
259 		pe->ignore_cf = 0;
260 		pe->pfn       = e->gfn;
261 
262 		pe++;
263 		e++;
264 
265 		desc_entry++;
266 		pc->num_entries++;
267 		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
268 			break;
269 	}
270 
271 	return desc_entry;
272 }
273 
274 static void svsm_pval_pages(struct snp_psc_desc *desc)
275 {
276 	struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
277 	unsigned int i, pv_4k_count = 0;
278 	struct svsm_pvalidate_call *pc;
279 	struct svsm_call call = {};
280 	unsigned long flags;
281 	bool action;
282 	u64 pc_pa;
283 	int ret;
284 
285 	/*
286 	 * This can be called very early during boot, so use native functions
287 	 * in order to avoid paravirt issues.
288 	 */
289 	flags = native_local_irq_save();
290 
291 	/*
292 	 * The SVSM calling area (CA) can support processing 510 entries at a
293 	 * time. Loop through the Page State Change descriptor until the CA is
294 	 * full or the last entry in the descriptor is reached, at which time
295 	 * the SVSM is invoked. This repeats until all entries in the descriptor
296 	 * are processed.
297 	 */
298 	call.caa = svsm_get_caa();
299 
300 	pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
301 	pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
302 
303 	/* Protocol 0, Call ID 1 */
304 	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
305 	call.rcx = pc_pa;
306 
307 	for (i = 0; i <= desc->hdr.end_entry;) {
308 		i = svsm_build_ca_from_psc_desc(desc, i, pc);
309 
310 		do {
311 			ret = svsm_perform_call_protocol(&call);
312 			if (!ret)
313 				continue;
314 
315 			/*
316 			 * Check if the entry failed because of an RMP mismatch (a
317 			 * PVALIDATE at 2M was requested, but the page is mapped in
318 			 * the RMP as 4K).
319 			 */
320 
321 			if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
322 			    pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
323 				/* Save this entry for post-processing at 4K */
324 				pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
325 
326 				/* Skip to the next one unless at the end of the list */
327 				pc->cur_index++;
328 				if (pc->cur_index < pc->num_entries)
329 					ret = -EAGAIN;
330 				else
331 					ret = 0;
332 			}
333 		} while (ret == -EAGAIN);
334 
335 		if (ret)
336 			svsm_pval_terminate(pc, ret, call.rax_out);
337 	}
338 
339 	/* Process any entries that failed to be validated at 2M and validate them at 4K */
340 	for (i = 0; i < pv_4k_count; i++) {
341 		u64 pfn, pfn_end;
342 
343 		action  = pv_4k[i].action;
344 		pfn     = pv_4k[i].pfn;
345 		pfn_end = pfn + 512;
346 
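		/* Revalidate the 512 4K pages that make up the failed 2M range. */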
347 		while (pfn < pfn_end) {
348 			pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
349 
350 			ret = svsm_perform_call_protocol(&call);
351 			if (ret)
352 				svsm_pval_terminate(pc, ret, call.rax_out);
353 		}
354 	}
355 
356 	native_local_irq_restore(flags);
357 }
358 
359 static void pvalidate_pages(struct snp_psc_desc *desc)
360 {
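	/*
	 * PVALIDATE can only be issued at VMPL0. When an SVSM is present the
	 * kernel runs at a non-zero VMPL, so the SVSM must perform the
	 * validation on the guest's behalf.
	 */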
361 	if (snp_vmpl)
362 		svsm_pval_pages(desc);
363 	else
364 		pval_pages(desc);
365 }
366 
367 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
368 {
369 	int cur_entry, end_entry, ret = 0;
370 	struct snp_psc_desc *data;
371 	struct es_em_ctxt ctxt;
372 
373 	vc_ghcb_invalidate(ghcb);
374 
375 	/* Copy the input desc into GHCB shared buffer */
376 	data = (struct snp_psc_desc *)ghcb->shared_buffer;
377 	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
378 
379 	/*
380 	 * As per the GHCB specification, the hypervisor can resume the guest
381 	 * before processing all the entries. Check whether all the entries
382 	 * are processed. If not, then keep retrying. Note, the hypervisor
383 	 * will update the data memory directly to indicate the status, so
384 	 * reference the data->hdr everywhere.
385 	 *
386 	 * The strategy here is to wait for the hypervisor to change the page
387 	 * state in the RMP table before the guest accesses the memory pages. If the
388 	 * page state change was not successful, then later memory access will
389 	 * result in a crash.
390 	 */
391 	cur_entry = data->hdr.cur_entry;
392 	end_entry = data->hdr.end_entry;
393 
394 	while (data->hdr.cur_entry <= data->hdr.end_entry) {
395 		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
396 
397 		/* This call advances the entries in the shared buffer that 'data' points to. */
398 		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
399 
400 		/*
401 		 * Page State Change VMGEXIT can pass error code through
402 		 * exit_info_2.
403 		 */
404 		if (WARN(ret || ghcb->save.sw_exit_info_2,
405 			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
406 			 ret, ghcb->save.sw_exit_info_2)) {
407 			ret = 1;
408 			goto out;
409 		}
410 
411 		/* Verify that reserved bit is not set */
412 		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
413 			ret = 1;
414 			goto out;
415 		}
416 
417 		/*
418 		 * Sanity check that entry processing is not going backwards.
419 		 * This can happen only if the hypervisor is tricking us.
420 		 */
421 		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
422 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
423 			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
424 			ret = 1;
425 			goto out;
426 		}
427 	}
428 
429 out:
430 	return ret;
431 }
432 
433 static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
434 				       unsigned long vaddr_end, int op)
435 {
436 	struct ghcb_state state;
437 	bool use_large_entry;
438 	struct psc_hdr *hdr;
439 	struct psc_entry *e;
440 	unsigned long flags;
441 	unsigned long pfn;
442 	struct ghcb *ghcb;
443 	int i;
444 
445 	hdr = &data->hdr;
446 	e = data->entries;
447 
448 	memset(data, 0, sizeof(*data));
449 	i = 0;
450 
451 	while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
452 		hdr->end_entry = i;
453 
454 		if (is_vmalloc_addr((void *)vaddr)) {
455 			pfn = vmalloc_to_pfn((void *)vaddr);
456 			use_large_entry = false;
457 		} else {
458 			pfn = __pa(vaddr) >> PAGE_SHIFT;
459 			use_large_entry = true;
460 		}
461 
462 		e->gfn = pfn;
463 		e->operation = op;
464 
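		/*
		 * Only direct-map addresses that are 2M-aligned and span a full
		 * 2M range can use a 2M entry; vmalloc mappings are handled one
		 * 4K page at a time since they may not be physically contiguous.
		 */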
465 		if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
466 		    (vaddr_end - vaddr) >= PMD_SIZE) {
467 			e->pagesize = RMP_PG_SIZE_2M;
468 			vaddr += PMD_SIZE;
469 		} else {
470 			e->pagesize = RMP_PG_SIZE_4K;
471 			vaddr += PAGE_SIZE;
472 		}
473 
474 		e++;
475 		i++;
476 	}
477 
478 	/* Page validation must be rescinded before changing to shared */
479 	if (op == SNP_PAGE_STATE_SHARED)
480 		pvalidate_pages(data);
481 
482 	local_irq_save(flags);
483 
484 	if (sev_cfg.ghcbs_initialized)
485 		ghcb = __sev_get_ghcb(&state);
486 	else
487 		ghcb = boot_ghcb;
488 
489 	/* Invoke the hypervisor to perform the page state changes */
490 	if (!ghcb || vmgexit_psc(ghcb, data))
491 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
492 
493 	if (sev_cfg.ghcbs_initialized)
494 		__sev_put_ghcb(&state);
495 
496 	local_irq_restore(flags);
497 
498 	/* Page validation must be performed after changing to private */
499 	if (op == SNP_PAGE_STATE_PRIVATE)
500 		pvalidate_pages(data);
501 
502 	return vaddr;
503 }
504 
505 static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
506 {
507 	struct snp_psc_desc desc;
508 	unsigned long vaddr_end;
509 
510 	/* Use the MSR protocol when a GHCB is not available. */
511 	if (!boot_ghcb)
512 		return early_set_pages_state(vaddr, __pa(vaddr), npages, op);
513 
514 	vaddr = vaddr & PAGE_MASK;
515 	vaddr_end = vaddr + (npages << PAGE_SHIFT);
516 
517 	while (vaddr < vaddr_end)
518 		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
519 }
520 
521 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
522 {
523 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
524 		return;
525 
526 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
527 }
528 
529 void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
530 {
531 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
532 		return;
533 
534 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
535 }
536 
537 void snp_accept_memory(phys_addr_t start, phys_addr_t end)
538 {
539 	unsigned long vaddr, npages;
540 
541 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
542 		return;
543 
544 	vaddr = (unsigned long)__va(start);
545 	npages = (end - start) >> PAGE_SHIFT;
546 
547 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
548 }
549 
550 static void set_pte_enc(pte_t *kpte, int level, void *va)
551 {
552 	struct pte_enc_desc d = {
553 		.kpte	   = kpte,
554 		.pte_level = level,
555 		.va	   = va,
556 		.encrypt   = true
557 	};
558 
559 	prepare_pte_enc(&d);
560 	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
561 }
562 
563 static void unshare_all_memory(void)
564 {
565 	unsigned long addr, end, size, ghcb;
566 	struct sev_es_runtime_data *data;
567 	unsigned int npages, level;
568 	bool skipped_addr;
569 	pte_t *pte;
570 	int cpu;
571 
572 	/* Unshare the direct mapping. */
573 	addr = PAGE_OFFSET;
574 	end  = PAGE_OFFSET + get_max_mapped();
575 
576 	while (addr < end) {
577 		pte = lookup_address(addr, &level);
578 		size = page_level_size(level);
579 		npages = size / PAGE_SIZE;
580 		skipped_addr = false;
581 
582 		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
583 			addr += size;
584 			continue;
585 		}
586 
587 		/*
588 		 * Ensure that all the per-CPU GHCBs are made private at the
589 		 * end of the unsharing loop so that the switch to the slower
590 		 * MSR protocol happens last.
591 		 */
592 		for_each_possible_cpu(cpu) {
593 			data = per_cpu(runtime_data, cpu);
594 			ghcb = (unsigned long)&data->ghcb_page;
595 
596 			if (addr <= ghcb && ghcb <= addr + size) {
597 				skipped_addr = true;
598 				break;
599 			}
600 		}
601 
602 		if (!skipped_addr) {
603 			set_pte_enc(pte, level, (void *)addr);
604 			snp_set_memory_private(addr, npages);
605 		}
606 		addr += size;
607 	}
608 
609 	/* Unshare all bss decrypted memory. */
610 	addr = (unsigned long)__start_bss_decrypted;
611 	end  = (unsigned long)__start_bss_decrypted_unused;
612 	npages = (end - addr) >> PAGE_SHIFT;
613 
614 	for (; addr < end; addr += PAGE_SIZE) {
615 		pte = lookup_address(addr, &level);
616 		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
617 			continue;
618 
619 		set_pte_enc(pte, level, (void *)addr);
620 	}
621 	addr = (unsigned long)__start_bss_decrypted;
622 	snp_set_memory_private(addr, npages);
623 
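	/* Flush stale TLB entries for the re-encrypted mappings. */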
624 	__flush_tlb_all();
625 }
626 
627 /* Stop new private<->shared conversions */
628 void snp_kexec_begin(void)
629 {
630 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
631 		return;
632 
633 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
634 		return;
635 
636 	/*
637 	 * The crash kernel ends up here with interrupts disabled: it can't wait
638 	 * for conversions to finish.
639 	 *
640 	 * If a race happened, just report it and proceed.
641 	 */
642 	if (!set_memory_enc_stop_conversion())
643 		pr_warn("Failed to stop shared<->private conversions\n");
644 }
645 
646 void snp_kexec_finish(void)
647 {
648 	struct sev_es_runtime_data *data;
649 	unsigned int level, cpu;
650 	unsigned long size;
651 	struct ghcb *ghcb;
652 	pte_t *pte;
653 
654 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
655 		return;
656 
657 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
658 		return;
659 
660 	unshare_all_memory();
661 
662 	/*
663 	 * Switch to using the MSR protocol to change the per-CPU GHCBs to
664 	 * private. Once they have been switched back to private in the loop
665 	 * below, no further GHCB calls to the hypervisor are possible until
666 	 * the kexec'ed kernel starts running.
667 	 */
668 	boot_ghcb = NULL;
669 	sev_cfg.ghcbs_initialized = false;
670 
671 	for_each_possible_cpu(cpu) {
672 		data = per_cpu(runtime_data, cpu);
673 		ghcb = &data->ghcb_page;
674 		pte = lookup_address((unsigned long)ghcb, &level);
675 		size = page_level_size(level);
676 		set_pte_enc(pte, level, (void *)ghcb);
677 		snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE));
678 	}
679 }
680 
681 static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
682 {
683 	int ret;
684 
685 	if (snp_vmpl) {
686 		struct svsm_call call = {};
687 		unsigned long flags;
688 
689 		local_irq_save(flags);
690 
691 		call.caa = this_cpu_read(svsm_caa);
692 		call.rcx = __pa(va);
693 
694 		if (make_vmsa) {
695 			/* Protocol 0, Call ID 2 */
696 			call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
697 			call.rdx = __pa(caa);
698 			call.r8  = apic_id;
699 		} else {
700 			/* Protocol 0, Call ID 3 */
701 			call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
702 		}
703 
704 		ret = svsm_perform_call_protocol(&call);
705 
706 		local_irq_restore(flags);
707 	} else {
708 		/*
709 		 * If the kernel runs at VMPL0, it can change the VMSA
710 		 * bit for a page using the RMPADJUST instruction.
711 		 * However, for the instruction to succeed it must
712 		 * target the permissions of a lesser privileged (higher
713 		 * numbered) VMPL level, so use VMPL1.
714 		 */
715 		u64 attrs = 1;
716 
717 		if (make_vmsa)
718 			attrs |= RMPADJUST_VMSA_PAGE_BIT;
719 
720 		ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
721 	}
722 
723 	return ret;
724 }
725 
726 #define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
727 #define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
728 #define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
729 
730 #define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
731 #define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
732 
733 static void *snp_alloc_vmsa_page(int cpu)
734 {
735 	struct page *p;
736 
737 	/*
738 	 * Allocate VMSA page to work around the SNP erratum where the CPU will
739 	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
740 	 * collides with the RMP entry of the VMSA page. The recommended workaround
741 	 * is to not use a large page.
742 	 *
743 	 * Allocate an 8k page that is also 8k-aligned.
744 	 */
745 	p = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
746 	if (!p)
747 		return NULL;
748 
749 	split_page(p, 1);
750 
751 	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
752 	__free_page(p);
753 
754 	return page_address(p + 1);
755 }
756 
757 static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
758 {
759 	int err;
760 
761 	err = snp_set_vmsa(vmsa, NULL, apic_id, false);
762 	if (err)
763 		pr_err("clear VMSA page failed (%u), leaking page\n", err);
764 	else
765 		free_page((unsigned long)vmsa);
766 }
767 
768 static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
769 {
770 	struct sev_es_save_area *cur_vmsa, *vmsa;
771 	struct ghcb_state state;
772 	struct svsm_ca *caa;
773 	unsigned long flags;
774 	struct ghcb *ghcb;
775 	u8 sipi_vector;
776 	int cpu, ret;
777 	u64 cr4;
778 
779 	/*
780 	 * The hypervisor SNP feature support check has happened earlier, just check
781 	 * the AP_CREATION one here.
782 	 */
783 	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
784 		return -EOPNOTSUPP;
785 
786 	/*
787 	 * Verify the desired start IP against the known trampoline start IP
788 	 * to catch any future new trampolines that may be introduced that
789 	 * would require a new protected guest entry point.
790 	 */
791 	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
792 		      "Unsupported SNP start_ip: %lx\n", start_ip))
793 		return -EINVAL;
794 
795 	/* Override start_ip with known protected guest start IP */
796 	start_ip = real_mode_header->sev_es_trampoline_start;
797 
798 	/* Find the logical CPU for the APIC ID */
799 	for_each_present_cpu(cpu) {
800 		if (arch_match_cpu_phys_id(cpu, apic_id))
801 			break;
802 	}
803 	if (cpu >= nr_cpu_ids)
804 		return -EINVAL;
805 
806 	cur_vmsa = per_cpu(sev_vmsa, cpu);
807 
808 	/*
809 	 * A new VMSA is created each time because there is no guarantee that
810 	 * the current VMSA is the kernel's or that the vCPU is not running. If
811 	 * an attempt was made to use the current VMSA with a running vCPU, a
812 	 * #VMEXIT of that vCPU would wipe out all of the settings being done
813 	 * here.
814 	 */
815 	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(cpu);
816 	if (!vmsa)
817 		return -ENOMEM;
818 
819 	/* If an SVSM is present, the SVSM per-CPU CAA will be !NULL */
820 	caa = per_cpu(svsm_caa, cpu);
821 
822 	/* CR4 should maintain the MCE value */
823 	cr4 = native_read_cr4() & X86_CR4_MCE;
824 
825 	/* Set the CS value based on the start_ip converted to a SIPI vector */
826 	sipi_vector		= (start_ip >> 12);
827 	vmsa->cs.base		= sipi_vector << 12;
828 	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
829 	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
830 	vmsa->cs.selector	= sipi_vector << 8;
831 
832 	/* Set the RIP value based on start_ip */
833 	vmsa->rip		= start_ip & 0xfff;
834 
835 	/* Set AP INIT defaults as documented in the APM */
836 	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
837 	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
838 	vmsa->es		= vmsa->ds;
839 	vmsa->fs		= vmsa->ds;
840 	vmsa->gs		= vmsa->ds;
841 	vmsa->ss		= vmsa->ds;
842 
843 	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
844 	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
845 	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
846 	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
847 	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
848 	vmsa->tr.attrib		= INIT_TR_ATTRIBS;
849 
850 	vmsa->cr4		= cr4;
851 	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
852 	vmsa->dr7		= DR7_RESET_VALUE;
853 	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
854 	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
855 	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
856 	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
857 	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
858 	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
859 	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;
860 
861 	/* SVME must be set. */
862 	vmsa->efer		= EFER_SVME;
863 
864 	/*
865 	 * Set the SNP-specific fields for this VMSA:
866 	 *   VMPL level
867 	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
868 	 */
869 	vmsa->vmpl		= snp_vmpl;
870 	vmsa->sev_features	= sev_status >> 2;
871 
872 	/* Populate AP's TSC scale/offset to get accurate TSC values. */
873 	if (cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)) {
874 		vmsa->tsc_scale = snp_tsc_scale;
875 		vmsa->tsc_offset = snp_tsc_offset;
876 	}
877 
878 	/* Switch the page over to a VMSA page now that it is initialized */
879 	ret = snp_set_vmsa(vmsa, caa, apic_id, true);
880 	if (ret) {
881 		pr_err("set VMSA page failed (%u)\n", ret);
882 		free_page((unsigned long)vmsa);
883 
884 		return -EINVAL;
885 	}
886 
887 	/* Issue VMGEXIT AP Creation NAE event */
888 	local_irq_save(flags);
889 
890 	ghcb = __sev_get_ghcb(&state);
891 
892 	vc_ghcb_invalidate(ghcb);
893 	ghcb_set_rax(ghcb, vmsa->sev_features);
894 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
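	/*
	 * sw_exit_info_1 encodes the APIC ID of the target vCPU in bits 63:32,
	 * the VMPL of the new VMSA in bits 31:16 and the AP Creation request
	 * type in bits 15:0.
	 */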
895 	ghcb_set_sw_exit_info_1(ghcb,
896 				((u64)apic_id << 32)	|
897 				((u64)snp_vmpl << 16)	|
898 				SVM_VMGEXIT_AP_CREATE);
899 	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
900 
901 	sev_es_wr_ghcb_msr(__pa(ghcb));
902 	VMGEXIT();
903 
904 	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
905 	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
906 		pr_err("SNP AP Creation error\n");
907 		ret = -EINVAL;
908 	}
909 
910 	__sev_put_ghcb(&state);
911 
912 	local_irq_restore(flags);
913 
914 	/* Perform cleanup if there was an error */
915 	if (ret) {
916 		snp_cleanup_vmsa(vmsa, apic_id);
917 		vmsa = NULL;
918 	}
919 
920 	/* Free up any previous VMSA page */
921 	if (cur_vmsa)
922 		snp_cleanup_vmsa(cur_vmsa, apic_id);
923 
924 	/* Record the current VMSA page */
925 	per_cpu(sev_vmsa, cpu) = vmsa;
926 
927 	return ret;
928 }
929 
930 void __init snp_set_wakeup_secondary_cpu(void)
931 {
932 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
933 		return;
934 
935 	/*
936 	 * Always set this override if SNP is enabled. This makes it the
937 	 * required method to start APs under SNP. If the hypervisor does
938 	 * not support AP creation, then no APs will be started.
939 	 */
940 	apic_update_callback(wakeup_secondary_cpu, wakeup_cpu_via_vmgexit);
941 }
942 
943 int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
944 {
945 	u16 startup_cs, startup_ip;
946 	phys_addr_t jump_table_pa;
947 	u64 jump_table_addr;
948 	u16 __iomem *jump_table;
949 
950 	jump_table_addr = get_jump_table_addr();
951 
952 	/* On UP guests there is no jump table so this is not a failure */
953 	if (!jump_table_addr)
954 		return 0;
955 
956 	/* Check if AP Jump Table is page-aligned */
957 	if (jump_table_addr & ~PAGE_MASK)
958 		return -EINVAL;
959 
960 	jump_table_pa = jump_table_addr & PAGE_MASK;
961 
962 	startup_cs = (u16)(rmh->trampoline_start >> 4);
963 	startup_ip = (u16)(rmh->sev_es_trampoline_start -
964 			   rmh->trampoline_start);
965 
966 	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
967 	if (!jump_table)
968 		return -EIO;
969 
970 	writew(startup_ip, &jump_table[0]);
971 	writew(startup_cs, &jump_table[1]);
972 
973 	iounmap(jump_table);
974 
975 	return 0;
976 }
977 
978 /*
979  * This is needed by the OVMF UEFI firmware which will use whatever it finds in
980  * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
981  * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
982  */
983 int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
984 {
985 	struct sev_es_runtime_data *data;
986 	unsigned long address, pflags;
987 	int cpu;
988 	u64 pfn;
989 
990 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
991 		return 0;
992 
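	/* The GHCBs only need to be writable, non-executable data mappings. */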
993 	pflags = _PAGE_NX | _PAGE_RW;
994 
995 	for_each_possible_cpu(cpu) {
996 		data = per_cpu(runtime_data, cpu);
997 
998 		address = __pa(&data->ghcb_page);
999 		pfn = address >> PAGE_SHIFT;
1000 
1001 		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
1002 			return 1;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 static void snp_register_per_cpu_ghcb(void)
1009 {
1010 	struct sev_es_runtime_data *data;
1011 	struct ghcb *ghcb;
1012 
1013 	data = this_cpu_read(runtime_data);
1014 	ghcb = &data->ghcb_page;
1015 
1016 	snp_register_ghcb_early(__pa(ghcb));
1017 }
1018 
1019 void setup_ghcb(void)
1020 {
1021 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1022 		return;
1023 
1024 	/*
1025 	 * Check whether the runtime #VC exception handler is active. It uses
1026 	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
1027 	 *
1028 	 * If SNP is active, register the per-CPU GHCB page so that the runtime
1029 	 * exception handler can use it.
1030 	 */
1031 	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
1032 		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1033 			snp_register_per_cpu_ghcb();
1034 
1035 		sev_cfg.ghcbs_initialized = true;
1036 
1037 		return;
1038 	}
1039 
1040 	/*
1041 	 * Make sure the hypervisor talks a supported protocol.
1042 	 * This gets called only in the BSP boot phase.
1043 	 */
1044 	if (!sev_es_negotiate_protocol())
1045 		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1046 
1047 	/*
1048 	 * Clear the boot_ghcb. The first exception comes in before the bss
1049 	 * section is cleared.
1050 	 */
1051 	memset(&boot_ghcb_page, 0, PAGE_SIZE);
1052 
1053 	/* Alright - Make the boot-ghcb public */
1054 	boot_ghcb = &boot_ghcb_page;
1055 
1056 	/* SNP guests require that the GHCB GPA be registered. */
1057 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1058 		snp_register_ghcb_early(__pa(&boot_ghcb_page));
1059 }
1060 
1061 #ifdef CONFIG_HOTPLUG_CPU
1062 static void sev_es_ap_hlt_loop(void)
1063 {
1064 	struct ghcb_state state;
1065 	struct ghcb *ghcb;
1066 
1067 	ghcb = __sev_get_ghcb(&state);
1068 
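	/*
	 * Ask the hypervisor to park this vCPU in a HLT loop. A non-zero
	 * sw_exit_info_2 on return indicates a wakeup request.
	 */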
1069 	while (true) {
1070 		vc_ghcb_invalidate(ghcb);
1071 		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
1072 		ghcb_set_sw_exit_info_1(ghcb, 0);
1073 		ghcb_set_sw_exit_info_2(ghcb, 0);
1074 
1075 		sev_es_wr_ghcb_msr(__pa(ghcb));
1076 		VMGEXIT();
1077 
1078 		/* Wakeup signal? */
1079 		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
1080 		    ghcb->save.sw_exit_info_2)
1081 			break;
1082 	}
1083 
1084 	__sev_put_ghcb(&state);
1085 }
1086 
1087 /*
1088  * Play_dead handler when running under SEV-ES. This is needed because
1089  * the hypervisor can't deliver an SIPI request to restart the AP.
1090  * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
1091  * hypervisor wakes it up again.
1092  */
1093 static void sev_es_play_dead(void)
1094 {
1095 	play_dead_common();
1096 
1097 	/* IRQs now disabled */
1098 
1099 	sev_es_ap_hlt_loop();
1100 
1101 	/*
1102 	 * If we get here, the VCPU was woken up again. Jump to CPU
1103 	 * startup code to get it back online.
1104 	 */
1105 	soft_restart_cpu();
1106 }
1107 #else  /* CONFIG_HOTPLUG_CPU */
1108 #define sev_es_play_dead	native_play_dead
1109 #endif /* CONFIG_HOTPLUG_CPU */
1110 
1111 #ifdef CONFIG_SMP
1112 static void __init sev_es_setup_play_dead(void)
1113 {
1114 	smp_ops.play_dead = sev_es_play_dead;
1115 }
1116 #else
1117 static inline void sev_es_setup_play_dead(void) { }
1118 #endif
1119 
1120 static void __init alloc_runtime_data(int cpu)
1121 {
1122 	struct sev_es_runtime_data *data;
1123 
1124 	data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu));
1125 	if (!data)
1126 		panic("Can't allocate SEV-ES runtime data");
1127 
1128 	per_cpu(runtime_data, cpu) = data;
1129 
1130 	if (snp_vmpl) {
1131 		struct svsm_ca *caa;
1132 
1133 		/* Allocate the SVSM CA page if an SVSM is present */
1134 		caa = memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE);
1135 
1136 		per_cpu(svsm_caa, cpu) = caa;
1137 		per_cpu(svsm_caa_pa, cpu) = __pa(caa);
1138 	}
1139 }
1140 
1141 static void __init init_ghcb(int cpu)
1142 {
1143 	struct sev_es_runtime_data *data;
1144 	int err;
1145 
1146 	data = per_cpu(runtime_data, cpu);
1147 
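	/* The GHCB is shared with the hypervisor and must be mapped decrypted. */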
1148 	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
1149 					 sizeof(data->ghcb_page));
1150 	if (err)
1151 		panic("Can't map GHCBs unencrypted");
1152 
1153 	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
1154 
1155 	data->ghcb_active = false;
1156 	data->backup_ghcb_active = false;
1157 }
1158 
1159 void __init sev_es_init_vc_handling(void)
1160 {
1161 	int cpu;
1162 
1163 	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
1164 
1165 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1166 		return;
1167 
1168 	if (!sev_es_check_cpu_features())
1169 		panic("SEV-ES CPU Features missing");
1170 
1171 	/*
1172 	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
1173 	 * features.
1174 	 */
1175 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
1176 		sev_hv_features = get_hv_features();
1177 
1178 		if (!(sev_hv_features & GHCB_HV_FT_SNP))
1179 			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1180 	}
1181 
1182 	/* Initialize per-cpu GHCB pages */
1183 	for_each_possible_cpu(cpu) {
1184 		alloc_runtime_data(cpu);
1185 		init_ghcb(cpu);
1186 	}
1187 
1188 	/* If running under an SVSM, switch to the per-cpu CA */
1189 	if (snp_vmpl) {
1190 		struct svsm_call call = {};
1191 		unsigned long flags;
1192 		int ret;
1193 
1194 		local_irq_save(flags);
1195 
1196 		/*
1197 		 * SVSM_CORE_REMAP_CA call:
1198 		 *   RAX = 0 (Protocol=0, CallID=0)
1199 		 *   RCX = New CA GPA
1200 		 */
1201 		call.caa = svsm_get_caa();
1202 		call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
1203 		call.rcx = this_cpu_read(svsm_caa_pa);
1204 		ret = svsm_perform_call_protocol(&call);
1205 		if (ret)
1206 			panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n",
1207 			      ret, call.rax_out);
1208 
1209 		sev_cfg.use_cas = true;
1210 
1211 		local_irq_restore(flags);
1212 	}
1213 
1214 	sev_es_setup_play_dead();
1215 
1216 	/* Secondary CPUs use the runtime #VC handler */
1217 	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1218 }
1219 
1220 /*
1221  * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
1222  * enabled, as the alternative (fallback) logic for DMI probing in the legacy
1223  * ROM region can cause a crash since this region is not pre-validated.
1224  */
1225 void __init snp_dmi_setup(void)
1226 {
1227 	if (efi_enabled(EFI_CONFIG_TABLES))
1228 		dmi_setup();
1229 }
1230 
1231 static void dump_cpuid_table(void)
1232 {
1233 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
1234 	int i = 0;
1235 
1236 	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
1237 		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);
1238 
1239 	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
1240 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
1241 
1242 		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
1243 			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
1244 			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
1245 	}
1246 }
1247 
1248 /*
1249  * It is useful from an auditing/testing perspective to provide an easy way
1250  * for the guest owner to know that the CPUID table has been initialized as
1251  * expected, but that initialization happens too early in boot to print any
1252  * sort of indicator, and there's not really any other good place to do it,
1253  * so do it here.
1254  *
1255  * If running as an SNP guest, report the current VM privilege level (VMPL).
1256  */
1257 static int __init report_snp_info(void)
1258 {
1259 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
1260 
1261 	if (cpuid_table->count) {
1262 		pr_info("Using SNP CPUID table, %d entries present.\n",
1263 			cpuid_table->count);
1264 
1265 		if (sev_cfg.debug)
1266 			dump_cpuid_table();
1267 	}
1268 
1269 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1270 		pr_info("SNP running at VMPL%u.\n", snp_vmpl);
1271 
1272 	return 0;
1273 }
1274 arch_initcall(report_snp_info);
1275 
1276 static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
1277 {
1278 	/* If (new) lengths have been returned, propagate them up */
1279 	if (call->rcx_out != call->rcx)
1280 		input->manifest_buf.len = call->rcx_out;
1281 
1282 	if (call->rdx_out != call->rdx)
1283 		input->certificates_buf.len = call->rdx_out;
1284 
1285 	if (call->r8_out != call->r8)
1286 		input->report_buf.len = call->r8_out;
1287 }
1288 
1289 int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
1290 			      struct svsm_attest_call *input)
1291 {
1292 	struct svsm_attest_call *ac;
1293 	unsigned long flags;
1294 	u64 attest_call_pa;
1295 	int ret;
1296 
1297 	if (!snp_vmpl)
1298 		return -EINVAL;
1299 
1300 	local_irq_save(flags);
1301 
1302 	call->caa = svsm_get_caa();
1303 
1304 	ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
1305 	attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
1306 
1307 	*ac = *input;
1308 
1309 	/*
1310 	 * Set input registers for the request and set RDX and R8 to known
1311 	 * values in order to detect length values being returned in them.
1312 	 */
1313 	call->rax = call_id;
1314 	call->rcx = attest_call_pa;
1315 	call->rdx = -1;
1316 	call->r8 = -1;
1317 	ret = svsm_perform_call_protocol(call);
1318 	update_attest_input(call, input);
1319 
1320 	local_irq_restore(flags);
1321 
1322 	return ret;
1323 }
1324 EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
1325 
1326 static int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
1327 				   struct snp_guest_request_ioctl *rio)
1328 {
1329 	struct ghcb_state state;
1330 	struct es_em_ctxt ctxt;
1331 	unsigned long flags;
1332 	struct ghcb *ghcb;
1333 	int ret;
1334 
1335 	rio->exitinfo2 = SEV_RET_NO_FW_CALL;
1336 
1337 	/*
1338 	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
1339 	 * a per-CPU GHCB.
1340 	 */
1341 	local_irq_save(flags);
1342 
1343 	ghcb = __sev_get_ghcb(&state);
1344 	if (!ghcb) {
1345 		ret = -EIO;
1346 		goto e_restore_irq;
1347 	}
1348 
1349 	vc_ghcb_invalidate(ghcb);
1350 
1351 	if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
1352 		ghcb_set_rax(ghcb, input->data_gpa);
1353 		ghcb_set_rbx(ghcb, input->data_npages);
1354 	}
1355 
1356 	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, req->exit_code, input->req_gpa, input->resp_gpa);
1357 	if (ret)
1358 		goto e_put;
1359 
1360 	rio->exitinfo2 = ghcb->save.sw_exit_info_2;
1361 	switch (rio->exitinfo2) {
1362 	case 0:
1363 		break;
1364 
1365 	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
1366 		ret = -EAGAIN;
1367 		break;
1368 
1369 	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
1370 		/* The number of expected pages is returned in RBX */
1371 		if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
1372 			input->data_npages = ghcb_get_rbx(ghcb);
1373 			ret = -ENOSPC;
1374 			break;
1375 		}
1376 		fallthrough;
1377 	default:
1378 		ret = -EIO;
1379 		break;
1380 	}
1381 
1382 e_put:
1383 	__sev_put_ghcb(&state);
1384 e_restore_irq:
1385 	local_irq_restore(flags);
1386 
1387 	return ret;
1388 }
1389 
1390 static struct platform_device sev_guest_device = {
1391 	.name		= "sev-guest",
1392 	.id		= -1,
1393 };
1394 
1395 static int __init snp_init_platform_device(void)
1396 {
1397 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1398 		return -ENODEV;
1399 
1400 	if (platform_device_register(&sev_guest_device))
1401 		return -ENODEV;
1402 
1403 	pr_info("SNP guest platform device initialized.\n");
1404 	return 0;
1405 }
1406 device_initcall(snp_init_platform_device);
1407 
1408 void sev_show_status(void)
1409 {
1410 	int i;
1411 
1412 	pr_info("Status: ");
1413 	for (i = 0; i < MSR_AMD64_SNP_RESV_BIT; i++) {
1414 		if (sev_status & BIT_ULL(i)) {
1415 			if (!sev_status_feat_names[i])
1416 				continue;
1417 
1418 			pr_cont("%s ", sev_status_feat_names[i]);
1419 		}
1420 	}
1421 	pr_cont("\n");
1422 }
1423 
1424 void __init snp_update_svsm_ca(void)
1425 {
1426 	if (!snp_vmpl)
1427 		return;
1428 
1429 	/* Update the CAA to a proper kernel address */
1430 	boot_svsm_caa = &boot_svsm_ca_page;
1431 }
1432 
1433 #ifdef CONFIG_SYSFS
1434 static ssize_t vmpl_show(struct kobject *kobj,
1435 			 struct kobj_attribute *attr, char *buf)
1436 {
1437 	return sysfs_emit(buf, "%d\n", snp_vmpl);
1438 }
1439 
1440 static struct kobj_attribute vmpl_attr = __ATTR_RO(vmpl);
1441 
1442 static struct attribute *vmpl_attrs[] = {
1443 	&vmpl_attr.attr,
1444 	NULL
1445 };
1446 
1447 static struct attribute_group sev_attr_group = {
1448 	.attrs = vmpl_attrs,
1449 };
1450 
1451 static int __init sev_sysfs_init(void)
1452 {
1453 	struct kobject *sev_kobj;
1454 	struct device *dev_root;
1455 	int ret;
1456 
1457 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1458 		return -ENODEV;
1459 
1460 	dev_root = bus_get_dev_root(&cpu_subsys);
1461 	if (!dev_root)
1462 		return -ENODEV;
1463 
1464 	sev_kobj = kobject_create_and_add("sev", &dev_root->kobj);
1465 	put_device(dev_root);
1466 
1467 	if (!sev_kobj)
1468 		return -ENOMEM;
1469 
1470 	ret = sysfs_create_group(sev_kobj, &sev_attr_group);
1471 	if (ret)
1472 		kobject_put(sev_kobj);
1473 
1474 	return ret;
1475 }
1476 arch_initcall(sev_sysfs_init);
1477 #endif // CONFIG_SYSFS
1478 
1479 static void free_shared_pages(void *buf, size_t sz)
1480 {
1481 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
1482 	int ret;
1483 
1484 	if (!buf)
1485 		return;
1486 
1487 	ret = set_memory_encrypted((unsigned long)buf, npages);
1488 	if (ret) {
1489 		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
1490 		return;
1491 	}
1492 
1493 	__free_pages(virt_to_page(buf), get_order(sz));
1494 }
1495 
1496 static void *alloc_shared_pages(size_t sz)
1497 {
1498 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
1499 	struct page *page;
1500 	int ret;
1501 
1502 	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
1503 	if (!page)
1504 		return NULL;
1505 
1506 	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
1507 	if (ret) {
1508 		pr_err("failed to mark page shared, ret=%d\n", ret);
1509 		__free_pages(page, get_order(sz));
1510 		return NULL;
1511 	}
1512 
1513 	return page_address(page);
1514 }
1515 
1516 static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
1517 {
1518 	u8 *key = NULL;
1519 
1520 	switch (id) {
1521 	case 0:
1522 		*seqno = &secrets->os_area.msg_seqno_0;
1523 		key = secrets->vmpck0;
1524 		break;
1525 	case 1:
1526 		*seqno = &secrets->os_area.msg_seqno_1;
1527 		key = secrets->vmpck1;
1528 		break;
1529 	case 2:
1530 		*seqno = &secrets->os_area.msg_seqno_2;
1531 		key = secrets->vmpck2;
1532 		break;
1533 	case 3:
1534 		*seqno = &secrets->os_area.msg_seqno_3;
1535 		key = secrets->vmpck3;
1536 		break;
1537 	default:
1538 		break;
1539 	}
1540 
1541 	return key;
1542 }
1543 
1544 static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen)
1545 {
1546 	struct aesgcm_ctx *ctx;
1547 
1548 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1549 	if (!ctx)
1550 		return NULL;
1551 
1552 	if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) {
1553 		pr_err("Crypto context initialization failed\n");
1554 		kfree(ctx);
1555 		return NULL;
1556 	}
1557 
1558 	return ctx;
1559 }
1560 
1561 int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id)
1562 {
1563 	/* Adjust the default VMPCK key based on the executing VMPL level */
1564 	if (vmpck_id == -1)
1565 		vmpck_id = snp_vmpl;
1566 
1567 	mdesc->vmpck = get_vmpck(vmpck_id, mdesc->secrets, &mdesc->os_area_msg_seqno);
1568 	if (!mdesc->vmpck) {
1569 		pr_err("Invalid VMPCK%d communication key\n", vmpck_id);
1570 		return -EINVAL;
1571 	}
1572 
1573 	/* Verify that VMPCK is not zero. */
1574 	if (!memchr_inv(mdesc->vmpck, 0, VMPCK_KEY_LEN)) {
1575 		pr_err("Empty VMPCK%d communication key\n", vmpck_id);
1576 		return -EINVAL;
1577 	}
1578 
1579 	mdesc->vmpck_id = vmpck_id;
1580 
1581 	mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN);
1582 	if (!mdesc->ctx)
1583 		return -ENOMEM;
1584 
1585 	return 0;
1586 }
1587 EXPORT_SYMBOL_GPL(snp_msg_init);
1588 
1589 struct snp_msg_desc *snp_msg_alloc(void)
1590 {
1591 	struct snp_msg_desc *mdesc;
1592 	void __iomem *mem;
1593 
1594 	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
1595 
1596 	mdesc = kzalloc(sizeof(struct snp_msg_desc), GFP_KERNEL);
1597 	if (!mdesc)
1598 		return ERR_PTR(-ENOMEM);
1599 
1600 	mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
1601 	if (!mem)
1602 		goto e_free_mdesc;
1603 
1604 	mdesc->secrets = (__force struct snp_secrets_page *)mem;
1605 
1606 	/* Allocate the shared page used for the request and response message. */
1607 	mdesc->request = alloc_shared_pages(sizeof(struct snp_guest_msg));
1608 	if (!mdesc->request)
1609 		goto e_unmap;
1610 
1611 	mdesc->response = alloc_shared_pages(sizeof(struct snp_guest_msg));
1612 	if (!mdesc->response)
1613 		goto e_free_request;
1614 
1615 	return mdesc;
1616 
1617 e_free_request:
1618 	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
1619 e_unmap:
1620 	iounmap(mem);
1621 e_free_mdesc:
1622 	kfree(mdesc);
1623 
1624 	return ERR_PTR(-ENOMEM);
1625 }
1626 EXPORT_SYMBOL_GPL(snp_msg_alloc);
1627 
1628 void snp_msg_free(struct snp_msg_desc *mdesc)
1629 {
1630 	if (!mdesc)
1631 		return;
1632 
1633 	kfree(mdesc->ctx);
1634 	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
1635 	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
1636 	iounmap((__force void __iomem *)mdesc->secrets);
1637 
1638 	memset(mdesc, 0, sizeof(*mdesc));
1639 	kfree(mdesc);
1640 }
1641 EXPORT_SYMBOL_GPL(snp_msg_free);
1642 
1643 /* Mutex to serialize the shared buffer access and command handling. */
1644 static DEFINE_MUTEX(snp_cmd_mutex);
1645 
1646 /*
1647  * If an error is received from the host or AMD Secure Processor (ASP), there
1648  * are two options: either retry the exact same encrypted request or discontinue
1649  * using the VMPCK.
1650  *
1651  * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
1652  * encrypt the requests. The IV for this scheme is the sequence number. GCM
1653  * cannot tolerate IV reuse.
1654  *
1655  * The ASP FW v1.51 only increments the sequence numbers on a successful
1656  * guest<->ASP back and forth and only accepts messages at its exact sequence
1657  * number.
1658  *
1659  * So if the sequence number were to be reused, the encryption scheme would be
1660  * vulnerable. If the sequence number were incremented for a fresh IV, the ASP
1661  * would reject the request.
1662  */
1663 static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
1664 {
1665 	pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
1666 		  mdesc->vmpck_id);
1667 	memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
1668 	mdesc->vmpck = NULL;
1669 }
1670 
1671 static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
1672 {
1673 	u64 count;
1674 
1675 	lockdep_assert_held(&snp_cmd_mutex);
1676 
1677 	/* Read the current message sequence counter from the secrets page */
1678 	count = *mdesc->os_area_msg_seqno;
1679 
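	/*
	 * The stored counter reflects the last completed exchange, so the next
	 * request uses the following value.
	 */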
1680 	return count + 1;
1681 }
1682 
1683 /* Return a non-zero value on success */
1684 static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
1685 {
1686 	u64 count = __snp_get_msg_seqno(mdesc);
1687 
1688 	/*
1689 	 * The message sequence counter for the SNP guest request is a 64-bit
1690 	 * value, but version 2 of the GHCB specification defines only 32 bits of
1691 	 * storage for it. If the counter exceeds the 32-bit range, return zero.
1692 	 * The caller should check the return value, but if the caller happens to
1693 	 * not check the value and uses it anyway, the firmware treats zero as an
1694 	 * invalid number and will fail the message request.
1695 	 */
1696 	if (count >= UINT_MAX) {
1697 		pr_err("request message sequence counter overflow\n");
1698 		return 0;
1699 	}
1700 
1701 	return count;
1702 }
1703 
1704 static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
1705 {
1706 	/*
1707 	 * The counter is also incremented by the PSP, so increment it by 2
1708 	 * and save it in the secrets page.
1709 	 */
1710 	*mdesc->os_area_msg_seqno += 2;
1711 }
1712 
1713 static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
1714 {
1715 	struct snp_guest_msg *resp_msg = &mdesc->secret_response;
1716 	struct snp_guest_msg *req_msg = &mdesc->secret_request;
1717 	struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
1718 	struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
1719 	struct aesgcm_ctx *ctx = mdesc->ctx;
1720 	u8 iv[GCM_AES_IV_SIZE] = {};
1721 
1722 	pr_debug("response [seqno %lld type %d version %d sz %d]\n",
1723 		 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
1724 		 resp_msg_hdr->msg_sz);
1725 
1726 	/* Copy response from shared memory to encrypted memory. */
1727 	memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));
1728 
1729 	/* Verify that the sequence counter is incremented by 1 */
1730 	if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
1731 		return -EBADMSG;
1732 
1733 	/* Verify response message type and version number. */
1734 	if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
1735 	    resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
1736 		return -EBADMSG;
1737 
1738 	/*
1739 	 * If the message size is greater than our buffer length then return
1740 	 * an error.
1741 	 */
1742 	if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz))
1743 		return -EBADMSG;
1744 
1745 	/* Decrypt the payload */
1746 	memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno)));
1747 	if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz,
1748 			    &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag))
1749 		return -EBADMSG;
1750 
1751 	return 0;
1752 }
1753 
1754 static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req)
1755 {
1756 	struct snp_guest_msg *msg = &mdesc->secret_request;
1757 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
1758 	struct aesgcm_ctx *ctx = mdesc->ctx;
1759 	u8 iv[GCM_AES_IV_SIZE] = {};
1760 
1761 	memset(msg, 0, sizeof(*msg));
1762 
1763 	hdr->algo = SNP_AEAD_AES_256_GCM;
1764 	hdr->hdr_version = MSG_HDR_VER;
1765 	hdr->hdr_sz = sizeof(*hdr);
1766 	hdr->msg_type = req->msg_type;
1767 	hdr->msg_version = req->msg_version;
1768 	hdr->msg_seqno = seqno;
1769 	hdr->msg_vmpck = req->vmpck_id;
1770 	hdr->msg_sz = req->req_sz;
1771 
1772 	/* Verify the sequence number is non-zero */
1773 	if (!hdr->msg_seqno)
1774 		return -ENOSR;
1775 
1776 	pr_debug("request [seqno %lld type %d version %d sz %d]\n",
1777 		 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
1778 
1779 	if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload)))
1780 		return -EBADMSG;
1781 
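	/* The message sequence number doubles as the AES-GCM IV. */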
1782 	memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno)));
1783 	aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo,
1784 		       AAD_LEN, iv, hdr->authtag);
1785 
1786 	return 0;
1787 }
1788 
1789 static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
1790 				  struct snp_guest_request_ioctl *rio)
1791 {
1792 	unsigned long req_start = jiffies;
1793 	unsigned int override_npages = 0;
1794 	u64 override_err = 0;
1795 	int rc;
1796 
1797 retry_request:
1798 	/*
1799 	 * Call firmware to process the request. In this function the encrypted
1800 	 * message enters shared memory with the host. So after this call the
1801 	 * sequence number must be incremented or the VMPCK must be deleted to
1802 	 * prevent reuse of the IV.
1803 	 */
1804 	rc = snp_issue_guest_request(req, &req->input, rio);
1805 	switch (rc) {
1806 	case -ENOSPC:
1807 		/*
1808 		 * If the extended guest request fails because the certificate
1809 		 * data buffer is too small, retry the same
1810 		 * guest request without the extended data request in
1811 		 * order to increment the sequence number and thus avoid
1812 		 * IV reuse.
1813 		 */
1814 		override_npages = req->input.data_npages;
1815 		req->exit_code	= SVM_VMGEXIT_GUEST_REQUEST;
1816 
1817 		/*
1818 		 * Override the error to inform callers the given extended
1819 		 * request buffer size was too small and give the caller the
1820 		 * required buffer size.
1821 		 */
1822 		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
1823 
1824 		/*
1825 		 * If this call to the firmware succeeds, the sequence number can
1826 		 * be incremented allowing for continued use of the VMPCK. If
1827 		 * there is an error reflected in the return value, this value
1828 		 * is checked further down and the result will be the deletion
1829 		 * of the VMPCK and the error code being propagated back to the
1830 		 * user as an ioctl() return code.
1831 		 */
1832 		goto retry_request;
1833 
1834 	/*
1835 	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
1836 	 * throttled. Retry in the driver to avoid returning and reusing the
1837 	 * message sequence number on a different message.
1838 	 */
1839 	case -EAGAIN:
1840 		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
1841 			rc = -ETIMEDOUT;
1842 			break;
1843 		}
1844 		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
1845 		goto retry_request;
1846 	}
1847 
1848 	/*
1849 	 * Increment the message sequence number. There is no harm in doing
1850 	 * this now because decryption uses the value stored in the response
1851 	 * structure and any failure will wipe the VMPCK, preventing further
1852 	 * use anyway.
1853 	 */
1854 	snp_inc_msg_seqno(mdesc);
1855 
1856 	if (override_err) {
1857 		rio->exitinfo2 = override_err;
1858 
1859 		/*
1860 		 * If an extended guest request was issued and the supplied certificate
1861 		 * buffer was not large enough, a standard guest request was issued to
1862 		 * prevent IV reuse. If the standard request was successful, return -EIO
1863 		 * back to the caller as would have originally been returned.
1864 		 */
1865 		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
1866 			rc = -EIO;
1867 	}
1868 
1869 	if (override_npages)
1870 		req->input.data_npages = override_npages;
1871 
1872 	return rc;
1873 }
1874 
1875 int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
1876 			   struct snp_guest_request_ioctl *rio)
1877 {
1878 	u64 seqno;
1879 	int rc;
1880 
1881 	guard(mutex)(&snp_cmd_mutex);
1882 
1883 	/* Check if the VMPCK is not empty */
1884 	if (!mdesc->vmpck || !memchr_inv(mdesc->vmpck, 0, VMPCK_KEY_LEN)) {
1885 		pr_err_ratelimited("VMPCK is disabled\n");
1886 		return -ENOTTY;
1887 	}
1888 
1889 	/* Get the message sequence number and verify that it is non-zero */
1890 	seqno = snp_get_msg_seqno(mdesc);
1891 	if (!seqno)
1892 		return -EIO;
1893 
1894 	/* Clear shared memory's response for the host to populate. */
1895 	memset(mdesc->response, 0, sizeof(struct snp_guest_msg));
1896 
1897 	/* Encrypt the userspace provided payload in mdesc->secret_request. */
1898 	rc = enc_payload(mdesc, seqno, req);
1899 	if (rc)
1900 		return rc;
1901 
1902 	/*
1903 	 * Write the fully encrypted request to the shared unencrypted
1904 	 * request page.
1905 	 */
1906 	memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));
1907 
1908 	/* Initialize the input address for guest request */
1909 	req->input.req_gpa = __pa(mdesc->request);
1910 	req->input.resp_gpa = __pa(mdesc->response);
1911 	req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;
1912 
1913 	rc = __handle_guest_request(mdesc, req, rio);
1914 	if (rc) {
1915 		if (rc == -EIO &&
1916 		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
1917 			return rc;
1918 
1919 		pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
1920 			 rc, rio->exitinfo2);
1921 
1922 		snp_disable_vmpck(mdesc);
1923 		return rc;
1924 	}
1925 
1926 	rc = verify_and_dec_payload(mdesc, req);
1927 	if (rc) {
1928 		pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
1929 		snp_disable_vmpck(mdesc);
1930 		return rc;
1931 	}
1932 
1933 	return 0;
1934 }
1935 EXPORT_SYMBOL_GPL(snp_send_guest_request);
1936 
1937 static int __init snp_get_tsc_info(void)
1938 {
1939 	struct snp_guest_request_ioctl *rio;
1940 	struct snp_tsc_info_resp *tsc_resp;
1941 	struct snp_tsc_info_req *tsc_req;
1942 	struct snp_msg_desc *mdesc;
1943 	struct snp_guest_req *req;
1944 	int rc = -ENOMEM;
1945 
1946 	tsc_req = kzalloc(sizeof(*tsc_req), GFP_KERNEL);
1947 	if (!tsc_req)
1948 		return rc;
1949 
1950 	/*
1951 	 * The intermediate response buffer is used while decrypting the
1952 	 * response payload. Make sure that it has enough space to cover
1953 	 * the authtag.
1954 	 */
1955 	tsc_resp = kzalloc(sizeof(*tsc_resp) + AUTHTAG_LEN, GFP_KERNEL);
1956 	if (!tsc_resp)
1957 		goto e_free_tsc_req;
1958 
1959 	req = kzalloc(sizeof(*req), GFP_KERNEL);
1960 	if (!req)
1961 		goto e_free_tsc_resp;
1962 
1963 	rio = kzalloc(sizeof(*rio), GFP_KERNEL);
1964 	if (!rio)
1965 		goto e_free_req;
1966 
1967 	mdesc = snp_msg_alloc();
1968 	if (IS_ERR_OR_NULL(mdesc))
1969 		goto e_free_rio;
1970 
1971 	rc = snp_msg_init(mdesc, snp_vmpl);
1972 	if (rc)
1973 		goto e_free_mdesc;
1974 
1975 	req->msg_version = MSG_HDR_VER;
1976 	req->msg_type = SNP_MSG_TSC_INFO_REQ;
1977 	req->vmpck_id = snp_vmpl;
1978 	req->req_buf = tsc_req;
1979 	req->req_sz = sizeof(*tsc_req);
1980 	req->resp_buf = (void *)tsc_resp;
1981 	req->resp_sz = sizeof(*tsc_resp) + AUTHTAG_LEN;
1982 	req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;
1983 
1984 	rc = snp_send_guest_request(mdesc, req, rio);
1985 	if (rc)
1986 		goto e_request;
1987 
1988 	pr_debug("%s: response status 0x%x scale 0x%llx offset 0x%llx factor 0x%x\n",
1989 		 __func__, tsc_resp->status, tsc_resp->tsc_scale, tsc_resp->tsc_offset,
1990 		 tsc_resp->tsc_factor);
1991 
1992 	if (!tsc_resp->status) {
1993 		snp_tsc_scale = tsc_resp->tsc_scale;
1994 		snp_tsc_offset = tsc_resp->tsc_offset;
1995 	} else {
1996 		pr_err("Failed to get TSC info, response status 0x%x\n", tsc_resp->status);
1997 		rc = -EIO;
1998 	}
1999 
2000 e_request:
2001 	/* The response buffer contains sensitive data, explicitly clear it. */
2002 	memzero_explicit(tsc_resp, sizeof(*tsc_resp) + AUTHTAG_LEN);
2003 e_free_mdesc:
2004 	snp_msg_free(mdesc);
2005 e_free_rio:
2006 	kfree(rio);
2007 e_free_req:
2008 	kfree(req);
2009 e_free_tsc_resp:
2010 	kfree(tsc_resp);
2011 e_free_tsc_req:
2012 	kfree(tsc_req);
2013 
2014 	return rc;
2015 }
2016 
2017 void __init snp_secure_tsc_prepare(void)
2018 {
2019 	if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
2020 		return;
2021 
2022 	if (snp_get_tsc_info()) {
2023 		pr_alert("Unable to retrieve Secure TSC info from ASP\n");
2024 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC);
2025 	}
2026 
2027 	pr_debug("SecureTSC enabled");
2028 }
2029 
2030 static unsigned long securetsc_get_tsc_khz(void)
2031 {
2032 	return snp_tsc_freq_khz;
2033 }
2034 
2035 void __init snp_secure_tsc_init(void)
2036 {
2037 	unsigned long long tsc_freq_mhz;
2038 
2039 	if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
2040 		return;
2041 
2042 	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
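	/* MSR_AMD64_GUEST_TSC_FREQ reports the guest TSC frequency in MHz. */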
2043 	rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
2044 	snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
2045 
2046 	x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
2047 	x86_platform.calibrate_tsc = securetsc_get_tsc_khz;
2048 }
2049