xref: /linux/arch/x86/coco/sev/core.c (revision b4ada0618eed0fbd1b1630f73deb048c592b06a1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Memory Encryption Support
4  *
5  * Copyright (C) 2019 SUSE
6  *
7  * Author: Joerg Roedel <jroedel@suse.de>
8  */
9 
10 #define pr_fmt(fmt)	"SEV: " fmt
11 
12 #include <linux/sched/debug.h>	/* For show_regs() */
13 #include <linux/percpu-defs.h>
14 #include <linux/cc_platform.h>
15 #include <linux/printk.h>
16 #include <linux/mm_types.h>
17 #include <linux/set_memory.h>
18 #include <linux/memblock.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/cpumask.h>
22 #include <linux/efi.h>
23 #include <linux/platform_device.h>
24 #include <linux/io.h>
25 #include <linux/psp-sev.h>
26 #include <linux/dmi.h>
27 #include <uapi/linux/sev-guest.h>
28 #include <crypto/gcm.h>
29 
30 #include <asm/init.h>
31 #include <asm/cpu_entry_area.h>
32 #include <asm/stacktrace.h>
33 #include <asm/sev.h>
34 #include <asm/sev-internal.h>
35 #include <asm/insn-eval.h>
36 #include <asm/fpu/xcr.h>
37 #include <asm/processor.h>
38 #include <asm/realmode.h>
39 #include <asm/setup.h>
40 #include <asm/traps.h>
41 #include <asm/svm.h>
42 #include <asm/smp.h>
43 #include <asm/cpu.h>
44 #include <asm/apic.h>
45 #include <asm/cpuid/api.h>
46 #include <asm/cmdline.h>
47 #include <asm/msr.h>
48 
49 /* AP INIT values as documented in the APM2  section "Processor Initialization State" */
50 #define AP_INIT_CS_LIMIT		0xffff
51 #define AP_INIT_DS_LIMIT		0xffff
52 #define AP_INIT_LDTR_LIMIT		0xffff
53 #define AP_INIT_GDTR_LIMIT		0xffff
54 #define AP_INIT_IDTR_LIMIT		0xffff
55 #define AP_INIT_TR_LIMIT		0xffff
56 #define AP_INIT_RFLAGS_DEFAULT		0x2
57 #define AP_INIT_DR6_DEFAULT		0xffff0ff0
58 #define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
59 #define AP_INIT_XCR0_DEFAULT		0x1
60 #define AP_INIT_X87_FTW_DEFAULT		0x5555
61 #define AP_INIT_X87_FCW_DEFAULT		0x0040
62 #define AP_INIT_CR0_DEFAULT		0x60000010
63 #define AP_INIT_MXCSR_DEFAULT		0x1f80
64 
65 static const char * const sev_status_feat_names[] = {
66 	[MSR_AMD64_SEV_ENABLED_BIT]		= "SEV",
67 	[MSR_AMD64_SEV_ES_ENABLED_BIT]		= "SEV-ES",
68 	[MSR_AMD64_SEV_SNP_ENABLED_BIT]		= "SEV-SNP",
69 	[MSR_AMD64_SNP_VTOM_BIT]		= "vTom",
70 	[MSR_AMD64_SNP_REFLECT_VC_BIT]		= "ReflectVC",
71 	[MSR_AMD64_SNP_RESTRICTED_INJ_BIT]	= "RI",
72 	[MSR_AMD64_SNP_ALT_INJ_BIT]		= "AI",
73 	[MSR_AMD64_SNP_DEBUG_SWAP_BIT]		= "DebugSwap",
74 	[MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT]	= "NoHostIBS",
75 	[MSR_AMD64_SNP_BTB_ISOLATION_BIT]	= "BTBIsol",
76 	[MSR_AMD64_SNP_VMPL_SSS_BIT]		= "VmplSSS",
77 	[MSR_AMD64_SNP_SECURE_TSC_BIT]		= "SecureTSC",
78 	[MSR_AMD64_SNP_VMGEXIT_PARAM_BIT]	= "VMGExitParam",
79 	[MSR_AMD64_SNP_IBS_VIRT_BIT]		= "IBSVirt",
80 	[MSR_AMD64_SNP_VMSA_REG_PROT_BIT]	= "VMSARegProt",
81 	[MSR_AMD64_SNP_SMT_PROT_BIT]		= "SMTProt",
82 };
83 
84 /*
85  * For Secure TSC guests, the BSP fetches TSC_INFO using SNP guest messaging and
86  * initializes snp_tsc_scale and snp_tsc_offset. These values are replicated
87  * across the APs VMSA fields (TSC_SCALE and TSC_OFFSET).
88  */
89 static u64 snp_tsc_scale __ro_after_init;
90 static u64 snp_tsc_offset __ro_after_init;
91 static unsigned long snp_tsc_freq_khz __ro_after_init;
92 
93 DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
94 DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
95 
96 /*
97  * SVSM related information:
98  *   When running under an SVSM, the VMPL that Linux is executing at must be
99  *   non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
100  */
101 u8 snp_vmpl __ro_after_init;
102 EXPORT_SYMBOL_GPL(snp_vmpl);
103 
104 static u64 __init get_snp_jump_table_addr(void)
105 {
106 	struct snp_secrets_page *secrets;
107 	void __iomem *mem;
108 	u64 addr;
109 
110 	mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
111 	if (!mem) {
112 		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
113 		return 0;
114 	}
115 
116 	secrets = (__force struct snp_secrets_page *)mem;
117 
118 	addr = secrets->os_area.ap_jump_table_pa;
119 	iounmap(mem);
120 
121 	return addr;
122 }
123 
124 static u64 __init get_jump_table_addr(void)
125 {
126 	struct ghcb_state state;
127 	unsigned long flags;
128 	struct ghcb *ghcb;
129 	u64 ret = 0;
130 
131 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
132 		return get_snp_jump_table_addr();
133 
134 	local_irq_save(flags);
135 
136 	ghcb = __sev_get_ghcb(&state);
137 
138 	vc_ghcb_invalidate(ghcb);
139 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
140 	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
141 	ghcb_set_sw_exit_info_2(ghcb, 0);
142 
143 	sev_es_wr_ghcb_msr(__pa(ghcb));
144 	VMGEXIT();
145 
146 	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
147 	    ghcb_sw_exit_info_2_is_valid(ghcb))
148 		ret = ghcb->save.sw_exit_info_2;
149 
150 	__sev_put_ghcb(&state);
151 
152 	local_irq_restore(flags);
153 
154 	return ret;
155 }
156 
157 static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
158 				    int ret, u64 svsm_ret)
159 {
160 	WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
161 	     pfn, action, page_size, ret, svsm_ret);
162 
163 	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
164 }
165 
166 static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
167 {
168 	unsigned int page_size;
169 	bool action;
170 	u64 pfn;
171 
172 	pfn = pc->entry[pc->cur_index].pfn;
173 	action = pc->entry[pc->cur_index].action;
174 	page_size = pc->entry[pc->cur_index].page_size;
175 
176 	__pval_terminate(pfn, action, page_size, ret, svsm_ret);
177 }
178 
179 static void pval_pages(struct snp_psc_desc *desc)
180 {
181 	struct psc_entry *e;
182 	unsigned long vaddr;
183 	unsigned int size;
184 	unsigned int i;
185 	bool validate;
186 	u64 pfn;
187 	int rc;
188 
189 	for (i = 0; i <= desc->hdr.end_entry; i++) {
190 		e = &desc->entries[i];
191 
192 		pfn = e->gfn;
193 		vaddr = (unsigned long)pfn_to_kaddr(pfn);
194 		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
195 		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
196 
197 		rc = pvalidate(vaddr, size, validate);
198 		if (!rc)
199 			continue;
200 
201 		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
202 			unsigned long vaddr_end = vaddr + PMD_SIZE;
203 
204 			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
205 				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
206 				if (rc)
207 					__pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
208 			}
209 		} else {
210 			__pval_terminate(pfn, validate, size, rc, 0);
211 		}
212 	}
213 }
214 
215 static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
216 					struct svsm_pvalidate_call *pc)
217 {
218 	struct svsm_pvalidate_entry *pe;
219 
220 	/* Nothing in the CA yet */
221 	pc->num_entries = 0;
222 	pc->cur_index   = 0;
223 
224 	pe = &pc->entry[0];
225 
226 	while (pfn < pfn_end) {
227 		pe->page_size = RMP_PG_SIZE_4K;
228 		pe->action    = action;
229 		pe->ignore_cf = 0;
230 		pe->rsvd      = 0;
231 		pe->pfn       = pfn;
232 
233 		pe++;
234 		pfn++;
235 
236 		pc->num_entries++;
237 		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
238 			break;
239 	}
240 
241 	return pfn;
242 }
243 
244 static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
245 				       struct svsm_pvalidate_call *pc)
246 {
247 	struct svsm_pvalidate_entry *pe;
248 	struct psc_entry *e;
249 
250 	/* Nothing in the CA yet */
251 	pc->num_entries = 0;
252 	pc->cur_index   = 0;
253 
254 	pe = &pc->entry[0];
255 	e  = &desc->entries[desc_entry];
256 
257 	while (desc_entry <= desc->hdr.end_entry) {
258 		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
259 		pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
260 		pe->ignore_cf = 0;
261 		pe->rsvd      = 0;
262 		pe->pfn       = e->gfn;
263 
264 		pe++;
265 		e++;
266 
267 		desc_entry++;
268 		pc->num_entries++;
269 		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
270 			break;
271 	}
272 
273 	return desc_entry;
274 }
275 
276 static void svsm_pval_pages(struct snp_psc_desc *desc)
277 {
278 	struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
279 	unsigned int i, pv_4k_count = 0;
280 	struct svsm_pvalidate_call *pc;
281 	struct svsm_call call = {};
282 	unsigned long flags;
283 	bool action;
284 	u64 pc_pa;
285 	int ret;
286 
287 	/*
288 	 * This can be called very early in the boot, use native functions in
289 	 * order to avoid paravirt issues.
290 	 */
291 	flags = native_local_irq_save();
292 
293 	/*
294 	 * The SVSM calling area (CA) can support processing 510 entries at a
295 	 * time. Loop through the Page State Change descriptor until the CA is
296 	 * full or the last entry in the descriptor is reached, at which time
297 	 * the SVSM is invoked. This repeats until all entries in the descriptor
298 	 * are processed.
299 	 */
300 	call.caa = svsm_get_caa();
301 
302 	pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
303 	pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
304 
305 	/* Protocol 0, Call ID 1 */
306 	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
307 	call.rcx = pc_pa;
308 
309 	for (i = 0; i <= desc->hdr.end_entry;) {
310 		i = svsm_build_ca_from_psc_desc(desc, i, pc);
311 
312 		do {
313 			ret = svsm_perform_call_protocol(&call);
314 			if (!ret)
315 				continue;
316 
317 			/*
318 			 * Check if the entry failed because of an RMP mismatch (a
319 			 * PVALIDATE at 2M was requested, but the page is mapped in
320 			 * the RMP as 4K).
321 			 */
322 
323 			if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
324 			    pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
325 				/* Save this entry for post-processing at 4K */
326 				pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
327 
328 				/* Skip to the next one unless at the end of the list */
329 				pc->cur_index++;
330 				if (pc->cur_index < pc->num_entries)
331 					ret = -EAGAIN;
332 				else
333 					ret = 0;
334 			}
335 		} while (ret == -EAGAIN);
336 
337 		if (ret)
338 			svsm_pval_terminate(pc, ret, call.rax_out);
339 	}
340 
341 	/* Process any entries that failed to be validated at 2M and validate them at 4K */
342 	for (i = 0; i < pv_4k_count; i++) {
343 		u64 pfn, pfn_end;
344 
345 		action  = pv_4k[i].action;
346 		pfn     = pv_4k[i].pfn;
347 		pfn_end = pfn + 512;
348 
349 		while (pfn < pfn_end) {
350 			pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
351 
352 			ret = svsm_perform_call_protocol(&call);
353 			if (ret)
354 				svsm_pval_terminate(pc, ret, call.rax_out);
355 		}
356 	}
357 
358 	native_local_irq_restore(flags);
359 }
360 
361 static void pvalidate_pages(struct snp_psc_desc *desc)
362 {
363 	struct psc_entry *e;
364 	unsigned int i;
365 
366 	if (snp_vmpl)
367 		svsm_pval_pages(desc);
368 	else
369 		pval_pages(desc);
370 
371 	/*
372 	 * If not affected by the cache-coherency vulnerability there is no need
373 	 * to perform the cache eviction mitigation.
374 	 */
375 	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
376 		return;
377 
378 	for (i = 0; i <= desc->hdr.end_entry; i++) {
379 		e = &desc->entries[i];
380 
381 		/*
382 		 * If validating memory (making it private) perform the cache
383 		 * eviction mitigation.
384 		 */
385 		if (e->operation == SNP_PAGE_STATE_PRIVATE)
386 			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
387 	}
388 }
389 
390 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
391 {
392 	int cur_entry, end_entry, ret = 0;
393 	struct snp_psc_desc *data;
394 	struct es_em_ctxt ctxt;
395 
396 	vc_ghcb_invalidate(ghcb);
397 
398 	/* Copy the input desc into GHCB shared buffer */
399 	data = (struct snp_psc_desc *)ghcb->shared_buffer;
400 	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
401 
402 	/*
403 	 * As per the GHCB specification, the hypervisor can resume the guest
404 	 * before processing all the entries. Check whether all the entries
405 	 * are processed. If not, then keep retrying. Note, the hypervisor
406 	 * will update the data memory directly to indicate the status, so
407 	 * reference the data->hdr everywhere.
408 	 *
409 	 * The strategy here is to wait for the hypervisor to change the page
410 	 * state in the RMP table before guest accesses the memory pages. If the
411 	 * page state change was not successful, then later memory access will
412 	 * result in a crash.
413 	 */
414 	cur_entry = data->hdr.cur_entry;
415 	end_entry = data->hdr.end_entry;
416 
417 	while (data->hdr.cur_entry <= data->hdr.end_entry) {
418 		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
419 
420 		/* This will advance the shared buffer data points to. */
421 		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
422 
423 		/*
424 		 * Page State Change VMGEXIT can pass error code through
425 		 * exit_info_2.
426 		 */
427 		if (WARN(ret || ghcb->save.sw_exit_info_2,
428 			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
429 			 ret, ghcb->save.sw_exit_info_2)) {
430 			ret = 1;
431 			goto out;
432 		}
433 
434 		/* Verify that reserved bit is not set */
435 		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
436 			ret = 1;
437 			goto out;
438 		}
439 
440 		/*
441 		 * Sanity check that entry processing is not going backwards.
442 		 * This will happen only if hypervisor is tricking us.
443 		 */
444 		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
445 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
446 			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
447 			ret = 1;
448 			goto out;
449 		}
450 	}
451 
452 out:
453 	return ret;
454 }
455 
456 static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
457 				       unsigned long vaddr_end, int op)
458 {
459 	struct ghcb_state state;
460 	bool use_large_entry;
461 	struct psc_hdr *hdr;
462 	struct psc_entry *e;
463 	unsigned long flags;
464 	unsigned long pfn;
465 	struct ghcb *ghcb;
466 	int i;
467 
468 	hdr = &data->hdr;
469 	e = data->entries;
470 
471 	memset(data, 0, sizeof(*data));
472 	i = 0;
473 
474 	while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
475 		hdr->end_entry = i;
476 
477 		if (is_vmalloc_addr((void *)vaddr)) {
478 			pfn = vmalloc_to_pfn((void *)vaddr);
479 			use_large_entry = false;
480 		} else {
481 			pfn = __pa(vaddr) >> PAGE_SHIFT;
482 			use_large_entry = true;
483 		}
484 
485 		e->gfn = pfn;
486 		e->operation = op;
487 
488 		if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
489 		    (vaddr_end - vaddr) >= PMD_SIZE) {
490 			e->pagesize = RMP_PG_SIZE_2M;
491 			vaddr += PMD_SIZE;
492 		} else {
493 			e->pagesize = RMP_PG_SIZE_4K;
494 			vaddr += PAGE_SIZE;
495 		}
496 
497 		e++;
498 		i++;
499 	}
500 
501 	/* Page validation must be rescinded before changing to shared */
502 	if (op == SNP_PAGE_STATE_SHARED)
503 		pvalidate_pages(data);
504 
505 	local_irq_save(flags);
506 
507 	if (sev_cfg.ghcbs_initialized)
508 		ghcb = __sev_get_ghcb(&state);
509 	else
510 		ghcb = boot_ghcb;
511 
512 	/* Invoke the hypervisor to perform the page state changes */
513 	if (!ghcb || vmgexit_psc(ghcb, data))
514 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
515 
516 	if (sev_cfg.ghcbs_initialized)
517 		__sev_put_ghcb(&state);
518 
519 	local_irq_restore(flags);
520 
521 	/* Page validation must be performed after changing to private */
522 	if (op == SNP_PAGE_STATE_PRIVATE)
523 		pvalidate_pages(data);
524 
525 	return vaddr;
526 }
527 
528 static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
529 {
530 	struct snp_psc_desc desc;
531 	unsigned long vaddr_end;
532 
533 	/* Use the MSR protocol when a GHCB is not available. */
534 	if (!boot_ghcb)
535 		return early_set_pages_state(vaddr, __pa(vaddr), npages, op);
536 
537 	vaddr = vaddr & PAGE_MASK;
538 	vaddr_end = vaddr + (npages << PAGE_SHIFT);
539 
540 	while (vaddr < vaddr_end)
541 		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
542 }
543 
544 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
545 {
546 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
547 		return;
548 
549 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
550 }
551 
552 void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
553 {
554 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
555 		return;
556 
557 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
558 }
559 
560 void snp_accept_memory(phys_addr_t start, phys_addr_t end)
561 {
562 	unsigned long vaddr, npages;
563 
564 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
565 		return;
566 
567 	vaddr = (unsigned long)__va(start);
568 	npages = (end - start) >> PAGE_SHIFT;
569 
570 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
571 }
572 
573 static int vmgexit_ap_control(u64 event, struct sev_es_save_area *vmsa, u32 apic_id)
574 {
575 	bool create = event != SVM_VMGEXIT_AP_DESTROY;
576 	struct ghcb_state state;
577 	unsigned long flags;
578 	struct ghcb *ghcb;
579 	int ret = 0;
580 
581 	local_irq_save(flags);
582 
583 	ghcb = __sev_get_ghcb(&state);
584 
585 	vc_ghcb_invalidate(ghcb);
586 
587 	if (create)
588 		ghcb_set_rax(ghcb, vmsa->sev_features);
589 
590 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
591 	ghcb_set_sw_exit_info_1(ghcb,
592 				((u64)apic_id << 32)	|
593 				((u64)snp_vmpl << 16)	|
594 				event);
595 	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
596 
597 	sev_es_wr_ghcb_msr(__pa(ghcb));
598 	VMGEXIT();
599 
600 	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
601 	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
602 		pr_err("SNP AP %s error\n", (create ? "CREATE" : "DESTROY"));
603 		ret = -EINVAL;
604 	}
605 
606 	__sev_put_ghcb(&state);
607 
608 	local_irq_restore(flags);
609 
610 	return ret;
611 }
612 
613 static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
614 {
615 	int ret;
616 
617 	if (snp_vmpl) {
618 		struct svsm_call call = {};
619 		unsigned long flags;
620 
621 		local_irq_save(flags);
622 
623 		call.caa = this_cpu_read(svsm_caa);
624 		call.rcx = __pa(va);
625 
626 		if (make_vmsa) {
627 			/* Protocol 0, Call ID 2 */
628 			call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
629 			call.rdx = __pa(caa);
630 			call.r8  = apic_id;
631 		} else {
632 			/* Protocol 0, Call ID 3 */
633 			call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
634 		}
635 
636 		ret = svsm_perform_call_protocol(&call);
637 
638 		local_irq_restore(flags);
639 	} else {
640 		/*
641 		 * If the kernel runs at VMPL0, it can change the VMSA
642 		 * bit for a page using the RMPADJUST instruction.
643 		 * However, for the instruction to succeed it must
644 		 * target the permissions of a lesser privileged (higher
645 		 * numbered) VMPL level, so use VMPL1.
646 		 */
647 		u64 attrs = 1;
648 
649 		if (make_vmsa)
650 			attrs |= RMPADJUST_VMSA_PAGE_BIT;
651 
652 		ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
653 	}
654 
655 	return ret;
656 }
657 
658 static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
659 {
660 	int err;
661 
662 	err = snp_set_vmsa(vmsa, NULL, apic_id, false);
663 	if (err)
664 		pr_err("clear VMSA page failed (%u), leaking page\n", err);
665 	else
666 		free_page((unsigned long)vmsa);
667 }
668 
669 static void set_pte_enc(pte_t *kpte, int level, void *va)
670 {
671 	struct pte_enc_desc d = {
672 		.kpte	   = kpte,
673 		.pte_level = level,
674 		.va	   = va,
675 		.encrypt   = true
676 	};
677 
678 	prepare_pte_enc(&d);
679 	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
680 }
681 
682 static void unshare_all_memory(void)
683 {
684 	unsigned long addr, end, size, ghcb;
685 	struct sev_es_runtime_data *data;
686 	unsigned int npages, level;
687 	bool skipped_addr;
688 	pte_t *pte;
689 	int cpu;
690 
691 	/* Unshare the direct mapping. */
692 	addr = PAGE_OFFSET;
693 	end  = PAGE_OFFSET + get_max_mapped();
694 
695 	while (addr < end) {
696 		pte = lookup_address(addr, &level);
697 		size = page_level_size(level);
698 		npages = size / PAGE_SIZE;
699 		skipped_addr = false;
700 
701 		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
702 			addr += size;
703 			continue;
704 		}
705 
706 		/*
707 		 * Ensure that all the per-CPU GHCBs are made private at the
708 		 * end of the unsharing loop so that the switch to the slower
709 		 * MSR protocol happens last.
710 		 */
711 		for_each_possible_cpu(cpu) {
712 			data = per_cpu(runtime_data, cpu);
713 			ghcb = (unsigned long)&data->ghcb_page;
714 
715 			/* Handle the case of a huge page containing the GHCB page */
716 			if (addr <= ghcb && ghcb < addr + size) {
717 				skipped_addr = true;
718 				break;
719 			}
720 		}
721 
722 		if (!skipped_addr) {
723 			set_pte_enc(pte, level, (void *)addr);
724 			snp_set_memory_private(addr, npages);
725 		}
726 		addr += size;
727 	}
728 
729 	/* Unshare all bss decrypted memory. */
730 	addr = (unsigned long)__start_bss_decrypted;
731 	end  = (unsigned long)__start_bss_decrypted_unused;
732 	npages = (end - addr) >> PAGE_SHIFT;
733 
734 	for (; addr < end; addr += PAGE_SIZE) {
735 		pte = lookup_address(addr, &level);
736 		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
737 			continue;
738 
739 		set_pte_enc(pte, level, (void *)addr);
740 	}
741 	addr = (unsigned long)__start_bss_decrypted;
742 	snp_set_memory_private(addr, npages);
743 
744 	__flush_tlb_all();
745 }
746 
747 /* Stop new private<->shared conversions */
748 void snp_kexec_begin(void)
749 {
750 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
751 		return;
752 
753 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
754 		return;
755 
756 	/*
757 	 * Crash kernel ends up here with interrupts disabled: can't wait for
758 	 * conversions to finish.
759 	 *
760 	 * If race happened, just report and proceed.
761 	 */
762 	if (!set_memory_enc_stop_conversion())
763 		pr_warn("Failed to stop shared<->private conversions\n");
764 }
765 
766 /*
767  * Shutdown all APs except the one handling kexec/kdump and clearing
768  * the VMSA tag on AP's VMSA pages as they are not being used as
769  * VMSA page anymore.
770  */
771 static void shutdown_all_aps(void)
772 {
773 	struct sev_es_save_area *vmsa;
774 	int apic_id, this_cpu, cpu;
775 
776 	this_cpu = get_cpu();
777 
778 	/*
779 	 * APs are already in HLT loop when enc_kexec_finish() callback
780 	 * is invoked.
781 	 */
782 	for_each_present_cpu(cpu) {
783 		vmsa = per_cpu(sev_vmsa, cpu);
784 
785 		/*
786 		 * The BSP or offlined APs do not have guest allocated VMSA
787 		 * and there is no need  to clear the VMSA tag for this page.
788 		 */
789 		if (!vmsa)
790 			continue;
791 
792 		/*
793 		 * Cannot clear the VMSA tag for the currently running vCPU.
794 		 */
795 		if (this_cpu == cpu) {
796 			unsigned long pa;
797 			struct page *p;
798 
799 			pa = __pa(vmsa);
800 			/*
801 			 * Mark the VMSA page of the running vCPU as offline
802 			 * so that is excluded and not touched by makedumpfile
803 			 * while generating vmcore during kdump.
804 			 */
805 			p = pfn_to_online_page(pa >> PAGE_SHIFT);
806 			if (p)
807 				__SetPageOffline(p);
808 			continue;
809 		}
810 
811 		apic_id = cpuid_to_apicid[cpu];
812 
813 		/*
814 		 * Issue AP destroy to ensure AP gets kicked out of guest mode
815 		 * to allow using RMPADJUST to remove the VMSA tag on it's
816 		 * VMSA page.
817 		 */
818 		vmgexit_ap_control(SVM_VMGEXIT_AP_DESTROY, vmsa, apic_id);
819 		snp_cleanup_vmsa(vmsa, apic_id);
820 	}
821 
822 	put_cpu();
823 }
824 
825 void snp_kexec_finish(void)
826 {
827 	struct sev_es_runtime_data *data;
828 	unsigned long size, addr;
829 	unsigned int level, cpu;
830 	struct ghcb *ghcb;
831 	pte_t *pte;
832 
833 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
834 		return;
835 
836 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
837 		return;
838 
839 	shutdown_all_aps();
840 
841 	unshare_all_memory();
842 
843 	/*
844 	 * Switch to using the MSR protocol to change per-CPU GHCBs to
845 	 * private. All the per-CPU GHCBs have been switched back to private,
846 	 * so can't do any more GHCB calls to the hypervisor beyond this point
847 	 * until the kexec'ed kernel starts running.
848 	 */
849 	boot_ghcb = NULL;
850 	sev_cfg.ghcbs_initialized = false;
851 
852 	for_each_possible_cpu(cpu) {
853 		data = per_cpu(runtime_data, cpu);
854 		ghcb = &data->ghcb_page;
855 		pte = lookup_address((unsigned long)ghcb, &level);
856 		size = page_level_size(level);
857 		/* Handle the case of a huge page containing the GHCB page */
858 		addr = (unsigned long)ghcb & page_level_mask(level);
859 		set_pte_enc(pte, level, (void *)addr);
860 		snp_set_memory_private(addr, (size / PAGE_SIZE));
861 	}
862 }
863 
864 #define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
865 #define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
866 #define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
867 
868 #define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
869 #define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
870 
871 static void *snp_alloc_vmsa_page(int cpu)
872 {
873 	struct page *p;
874 
875 	/*
876 	 * Allocate VMSA page to work around the SNP erratum where the CPU will
877 	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
878 	 * collides with the RMP entry of VMSA page. The recommended workaround
879 	 * is to not use a large page.
880 	 *
881 	 * Allocate an 8k page which is also 8k-aligned.
882 	 */
883 	p = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
884 	if (!p)
885 		return NULL;
886 
887 	split_page(p, 1);
888 
889 	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
890 	__free_page(p);
891 
892 	return page_address(p + 1);
893 }
894 
895 static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip, unsigned int cpu)
896 {
897 	struct sev_es_save_area *cur_vmsa, *vmsa;
898 	struct svsm_ca *caa;
899 	u8 sipi_vector;
900 	int ret;
901 	u64 cr4;
902 
903 	/*
904 	 * The hypervisor SNP feature support check has happened earlier, just check
905 	 * the AP_CREATION one here.
906 	 */
907 	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
908 		return -EOPNOTSUPP;
909 
910 	/*
911 	 * Verify the desired start IP against the known trampoline start IP
912 	 * to catch any future new trampolines that may be introduced that
913 	 * would require a new protected guest entry point.
914 	 */
915 	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
916 		      "Unsupported SNP start_ip: %lx\n", start_ip))
917 		return -EINVAL;
918 
919 	/* Override start_ip with known protected guest start IP */
920 	start_ip = real_mode_header->sev_es_trampoline_start;
921 	cur_vmsa = per_cpu(sev_vmsa, cpu);
922 
923 	/*
924 	 * A new VMSA is created each time because there is no guarantee that
925 	 * the current VMSA is the kernels or that the vCPU is not running. If
926 	 * an attempt was done to use the current VMSA with a running vCPU, a
927 	 * #VMEXIT of that vCPU would wipe out all of the settings being done
928 	 * here.
929 	 */
930 	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(cpu);
931 	if (!vmsa)
932 		return -ENOMEM;
933 
934 	/* If an SVSM is present, the SVSM per-CPU CAA will be !NULL */
935 	caa = per_cpu(svsm_caa, cpu);
936 
937 	/* CR4 should maintain the MCE value */
938 	cr4 = native_read_cr4() & X86_CR4_MCE;
939 
940 	/* Set the CS value based on the start_ip converted to a SIPI vector */
941 	sipi_vector		= (start_ip >> 12);
942 	vmsa->cs.base		= sipi_vector << 12;
943 	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
944 	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
945 	vmsa->cs.selector	= sipi_vector << 8;
946 
947 	/* Set the RIP value based on start_ip */
948 	vmsa->rip		= start_ip & 0xfff;
949 
950 	/* Set AP INIT defaults as documented in the APM */
951 	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
952 	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
953 	vmsa->es		= vmsa->ds;
954 	vmsa->fs		= vmsa->ds;
955 	vmsa->gs		= vmsa->ds;
956 	vmsa->ss		= vmsa->ds;
957 
958 	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
959 	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
960 	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
961 	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
962 	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
963 	vmsa->tr.attrib		= INIT_TR_ATTRIBS;
964 
965 	vmsa->cr4		= cr4;
966 	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
967 	vmsa->dr7		= DR7_RESET_VALUE;
968 	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
969 	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
970 	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
971 	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
972 	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
973 	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
974 	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;
975 
976 	/* SVME must be set. */
977 	vmsa->efer		= EFER_SVME;
978 
979 	/*
980 	 * Set the SNP-specific fields for this VMSA:
981 	 *   VMPL level
982 	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
983 	 */
984 	vmsa->vmpl		= snp_vmpl;
985 	vmsa->sev_features	= sev_status >> 2;
986 
987 	/* Populate AP's TSC scale/offset to get accurate TSC values. */
988 	if (cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)) {
989 		vmsa->tsc_scale = snp_tsc_scale;
990 		vmsa->tsc_offset = snp_tsc_offset;
991 	}
992 
993 	/* Switch the page over to a VMSA page now that it is initialized */
994 	ret = snp_set_vmsa(vmsa, caa, apic_id, true);
995 	if (ret) {
996 		pr_err("set VMSA page failed (%u)\n", ret);
997 		free_page((unsigned long)vmsa);
998 
999 		return -EINVAL;
1000 	}
1001 
1002 	/* Issue VMGEXIT AP Creation NAE event */
1003 	ret = vmgexit_ap_control(SVM_VMGEXIT_AP_CREATE, vmsa, apic_id);
1004 	if (ret) {
1005 		snp_cleanup_vmsa(vmsa, apic_id);
1006 		vmsa = NULL;
1007 	}
1008 
1009 	/* Free up any previous VMSA page */
1010 	if (cur_vmsa)
1011 		snp_cleanup_vmsa(cur_vmsa, apic_id);
1012 
1013 	/* Record the current VMSA page */
1014 	per_cpu(sev_vmsa, cpu) = vmsa;
1015 
1016 	return ret;
1017 }
1018 
1019 void __init snp_set_wakeup_secondary_cpu(void)
1020 {
1021 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1022 		return;
1023 
1024 	/*
1025 	 * Always set this override if SNP is enabled. This makes it the
1026 	 * required method to start APs under SNP. If the hypervisor does
1027 	 * not support AP creation, then no APs will be started.
1028 	 */
1029 	apic_update_callback(wakeup_secondary_cpu, wakeup_cpu_via_vmgexit);
1030 }
1031 
1032 int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
1033 {
1034 	u16 startup_cs, startup_ip;
1035 	phys_addr_t jump_table_pa;
1036 	u64 jump_table_addr;
1037 	u16 __iomem *jump_table;
1038 
1039 	jump_table_addr = get_jump_table_addr();
1040 
1041 	/* On UP guests there is no jump table so this is not a failure */
1042 	if (!jump_table_addr)
1043 		return 0;
1044 
1045 	/* Check if AP Jump Table is page-aligned */
1046 	if (jump_table_addr & ~PAGE_MASK)
1047 		return -EINVAL;
1048 
1049 	jump_table_pa = jump_table_addr & PAGE_MASK;
1050 
1051 	startup_cs = (u16)(rmh->trampoline_start >> 4);
1052 	startup_ip = (u16)(rmh->sev_es_trampoline_start -
1053 			   rmh->trampoline_start);
1054 
1055 	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
1056 	if (!jump_table)
1057 		return -EIO;
1058 
1059 	writew(startup_ip, &jump_table[0]);
1060 	writew(startup_cs, &jump_table[1]);
1061 
1062 	iounmap(jump_table);
1063 
1064 	return 0;
1065 }
1066 
1067 /*
1068  * This is needed by the OVMF UEFI firmware which will use whatever it finds in
1069  * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
1070  * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
1071  *
1072  * When running under SVSM the CA page is needed too, so map it as well.
1073  */
1074 int __init sev_es_efi_map_ghcbs_cas(pgd_t *pgd)
1075 {
1076 	unsigned long address, pflags, pflags_enc;
1077 	struct sev_es_runtime_data *data;
1078 	int cpu;
1079 	u64 pfn;
1080 
1081 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1082 		return 0;
1083 
1084 	pflags = _PAGE_NX | _PAGE_RW;
1085 	pflags_enc = cc_mkenc(pflags);
1086 
1087 	for_each_possible_cpu(cpu) {
1088 		data = per_cpu(runtime_data, cpu);
1089 
1090 		address = __pa(&data->ghcb_page);
1091 		pfn = address >> PAGE_SHIFT;
1092 
1093 		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
1094 			return 1;
1095 
1096 		if (snp_vmpl) {
1097 			address = per_cpu(svsm_caa_pa, cpu);
1098 			if (!address)
1099 				return 1;
1100 
1101 			pfn = address >> PAGE_SHIFT;
1102 			if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags_enc))
1103 				return 1;
1104 		}
1105 	}
1106 
1107 	return 0;
1108 }
1109 
1110 static void snp_register_per_cpu_ghcb(void)
1111 {
1112 	struct sev_es_runtime_data *data;
1113 	struct ghcb *ghcb;
1114 
1115 	data = this_cpu_read(runtime_data);
1116 	ghcb = &data->ghcb_page;
1117 
1118 	snp_register_ghcb_early(__pa(ghcb));
1119 }
1120 
1121 void setup_ghcb(void)
1122 {
1123 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1124 		return;
1125 
1126 	/*
1127 	 * Check whether the runtime #VC exception handler is active. It uses
1128 	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
1129 	 *
1130 	 * If SNP is active, register the per-CPU GHCB page so that the runtime
1131 	 * exception handler can use it.
1132 	 */
1133 	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
1134 		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1135 			snp_register_per_cpu_ghcb();
1136 
1137 		sev_cfg.ghcbs_initialized = true;
1138 
1139 		return;
1140 	}
1141 
1142 	/*
1143 	 * Make sure the hypervisor talks a supported protocol.
1144 	 * This gets called only in the BSP boot phase.
1145 	 */
1146 	if (!sev_es_negotiate_protocol())
1147 		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1148 
1149 	/*
1150 	 * Clear the boot_ghcb. The first exception comes in before the bss
1151 	 * section is cleared.
1152 	 */
1153 	memset(&boot_ghcb_page, 0, PAGE_SIZE);
1154 
1155 	/* Alright - Make the boot-ghcb public */
1156 	boot_ghcb = &boot_ghcb_page;
1157 
1158 	/* SNP guest requires that GHCB GPA must be registered. */
1159 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1160 		snp_register_ghcb_early(__pa(&boot_ghcb_page));
1161 }
1162 
1163 #ifdef CONFIG_HOTPLUG_CPU
1164 static void sev_es_ap_hlt_loop(void)
1165 {
1166 	struct ghcb_state state;
1167 	struct ghcb *ghcb;
1168 
1169 	ghcb = __sev_get_ghcb(&state);
1170 
1171 	while (true) {
1172 		vc_ghcb_invalidate(ghcb);
1173 		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
1174 		ghcb_set_sw_exit_info_1(ghcb, 0);
1175 		ghcb_set_sw_exit_info_2(ghcb, 0);
1176 
1177 		sev_es_wr_ghcb_msr(__pa(ghcb));
1178 		VMGEXIT();
1179 
1180 		/* Wakeup signal? */
1181 		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
1182 		    ghcb->save.sw_exit_info_2)
1183 			break;
1184 	}
1185 
1186 	__sev_put_ghcb(&state);
1187 }
1188 
1189 /*
1190  * Play_dead handler when running under SEV-ES. This is needed because
1191  * the hypervisor can't deliver an SIPI request to restart the AP.
1192  * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
1193  * hypervisor wakes it up again.
1194  */
1195 static void sev_es_play_dead(void)
1196 {
1197 	play_dead_common();
1198 
1199 	/* IRQs now disabled */
1200 
1201 	sev_es_ap_hlt_loop();
1202 
1203 	/*
1204 	 * If we get here, the VCPU was woken up again. Jump to CPU
1205 	 * startup code to get it back online.
1206 	 */
1207 	soft_restart_cpu();
1208 }
1209 #else  /* CONFIG_HOTPLUG_CPU */
1210 #define sev_es_play_dead	native_play_dead
1211 #endif /* CONFIG_HOTPLUG_CPU */
1212 
1213 #ifdef CONFIG_SMP
1214 static void __init sev_es_setup_play_dead(void)
1215 {
1216 	smp_ops.play_dead = sev_es_play_dead;
1217 }
1218 #else
1219 static inline void sev_es_setup_play_dead(void) { }
1220 #endif
1221 
1222 static void __init alloc_runtime_data(int cpu)
1223 {
1224 	struct sev_es_runtime_data *data;
1225 
1226 	data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu));
1227 	if (!data)
1228 		panic("Can't allocate SEV-ES runtime data");
1229 
1230 	per_cpu(runtime_data, cpu) = data;
1231 
1232 	if (snp_vmpl) {
1233 		struct svsm_ca *caa;
1234 
1235 		/* Allocate the SVSM CA page if an SVSM is present */
1236 		caa = memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE);
1237 
1238 		per_cpu(svsm_caa, cpu) = caa;
1239 		per_cpu(svsm_caa_pa, cpu) = __pa(caa);
1240 	}
1241 }
1242 
1243 static void __init init_ghcb(int cpu)
1244 {
1245 	struct sev_es_runtime_data *data;
1246 	int err;
1247 
1248 	data = per_cpu(runtime_data, cpu);
1249 
1250 	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
1251 					 sizeof(data->ghcb_page));
1252 	if (err)
1253 		panic("Can't map GHCBs unencrypted");
1254 
1255 	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
1256 
1257 	data->ghcb_active = false;
1258 	data->backup_ghcb_active = false;
1259 }
1260 
1261 void __init sev_es_init_vc_handling(void)
1262 {
1263 	int cpu;
1264 
1265 	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
1266 
1267 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1268 		return;
1269 
1270 	if (!sev_es_check_cpu_features())
1271 		panic("SEV-ES CPU Features missing");
1272 
1273 	/*
1274 	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
1275 	 * features.
1276 	 */
1277 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
1278 		sev_hv_features = get_hv_features();
1279 
1280 		if (!(sev_hv_features & GHCB_HV_FT_SNP))
1281 			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1282 	}
1283 
1284 	/* Initialize per-cpu GHCB pages */
1285 	for_each_possible_cpu(cpu) {
1286 		alloc_runtime_data(cpu);
1287 		init_ghcb(cpu);
1288 	}
1289 
1290 	/* If running under an SVSM, switch to the per-cpu CA */
1291 	if (snp_vmpl) {
1292 		struct svsm_call call = {};
1293 		unsigned long flags;
1294 		int ret;
1295 
1296 		local_irq_save(flags);
1297 
1298 		/*
1299 		 * SVSM_CORE_REMAP_CA call:
1300 		 *   RAX = 0 (Protocol=0, CallID=0)
1301 		 *   RCX = New CA GPA
1302 		 */
1303 		call.caa = svsm_get_caa();
1304 		call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
1305 		call.rcx = this_cpu_read(svsm_caa_pa);
1306 		ret = svsm_perform_call_protocol(&call);
1307 		if (ret)
1308 			panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n",
1309 			      ret, call.rax_out);
1310 
1311 		sev_cfg.use_cas = true;
1312 
1313 		local_irq_restore(flags);
1314 	}
1315 
1316 	sev_es_setup_play_dead();
1317 
1318 	/* Secondary CPUs use the runtime #VC handler */
1319 	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1320 }
1321 
1322 /*
1323  * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
1324  * enabled, as the alternative (fallback) logic for DMI probing in the legacy
1325  * ROM region can cause a crash since this region is not pre-validated.
1326  */
1327 void __init snp_dmi_setup(void)
1328 {
1329 	if (efi_enabled(EFI_CONFIG_TABLES))
1330 		dmi_setup();
1331 }
1332 
1333 static void dump_cpuid_table(void)
1334 {
1335 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
1336 	int i = 0;
1337 
1338 	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
1339 		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);
1340 
1341 	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
1342 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
1343 
1344 		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
1345 			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
1346 			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
1347 	}
1348 }
1349 
1350 /*
1351  * It is useful from an auditing/testing perspective to provide an easy way
1352  * for the guest owner to know that the CPUID table has been initialized as
1353  * expected, but that initialization happens too early in boot to print any
1354  * sort of indicator, and there's not really any other good place to do it,
1355  * so do it here.
1356  *
1357  * If running as an SNP guest, report the current VM privilege level (VMPL).
1358  */
1359 static int __init report_snp_info(void)
1360 {
1361 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
1362 
1363 	if (cpuid_table->count) {
1364 		pr_info("Using SNP CPUID table, %d entries present.\n",
1365 			cpuid_table->count);
1366 
1367 		if (sev_cfg.debug)
1368 			dump_cpuid_table();
1369 	}
1370 
1371 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1372 		pr_info("SNP running at VMPL%u.\n", snp_vmpl);
1373 
1374 	return 0;
1375 }
1376 arch_initcall(report_snp_info);
1377 
1378 static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
1379 {
1380 	/* If (new) lengths have been returned, propagate them up */
1381 	if (call->rcx_out != call->rcx)
1382 		input->manifest_buf.len = call->rcx_out;
1383 
1384 	if (call->rdx_out != call->rdx)
1385 		input->certificates_buf.len = call->rdx_out;
1386 
1387 	if (call->r8_out != call->r8)
1388 		input->report_buf.len = call->r8_out;
1389 }
1390 
1391 int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
1392 			      struct svsm_attest_call *input)
1393 {
1394 	struct svsm_attest_call *ac;
1395 	unsigned long flags;
1396 	u64 attest_call_pa;
1397 	int ret;
1398 
1399 	if (!snp_vmpl)
1400 		return -EINVAL;
1401 
1402 	local_irq_save(flags);
1403 
1404 	call->caa = svsm_get_caa();
1405 
1406 	ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
1407 	attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
1408 
1409 	*ac = *input;
1410 
1411 	/*
1412 	 * Set input registers for the request and set RDX and R8 to known
1413 	 * values in order to detect length values being returned in them.
1414 	 */
1415 	call->rax = call_id;
1416 	call->rcx = attest_call_pa;
1417 	call->rdx = -1;
1418 	call->r8 = -1;
1419 	ret = svsm_perform_call_protocol(call);
1420 	update_attest_input(call, input);
1421 
1422 	local_irq_restore(flags);
1423 
1424 	return ret;
1425 }
1426 EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
1427 
1428 static int snp_issue_guest_request(struct snp_guest_req *req)
1429 {
1430 	struct snp_req_data *input = &req->input;
1431 	struct ghcb_state state;
1432 	struct es_em_ctxt ctxt;
1433 	unsigned long flags;
1434 	struct ghcb *ghcb;
1435 	int ret;
1436 
1437 	req->exitinfo2 = SEV_RET_NO_FW_CALL;
1438 
1439 	/*
1440 	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
1441 	 * a per-CPU GHCB.
1442 	 */
1443 	local_irq_save(flags);
1444 
1445 	ghcb = __sev_get_ghcb(&state);
1446 	if (!ghcb) {
1447 		ret = -EIO;
1448 		goto e_restore_irq;
1449 	}
1450 
1451 	vc_ghcb_invalidate(ghcb);
1452 
1453 	if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
1454 		ghcb_set_rax(ghcb, input->data_gpa);
1455 		ghcb_set_rbx(ghcb, input->data_npages);
1456 	}
1457 
1458 	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, req->exit_code, input->req_gpa, input->resp_gpa);
1459 	if (ret)
1460 		goto e_put;
1461 
1462 	req->exitinfo2 = ghcb->save.sw_exit_info_2;
1463 	switch (req->exitinfo2) {
1464 	case 0:
1465 		break;
1466 
1467 	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
1468 		ret = -EAGAIN;
1469 		break;
1470 
1471 	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
1472 		/* Number of expected pages are returned in RBX */
1473 		if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
1474 			input->data_npages = ghcb_get_rbx(ghcb);
1475 			ret = -ENOSPC;
1476 			break;
1477 		}
1478 		fallthrough;
1479 	default:
1480 		ret = -EIO;
1481 		break;
1482 	}
1483 
1484 e_put:
1485 	__sev_put_ghcb(&state);
1486 e_restore_irq:
1487 	local_irq_restore(flags);
1488 
1489 	return ret;
1490 }
1491 
1492 /**
1493  * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
1494  *
1495  * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND
1496  * which is the only request used so far.
1497  *
1498  * Return: true if the platform provides a vTPM SVSM device, false otherwise.
1499  */
1500 static bool snp_svsm_vtpm_probe(void)
1501 {
1502 	struct svsm_call call = {};
1503 
1504 	/* The vTPM device is available only if a SVSM is present */
1505 	if (!snp_vmpl)
1506 		return false;
1507 
1508 	call.caa = svsm_get_caa();
1509 	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);
1510 
1511 	if (svsm_perform_call_protocol(&call))
1512 		return false;
1513 
1514 	/* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */
1515 	return call.rcx_out & BIT_ULL(8);
1516 }
1517 
1518 /**
1519  * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
1520  * @buffer: A buffer used to both send the command and receive the response.
1521  *
1522  * Execute a SVSM_VTPM_CMD call as defined by
1523  * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00
1524  *
1525  * All command request/response buffers have a common structure as specified by
1526  * the following table:
1527  *     Byte      Size       In/Out    Description
1528  *     Offset    (Bytes)
1529  *     0x000     4          In        Platform command
1530  *                          Out       Platform command response size
1531  *
1532  * Each command can build upon this common request/response structure to create
1533  * a structure specific to the command. See include/linux/tpm_svsm.h for more
1534  * details.
1535  *
1536  * Return: 0 on success, -errno on failure
1537  */
1538 int snp_svsm_vtpm_send_command(u8 *buffer)
1539 {
1540 	struct svsm_call call = {};
1541 
1542 	call.caa = svsm_get_caa();
1543 	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
1544 	call.rcx = __pa(buffer);
1545 
1546 	return svsm_perform_call_protocol(&call);
1547 }
1548 EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);
1549 
1550 static struct platform_device sev_guest_device = {
1551 	.name		= "sev-guest",
1552 	.id		= -1,
1553 };
1554 
1555 static struct platform_device tpm_svsm_device = {
1556 	.name		= "tpm-svsm",
1557 	.id		= -1,
1558 };
1559 
1560 static int __init snp_init_platform_device(void)
1561 {
1562 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1563 		return -ENODEV;
1564 
1565 	if (platform_device_register(&sev_guest_device))
1566 		return -ENODEV;
1567 
1568 	if (snp_svsm_vtpm_probe() &&
1569 	    platform_device_register(&tpm_svsm_device))
1570 		return -ENODEV;
1571 
1572 	pr_info("SNP guest platform devices initialized.\n");
1573 	return 0;
1574 }
1575 device_initcall(snp_init_platform_device);
1576 
1577 void sev_show_status(void)
1578 {
1579 	int i;
1580 
1581 	pr_info("Status: ");
1582 	for (i = 0; i < MSR_AMD64_SNP_RESV_BIT; i++) {
1583 		if (sev_status & BIT_ULL(i)) {
1584 			if (!sev_status_feat_names[i])
1585 				continue;
1586 
1587 			pr_cont("%s ", sev_status_feat_names[i]);
1588 		}
1589 	}
1590 	pr_cont("\n");
1591 }
1592 
1593 void __init snp_update_svsm_ca(void)
1594 {
1595 	if (!snp_vmpl)
1596 		return;
1597 
1598 	/* Update the CAA to a proper kernel address */
1599 	boot_svsm_caa = &boot_svsm_ca_page;
1600 }
1601 
1602 #ifdef CONFIG_SYSFS
1603 static ssize_t vmpl_show(struct kobject *kobj,
1604 			 struct kobj_attribute *attr, char *buf)
1605 {
1606 	return sysfs_emit(buf, "%d\n", snp_vmpl);
1607 }
1608 
1609 static struct kobj_attribute vmpl_attr = __ATTR_RO(vmpl);
1610 
1611 static struct attribute *vmpl_attrs[] = {
1612 	&vmpl_attr.attr,
1613 	NULL
1614 };
1615 
1616 static struct attribute_group sev_attr_group = {
1617 	.attrs = vmpl_attrs,
1618 };
1619 
1620 static int __init sev_sysfs_init(void)
1621 {
1622 	struct kobject *sev_kobj;
1623 	struct device *dev_root;
1624 	int ret;
1625 
1626 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1627 		return -ENODEV;
1628 
1629 	dev_root = bus_get_dev_root(&cpu_subsys);
1630 	if (!dev_root)
1631 		return -ENODEV;
1632 
1633 	sev_kobj = kobject_create_and_add("sev", &dev_root->kobj);
1634 	put_device(dev_root);
1635 
1636 	if (!sev_kobj)
1637 		return -ENOMEM;
1638 
1639 	ret = sysfs_create_group(sev_kobj, &sev_attr_group);
1640 	if (ret)
1641 		kobject_put(sev_kobj);
1642 
1643 	return ret;
1644 }
1645 arch_initcall(sev_sysfs_init);
1646 #endif // CONFIG_SYSFS
1647 
1648 static void free_shared_pages(void *buf, size_t sz)
1649 {
1650 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
1651 	int ret;
1652 
1653 	if (!buf)
1654 		return;
1655 
1656 	ret = set_memory_encrypted((unsigned long)buf, npages);
1657 	if (ret) {
1658 		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
1659 		return;
1660 	}
1661 
1662 	__free_pages(virt_to_page(buf), get_order(sz));
1663 }
1664 
1665 static void *alloc_shared_pages(size_t sz)
1666 {
1667 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
1668 	struct page *page;
1669 	int ret;
1670 
1671 	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
1672 	if (!page)
1673 		return NULL;
1674 
1675 	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
1676 	if (ret) {
1677 		pr_err("failed to mark page shared, ret=%d\n", ret);
1678 		__free_pages(page, get_order(sz));
1679 		return NULL;
1680 	}
1681 
1682 	return page_address(page);
1683 }
1684 
1685 static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
1686 {
1687 	u8 *key = NULL;
1688 
1689 	switch (id) {
1690 	case 0:
1691 		*seqno = &secrets->os_area.msg_seqno_0;
1692 		key = secrets->vmpck0;
1693 		break;
1694 	case 1:
1695 		*seqno = &secrets->os_area.msg_seqno_1;
1696 		key = secrets->vmpck1;
1697 		break;
1698 	case 2:
1699 		*seqno = &secrets->os_area.msg_seqno_2;
1700 		key = secrets->vmpck2;
1701 		break;
1702 	case 3:
1703 		*seqno = &secrets->os_area.msg_seqno_3;
1704 		key = secrets->vmpck3;
1705 		break;
1706 	default:
1707 		break;
1708 	}
1709 
1710 	return key;
1711 }
1712 
1713 static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen)
1714 {
1715 	struct aesgcm_ctx *ctx;
1716 
1717 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1718 	if (!ctx)
1719 		return NULL;
1720 
1721 	if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) {
1722 		pr_err("Crypto context initialization failed\n");
1723 		kfree(ctx);
1724 		return NULL;
1725 	}
1726 
1727 	return ctx;
1728 }
1729 
1730 int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id)
1731 {
1732 	/* Adjust the default VMPCK key based on the executing VMPL level */
1733 	if (vmpck_id == -1)
1734 		vmpck_id = snp_vmpl;
1735 
1736 	mdesc->vmpck = get_vmpck(vmpck_id, mdesc->secrets, &mdesc->os_area_msg_seqno);
1737 	if (!mdesc->vmpck) {
1738 		pr_err("Invalid VMPCK%d communication key\n", vmpck_id);
1739 		return -EINVAL;
1740 	}
1741 
1742 	/* Verify that VMPCK is not zero. */
1743 	if (!memchr_inv(mdesc->vmpck, 0, VMPCK_KEY_LEN)) {
1744 		pr_err("Empty VMPCK%d communication key\n", vmpck_id);
1745 		return -EINVAL;
1746 	}
1747 
1748 	mdesc->vmpck_id = vmpck_id;
1749 
1750 	mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN);
1751 	if (!mdesc->ctx)
1752 		return -ENOMEM;
1753 
1754 	return 0;
1755 }
1756 EXPORT_SYMBOL_GPL(snp_msg_init);
1757 
1758 struct snp_msg_desc *snp_msg_alloc(void)
1759 {
1760 	struct snp_msg_desc *mdesc;
1761 	void __iomem *mem;
1762 
1763 	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
1764 
1765 	mdesc = kzalloc(sizeof(struct snp_msg_desc), GFP_KERNEL);
1766 	if (!mdesc)
1767 		return ERR_PTR(-ENOMEM);
1768 
1769 	mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
1770 	if (!mem)
1771 		goto e_free_mdesc;
1772 
1773 	mdesc->secrets = (__force struct snp_secrets_page *)mem;
1774 
1775 	/* Allocate the shared page used for the request and response message. */
1776 	mdesc->request = alloc_shared_pages(sizeof(struct snp_guest_msg));
1777 	if (!mdesc->request)
1778 		goto e_unmap;
1779 
1780 	mdesc->response = alloc_shared_pages(sizeof(struct snp_guest_msg));
1781 	if (!mdesc->response)
1782 		goto e_free_request;
1783 
1784 	return mdesc;
1785 
1786 e_free_request:
1787 	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
1788 e_unmap:
1789 	iounmap(mem);
1790 e_free_mdesc:
1791 	kfree(mdesc);
1792 
1793 	return ERR_PTR(-ENOMEM);
1794 }
1795 EXPORT_SYMBOL_GPL(snp_msg_alloc);
1796 
1797 void snp_msg_free(struct snp_msg_desc *mdesc)
1798 {
1799 	if (!mdesc)
1800 		return;
1801 
1802 	kfree(mdesc->ctx);
1803 	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
1804 	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
1805 	iounmap((__force void __iomem *)mdesc->secrets);
1806 
1807 	memset(mdesc, 0, sizeof(*mdesc));
1808 	kfree(mdesc);
1809 }
1810 EXPORT_SYMBOL_GPL(snp_msg_free);
1811 
1812 /* Mutex to serialize the shared buffer access and command handling. */
1813 static DEFINE_MUTEX(snp_cmd_mutex);
1814 
1815 /*
1816  * If an error is received from the host or AMD Secure Processor (ASP) there
1817  * are two options. Either retry the exact same encrypted request or discontinue
1818  * using the VMPCK.
1819  *
1820  * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
1821  * encrypt the requests. The IV for this scheme is the sequence number. GCM
1822  * cannot tolerate IV reuse.
1823  *
1824  * The ASP FW v1.51 only increments the sequence numbers on a successful
1825  * guest<->ASP back and forth and only accepts messages at its exact sequence
1826  * number.
1827  *
1828  * So if the sequence number were to be reused the encryption scheme is
1829  * vulnerable. If the sequence number were incremented for a fresh IV the ASP
1830  * will reject the request.
1831  */
1832 static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
1833 {
1834 	pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
1835 		  mdesc->vmpck_id);
1836 	memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
1837 	mdesc->vmpck = NULL;
1838 }
1839 
1840 static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
1841 {
1842 	u64 count;
1843 
1844 	lockdep_assert_held(&snp_cmd_mutex);
1845 
1846 	/* Read the current message sequence counter from secrets pages */
1847 	count = *mdesc->os_area_msg_seqno;
1848 
1849 	return count + 1;
1850 }
1851 
1852 /* Return a non-zero on success */
1853 static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
1854 {
1855 	u64 count = __snp_get_msg_seqno(mdesc);
1856 
1857 	/*
1858 	 * The message sequence counter for the SNP guest request is a  64-bit
1859 	 * value but the version 2 of GHCB specification defines a 32-bit storage
1860 	 * for it. If the counter exceeds the 32-bit value then return zero.
1861 	 * The caller should check the return value, but if the caller happens to
1862 	 * not check the value and use it, then the firmware treats zero as an
1863 	 * invalid number and will fail the  message request.
1864 	 */
1865 	if (count >= UINT_MAX) {
1866 		pr_err("request message sequence counter overflow\n");
1867 		return 0;
1868 	}
1869 
1870 	return count;
1871 }
1872 
1873 static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
1874 {
1875 	/*
1876 	 * The counter is also incremented by the PSP, so increment it by 2
1877 	 * and save in secrets page.
1878 	 */
1879 	*mdesc->os_area_msg_seqno += 2;
1880 }
1881 
1882 static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
1883 {
1884 	struct snp_guest_msg *resp_msg = &mdesc->secret_response;
1885 	struct snp_guest_msg *req_msg = &mdesc->secret_request;
1886 	struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
1887 	struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
1888 	struct aesgcm_ctx *ctx = mdesc->ctx;
1889 	u8 iv[GCM_AES_IV_SIZE] = {};
1890 
1891 	pr_debug("response [seqno %lld type %d version %d sz %d]\n",
1892 		 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
1893 		 resp_msg_hdr->msg_sz);
1894 
1895 	/* Copy response from shared memory to encrypted memory. */
1896 	memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));
1897 
1898 	/* Verify that the sequence counter is incremented by 1 */
1899 	if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
1900 		return -EBADMSG;
1901 
1902 	/* Verify response message type and version number. */
1903 	if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
1904 	    resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
1905 		return -EBADMSG;
1906 
1907 	/*
1908 	 * If the message size is greater than our buffer length then return
1909 	 * an error.
1910 	 */
1911 	if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz))
1912 		return -EBADMSG;
1913 
1914 	/* Decrypt the payload */
1915 	memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno)));
1916 	if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz,
1917 			    &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag))
1918 		return -EBADMSG;
1919 
1920 	return 0;
1921 }
1922 
1923 static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req)
1924 {
1925 	struct snp_guest_msg *msg = &mdesc->secret_request;
1926 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
1927 	struct aesgcm_ctx *ctx = mdesc->ctx;
1928 	u8 iv[GCM_AES_IV_SIZE] = {};
1929 
1930 	memset(msg, 0, sizeof(*msg));
1931 
1932 	hdr->algo = SNP_AEAD_AES_256_GCM;
1933 	hdr->hdr_version = MSG_HDR_VER;
1934 	hdr->hdr_sz = sizeof(*hdr);
1935 	hdr->msg_type = req->msg_type;
1936 	hdr->msg_version = req->msg_version;
1937 	hdr->msg_seqno = seqno;
1938 	hdr->msg_vmpck = req->vmpck_id;
1939 	hdr->msg_sz = req->req_sz;
1940 
1941 	/* Verify the sequence number is non-zero */
1942 	if (!hdr->msg_seqno)
1943 		return -ENOSR;
1944 
1945 	pr_debug("request [seqno %lld type %d version %d sz %d]\n",
1946 		 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
1947 
1948 	if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload)))
1949 		return -EBADMSG;
1950 
1951 	memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno)));
1952 	aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo,
1953 		       AAD_LEN, iv, hdr->authtag);
1954 
1955 	return 0;
1956 }
1957 
1958 static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
1959 {
1960 	unsigned long req_start = jiffies;
1961 	unsigned int override_npages = 0;
1962 	u64 override_err = 0;
1963 	int rc;
1964 
1965 retry_request:
1966 	/*
1967 	 * Call firmware to process the request. In this function the encrypted
1968 	 * message enters shared memory with the host. So after this call the
1969 	 * sequence number must be incremented or the VMPCK must be deleted to
1970 	 * prevent reuse of the IV.
1971 	 */
1972 	rc = snp_issue_guest_request(req);
1973 	switch (rc) {
1974 	case -ENOSPC:
1975 		/*
1976 		 * If the extended guest request fails due to having too
1977 		 * small of a certificate data buffer, retry the same
1978 		 * guest request without the extended data request in
1979 		 * order to increment the sequence number and thus avoid
1980 		 * IV reuse.
1981 		 */
1982 		override_npages = req->input.data_npages;
1983 		req->exit_code	= SVM_VMGEXIT_GUEST_REQUEST;
1984 
1985 		/*
1986 		 * Override the error to inform callers the given extended
1987 		 * request buffer size was too small and give the caller the
1988 		 * required buffer size.
1989 		 */
1990 		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
1991 
1992 		/*
1993 		 * If this call to the firmware succeeds, the sequence number can
1994 		 * be incremented allowing for continued use of the VMPCK. If
1995 		 * there is an error reflected in the return value, this value
1996 		 * is checked further down and the result will be the deletion
1997 		 * of the VMPCK and the error code being propagated back to the
1998 		 * user as an ioctl() return code.
1999 		 */
2000 		goto retry_request;
2001 
2002 	/*
2003 	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
2004 	 * throttled. Retry in the driver to avoid returning and reusing the
2005 	 * message sequence number on a different message.
2006 	 */
2007 	case -EAGAIN:
2008 		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
2009 			rc = -ETIMEDOUT;
2010 			break;
2011 		}
2012 		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
2013 		goto retry_request;
2014 	}
2015 
2016 	/*
2017 	 * Increment the message sequence number. There is no harm in doing
2018 	 * this now because decryption uses the value stored in the response
2019 	 * structure and any failure will wipe the VMPCK, preventing further
2020 	 * use anyway.
2021 	 */
2022 	snp_inc_msg_seqno(mdesc);
2023 
2024 	if (override_err) {
2025 		req->exitinfo2 = override_err;
2026 
2027 		/*
2028 		 * If an extended guest request was issued and the supplied certificate
2029 		 * buffer was not large enough, a standard guest request was issued to
2030 		 * prevent IV reuse. If the standard request was successful, return -EIO
2031 		 * back to the caller as would have originally been returned.
2032 		 */
2033 		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
2034 			rc = -EIO;
2035 	}
2036 
2037 	if (override_npages)
2038 		req->input.data_npages = override_npages;
2039 
2040 	return rc;
2041 }
2042 
2043 int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
2044 {
2045 	u64 seqno;
2046 	int rc;
2047 
2048 	/*
2049 	 * enc_payload() calls aesgcm_encrypt(), which can potentially offload to HW.
2050 	 * The offload's DMA SG list of data to encrypt has to be in linear mapping.
2051 	 */
2052 	if (!virt_addr_valid(req->req_buf) || !virt_addr_valid(req->resp_buf)) {
2053 		pr_warn("AES-GSM buffers must be in linear mapping");
2054 		return -EINVAL;
2055 	}
2056 
2057 	guard(mutex)(&snp_cmd_mutex);
2058 
2059 	/* Check if the VMPCK is not empty */
2060 	if (!mdesc->vmpck || !memchr_inv(mdesc->vmpck, 0, VMPCK_KEY_LEN)) {
2061 		pr_err_ratelimited("VMPCK is disabled\n");
2062 		return -ENOTTY;
2063 	}
2064 
2065 	/* Get message sequence and verify that its a non-zero */
2066 	seqno = snp_get_msg_seqno(mdesc);
2067 	if (!seqno)
2068 		return -EIO;
2069 
2070 	/* Clear shared memory's response for the host to populate. */
2071 	memset(mdesc->response, 0, sizeof(struct snp_guest_msg));
2072 
2073 	/* Encrypt the userspace provided payload in mdesc->secret_request. */
2074 	rc = enc_payload(mdesc, seqno, req);
2075 	if (rc)
2076 		return rc;
2077 
2078 	/*
2079 	 * Write the fully encrypted request to the shared unencrypted
2080 	 * request page.
2081 	 */
2082 	memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));
2083 
2084 	/* Initialize the input address for guest request */
2085 	req->input.req_gpa = __pa(mdesc->request);
2086 	req->input.resp_gpa = __pa(mdesc->response);
2087 	req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;
2088 
2089 	rc = __handle_guest_request(mdesc, req);
2090 	if (rc) {
2091 		if (rc == -EIO &&
2092 		    req->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
2093 			return rc;
2094 
2095 		pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
2096 			 rc, req->exitinfo2);
2097 
2098 		snp_disable_vmpck(mdesc);
2099 		return rc;
2100 	}
2101 
2102 	rc = verify_and_dec_payload(mdesc, req);
2103 	if (rc) {
2104 		pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
2105 		snp_disable_vmpck(mdesc);
2106 		return rc;
2107 	}
2108 
2109 	return 0;
2110 }
2111 EXPORT_SYMBOL_GPL(snp_send_guest_request);
2112 
2113 static int __init snp_get_tsc_info(void)
2114 {
2115 	struct snp_tsc_info_resp *tsc_resp;
2116 	struct snp_tsc_info_req *tsc_req;
2117 	struct snp_msg_desc *mdesc;
2118 	struct snp_guest_req req = {};
2119 	int rc = -ENOMEM;
2120 
2121 	tsc_req = kzalloc(sizeof(*tsc_req), GFP_KERNEL);
2122 	if (!tsc_req)
2123 		return rc;
2124 
2125 	/*
2126 	 * The intermediate response buffer is used while decrypting the
2127 	 * response payload. Make sure that it has enough space to cover
2128 	 * the authtag.
2129 	 */
2130 	tsc_resp = kzalloc(sizeof(*tsc_resp) + AUTHTAG_LEN, GFP_KERNEL);
2131 	if (!tsc_resp)
2132 		goto e_free_tsc_req;
2133 
2134 	mdesc = snp_msg_alloc();
2135 	if (IS_ERR_OR_NULL(mdesc))
2136 		goto e_free_tsc_resp;
2137 
2138 	rc = snp_msg_init(mdesc, snp_vmpl);
2139 	if (rc)
2140 		goto e_free_mdesc;
2141 
2142 	req.msg_version = MSG_HDR_VER;
2143 	req.msg_type = SNP_MSG_TSC_INFO_REQ;
2144 	req.vmpck_id = snp_vmpl;
2145 	req.req_buf = tsc_req;
2146 	req.req_sz = sizeof(*tsc_req);
2147 	req.resp_buf = (void *)tsc_resp;
2148 	req.resp_sz = sizeof(*tsc_resp) + AUTHTAG_LEN;
2149 	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
2150 
2151 	rc = snp_send_guest_request(mdesc, &req);
2152 	if (rc)
2153 		goto e_request;
2154 
2155 	pr_debug("%s: response status 0x%x scale 0x%llx offset 0x%llx factor 0x%x\n",
2156 		 __func__, tsc_resp->status, tsc_resp->tsc_scale, tsc_resp->tsc_offset,
2157 		 tsc_resp->tsc_factor);
2158 
2159 	if (!tsc_resp->status) {
2160 		snp_tsc_scale = tsc_resp->tsc_scale;
2161 		snp_tsc_offset = tsc_resp->tsc_offset;
2162 	} else {
2163 		pr_err("Failed to get TSC info, response status 0x%x\n", tsc_resp->status);
2164 		rc = -EIO;
2165 	}
2166 
2167 e_request:
2168 	/* The response buffer contains sensitive data, explicitly clear it. */
2169 	memzero_explicit(tsc_resp, sizeof(*tsc_resp) + AUTHTAG_LEN);
2170 e_free_mdesc:
2171 	snp_msg_free(mdesc);
2172 e_free_tsc_resp:
2173 	kfree(tsc_resp);
2174 e_free_tsc_req:
2175 	kfree(tsc_req);
2176 
2177 	return rc;
2178 }
2179 
2180 void __init snp_secure_tsc_prepare(void)
2181 {
2182 	if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
2183 		return;
2184 
2185 	if (snp_get_tsc_info()) {
2186 		pr_alert("Unable to retrieve Secure TSC info from ASP\n");
2187 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC);
2188 	}
2189 
2190 	pr_debug("SecureTSC enabled");
2191 }
2192 
2193 static unsigned long securetsc_get_tsc_khz(void)
2194 {
2195 	return snp_tsc_freq_khz;
2196 }
2197 
2198 void __init snp_secure_tsc_init(void)
2199 {
2200 	struct snp_secrets_page *secrets;
2201 	unsigned long tsc_freq_mhz;
2202 	void *mem;
2203 
2204 	if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
2205 		return;
2206 
2207 	mem = early_memremap_encrypted(sev_secrets_pa, PAGE_SIZE);
2208 	if (!mem) {
2209 		pr_err("Unable to get TSC_FACTOR: failed to map the SNP secrets page.\n");
2210 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC);
2211 	}
2212 
2213 	secrets = (__force struct snp_secrets_page *)mem;
2214 
2215 	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
2216 	rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
2217 
2218 	/* Extract the GUEST TSC MHZ from BIT[17:0], rest is reserved space */
2219 	tsc_freq_mhz &= GENMASK_ULL(17, 0);
2220 
2221 	snp_tsc_freq_khz = SNP_SCALE_TSC_FREQ(tsc_freq_mhz * 1000, secrets->tsc_factor);
2222 
2223 	x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
2224 	x86_platform.calibrate_tsc = securetsc_get_tsc_khz;
2225 
2226 	early_memunmap(mem, PAGE_SIZE);
2227 }
2228