xref: /linux/arch/x86/coco/sev/svsm.c (revision ca8f421ea0d3f1d39f773e14f68f93c978e470ef)
1*e21279b7SBorislav Petkov (AMD) // SPDX-License-Identifier: GPL-2.0-only
2*e21279b7SBorislav Petkov (AMD) /*
3*e21279b7SBorislav Petkov (AMD)  * SVSM support code
4*e21279b7SBorislav Petkov (AMD)  */
5*e21279b7SBorislav Petkov (AMD) 
6*e21279b7SBorislav Petkov (AMD) #include <linux/types.h>
7*e21279b7SBorislav Petkov (AMD) 
8*e21279b7SBorislav Petkov (AMD) #include <asm/sev.h>
9*e21279b7SBorislav Petkov (AMD) 
10*e21279b7SBorislav Petkov (AMD) #include "internal.h"
11*e21279b7SBorislav Petkov (AMD) 
/* For early boot SVSM communication */
struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
SYM_PIC_ALIAS(boot_svsm_ca_page);

/*
 * SVSM related information:
 *   During boot, the page tables are set up as identity mapped and later
 *   changed to use kernel virtual addresses. Maintain separate virtual and
 *   physical addresses for the CAA to allow SVSM functions to be used during
 *   early boot, both with identity mapped virtual addresses and proper kernel
 *   virtual addresses.
 */
u64 boot_svsm_caa_pa __ro_after_init;
SYM_PIC_ALIAS(boot_svsm_caa_pa);

/* Per-CPU SVSM Calling Area pointer and its physical address */
DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
DEFINE_PER_CPU(u64, svsm_caa_pa);
29*e21279b7SBorislav Petkov (AMD) 
/*
 * Issue an SVSM call using the GHCB-based protocol: prepare the GHCB for a
 * SNP_RUN_VMPL VMGEXIT, issue the call and translate any pending exception
 * or SVSM result code into an errno value.
 */
static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
{
	struct es_em_ctxt ctxt;
	u8 pending = 0;

	vc_ghcb_invalidate(ghcb);

	/*
	 * Fill in protocol and format specifiers. This can be called very early
	 * in the boot, so use rip-relative references as needed.
	 */
	ghcb->protocol_version = ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	/* Tell the hypervisor where the GHCB is before issuing the VMGEXIT */
	sev_es_wr_ghcb_msr(__pa(ghcb));

	svsm_issue_call(call, &pending);

	/* An exception raised during the call means the request was not made */
	if (pending)
		return -EINVAL;

	switch (verify_exception_info(ghcb, &ctxt)) {
	case ES_OK:
		break;
	case ES_EXCEPTION:
		vc_forward_exception(&ctxt);
		fallthrough;
	default:
		return -EINVAL;
	}

	return svsm_process_result_codes(call);
}
67*e21279b7SBorislav Petkov (AMD) 
68*e21279b7SBorislav Petkov (AMD) int svsm_perform_call_protocol(struct svsm_call *call)
69*e21279b7SBorislav Petkov (AMD) {
70*e21279b7SBorislav Petkov (AMD) 	struct ghcb_state state;
71*e21279b7SBorislav Petkov (AMD) 	unsigned long flags;
72*e21279b7SBorislav Petkov (AMD) 	struct ghcb *ghcb;
73*e21279b7SBorislav Petkov (AMD) 	int ret;
74*e21279b7SBorislav Petkov (AMD) 
75*e21279b7SBorislav Petkov (AMD) 	flags = native_local_irq_save();
76*e21279b7SBorislav Petkov (AMD) 
77*e21279b7SBorislav Petkov (AMD) 	if (sev_cfg.ghcbs_initialized)
78*e21279b7SBorislav Petkov (AMD) 		ghcb = __sev_get_ghcb(&state);
79*e21279b7SBorislav Petkov (AMD) 	else if (boot_ghcb)
80*e21279b7SBorislav Petkov (AMD) 		ghcb = boot_ghcb;
81*e21279b7SBorislav Petkov (AMD) 	else
82*e21279b7SBorislav Petkov (AMD) 		ghcb = NULL;
83*e21279b7SBorislav Petkov (AMD) 
84*e21279b7SBorislav Petkov (AMD) 	do {
85*e21279b7SBorislav Petkov (AMD) 		ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
86*e21279b7SBorislav Petkov (AMD) 			   : __pi_svsm_perform_msr_protocol(call);
87*e21279b7SBorislav Petkov (AMD) 	} while (ret == -EAGAIN);
88*e21279b7SBorislav Petkov (AMD) 
89*e21279b7SBorislav Petkov (AMD) 	if (sev_cfg.ghcbs_initialized)
90*e21279b7SBorislav Petkov (AMD) 		__sev_put_ghcb(&state);
91*e21279b7SBorislav Petkov (AMD) 
92*e21279b7SBorislav Petkov (AMD) 	native_local_irq_restore(flags);
93*e21279b7SBorislav Petkov (AMD) 
94*e21279b7SBorislav Petkov (AMD) 	return ret;
95*e21279b7SBorislav Petkov (AMD) }
96*e21279b7SBorislav Petkov (AMD) 
97*e21279b7SBorislav Petkov (AMD) static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
98*e21279b7SBorislav Petkov (AMD) 					struct svsm_pvalidate_call *pc)
99*e21279b7SBorislav Petkov (AMD) {
100*e21279b7SBorislav Petkov (AMD) 	struct svsm_pvalidate_entry *pe;
101*e21279b7SBorislav Petkov (AMD) 
102*e21279b7SBorislav Petkov (AMD) 	/* Nothing in the CA yet */
103*e21279b7SBorislav Petkov (AMD) 	pc->num_entries = 0;
104*e21279b7SBorislav Petkov (AMD) 	pc->cur_index   = 0;
105*e21279b7SBorislav Petkov (AMD) 
106*e21279b7SBorislav Petkov (AMD) 	pe = &pc->entry[0];
107*e21279b7SBorislav Petkov (AMD) 
108*e21279b7SBorislav Petkov (AMD) 	while (pfn < pfn_end) {
109*e21279b7SBorislav Petkov (AMD) 		pe->page_size = RMP_PG_SIZE_4K;
110*e21279b7SBorislav Petkov (AMD) 		pe->action    = action;
111*e21279b7SBorislav Petkov (AMD) 		pe->ignore_cf = 0;
112*e21279b7SBorislav Petkov (AMD) 		pe->rsvd      = 0;
113*e21279b7SBorislav Petkov (AMD) 		pe->pfn       = pfn;
114*e21279b7SBorislav Petkov (AMD) 
115*e21279b7SBorislav Petkov (AMD) 		pe++;
116*e21279b7SBorislav Petkov (AMD) 		pfn++;
117*e21279b7SBorislav Petkov (AMD) 
118*e21279b7SBorislav Petkov (AMD) 		pc->num_entries++;
119*e21279b7SBorislav Petkov (AMD) 		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
120*e21279b7SBorislav Petkov (AMD) 			break;
121*e21279b7SBorislav Petkov (AMD) 	}
122*e21279b7SBorislav Petkov (AMD) 
123*e21279b7SBorislav Petkov (AMD) 	return pfn;
124*e21279b7SBorislav Petkov (AMD) }
125*e21279b7SBorislav Petkov (AMD) 
126*e21279b7SBorislav Petkov (AMD) static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
127*e21279b7SBorislav Petkov (AMD) 				       struct svsm_pvalidate_call *pc)
128*e21279b7SBorislav Petkov (AMD) {
129*e21279b7SBorislav Petkov (AMD) 	struct svsm_pvalidate_entry *pe;
130*e21279b7SBorislav Petkov (AMD) 	struct psc_entry *e;
131*e21279b7SBorislav Petkov (AMD) 
132*e21279b7SBorislav Petkov (AMD) 	/* Nothing in the CA yet */
133*e21279b7SBorislav Petkov (AMD) 	pc->num_entries = 0;
134*e21279b7SBorislav Petkov (AMD) 	pc->cur_index   = 0;
135*e21279b7SBorislav Petkov (AMD) 
136*e21279b7SBorislav Petkov (AMD) 	pe = &pc->entry[0];
137*e21279b7SBorislav Petkov (AMD) 	e  = &desc->entries[desc_entry];
138*e21279b7SBorislav Petkov (AMD) 
139*e21279b7SBorislav Petkov (AMD) 	while (desc_entry <= desc->hdr.end_entry) {
140*e21279b7SBorislav Petkov (AMD) 		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
141*e21279b7SBorislav Petkov (AMD) 		pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
142*e21279b7SBorislav Petkov (AMD) 		pe->ignore_cf = 0;
143*e21279b7SBorislav Petkov (AMD) 		pe->rsvd      = 0;
144*e21279b7SBorislav Petkov (AMD) 		pe->pfn       = e->gfn;
145*e21279b7SBorislav Petkov (AMD) 
146*e21279b7SBorislav Petkov (AMD) 		pe++;
147*e21279b7SBorislav Petkov (AMD) 		e++;
148*e21279b7SBorislav Petkov (AMD) 
149*e21279b7SBorislav Petkov (AMD) 		desc_entry++;
150*e21279b7SBorislav Petkov (AMD) 		pc->num_entries++;
151*e21279b7SBorislav Petkov (AMD) 		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
152*e21279b7SBorislav Petkov (AMD) 			break;
153*e21279b7SBorislav Petkov (AMD) 	}
154*e21279b7SBorislav Petkov (AMD) 
155*e21279b7SBorislav Petkov (AMD) 	return desc_entry;
156*e21279b7SBorislav Petkov (AMD) }
157*e21279b7SBorislav Petkov (AMD) 
158*e21279b7SBorislav Petkov (AMD) static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
159*e21279b7SBorislav Petkov (AMD) {
160*e21279b7SBorislav Petkov (AMD) 	unsigned int page_size;
161*e21279b7SBorislav Petkov (AMD) 	bool action;
162*e21279b7SBorislav Petkov (AMD) 	u64 pfn;
163*e21279b7SBorislav Petkov (AMD) 
164*e21279b7SBorislav Petkov (AMD) 	pfn = pc->entry[pc->cur_index].pfn;
165*e21279b7SBorislav Petkov (AMD) 	action = pc->entry[pc->cur_index].action;
166*e21279b7SBorislav Petkov (AMD) 	page_size = pc->entry[pc->cur_index].page_size;
167*e21279b7SBorislav Petkov (AMD) 
168*e21279b7SBorislav Petkov (AMD) 	__pval_terminate(pfn, action, page_size, ret, svsm_ret);
169*e21279b7SBorislav Petkov (AMD) }
170*e21279b7SBorislav Petkov (AMD) 
/*
 * svsm_pval_pages() - Have the SVSM PVALIDATE the pages described by @desc.
 *
 * Page State Change entries are packed into the SVSM Calling Area (CA) and
 * submitted via the SVSM_CORE_PVALIDATE call. Entries that fail at 2M
 * granularity due to an RMP page-size mismatch are retried as 4K pages; any
 * other failure terminates via svsm_pval_terminate().
 */
void svsm_pval_pages(struct snp_psc_desc *desc)
{
	/* 2M entries to be re-validated as 512 individual 4K pages */
	struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
	unsigned int i, pv_4k_count = 0;
	struct svsm_pvalidate_call *pc;
	struct svsm_call call = {};
	unsigned long flags;
	bool action;
	u64 pc_pa;
	int ret;

	/*
	 * This can be called very early in the boot, use native functions in
	 * order to avoid paravirt issues.
	 */
	flags = native_local_irq_save();

	/*
	 * The SVSM calling area (CA) can support processing 510 entries at a
	 * time. Loop through the Page State Change descriptor until the CA is
	 * full or the last entry in the descriptor is reached, at which time
	 * the SVSM is invoked. This repeats until all entries in the descriptor
	 * are processed.
	 */
	call.caa = svsm_get_caa();

	pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
	pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);

	/* Protocol 0, Call ID 1 */
	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
	call.rcx = pc_pa;

	for (i = 0; i <= desc->hdr.end_entry;) {
		/* i advances to the first descriptor entry not yet packed into the CA */
		i = svsm_build_ca_from_psc_desc(desc, i, pc);

		do {
			ret = svsm_perform_call_protocol(&call);
			if (!ret)
				continue;

			/*
			 * Check if the entry failed because of an RMP mismatch (a
			 * PVALIDATE at 2M was requested, but the page is mapped in
			 * the RMP as 4K).
			 */

			if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
			    pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
				/* Save this entry for post-processing at 4K */
				pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];

				/* Skip to the next one unless at the end of the list */
				pc->cur_index++;
				if (pc->cur_index < pc->num_entries)
					ret = -EAGAIN;
				else
					ret = 0;
			}
		} while (ret == -EAGAIN);

		if (ret)
			svsm_pval_terminate(pc, ret, call.rax_out);
	}

	/* Process any entries that failed to be validated at 2M and validate them at 4K */
	for (i = 0; i < pv_4k_count; i++) {
		u64 pfn, pfn_end;

		action  = pv_4k[i].action;
		pfn     = pv_4k[i].pfn;
		pfn_end = pfn + 512;

		while (pfn < pfn_end) {
			pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);

			ret = svsm_perform_call_protocol(&call);
			if (ret)
				svsm_pval_terminate(pc, ret, call.rax_out);
		}
	}

	native_local_irq_restore(flags);
}
255*e21279b7SBorislav Petkov (AMD) 
256*e21279b7SBorislav Petkov (AMD) static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
257*e21279b7SBorislav Petkov (AMD) {
258*e21279b7SBorislav Petkov (AMD) 	/* If (new) lengths have been returned, propagate them up */
259*e21279b7SBorislav Petkov (AMD) 	if (call->rcx_out != call->rcx)
260*e21279b7SBorislav Petkov (AMD) 		input->manifest_buf.len = call->rcx_out;
261*e21279b7SBorislav Petkov (AMD) 
262*e21279b7SBorislav Petkov (AMD) 	if (call->rdx_out != call->rdx)
263*e21279b7SBorislav Petkov (AMD) 		input->certificates_buf.len = call->rdx_out;
264*e21279b7SBorislav Petkov (AMD) 
265*e21279b7SBorislav Petkov (AMD) 	if (call->r8_out != call->r8)
266*e21279b7SBorislav Petkov (AMD) 		input->report_buf.len = call->r8_out;
267*e21279b7SBorislav Petkov (AMD) }
268*e21279b7SBorislav Petkov (AMD) 
269*e21279b7SBorislav Petkov (AMD) int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
270*e21279b7SBorislav Petkov (AMD) 			      struct svsm_attest_call *input)
271*e21279b7SBorislav Petkov (AMD) {
272*e21279b7SBorislav Petkov (AMD) 	struct svsm_attest_call *ac;
273*e21279b7SBorislav Petkov (AMD) 	unsigned long flags;
274*e21279b7SBorislav Petkov (AMD) 	u64 attest_call_pa;
275*e21279b7SBorislav Petkov (AMD) 	int ret;
276*e21279b7SBorislav Petkov (AMD) 
277*e21279b7SBorislav Petkov (AMD) 	if (!snp_vmpl)
278*e21279b7SBorislav Petkov (AMD) 		return -EINVAL;
279*e21279b7SBorislav Petkov (AMD) 
280*e21279b7SBorislav Petkov (AMD) 	local_irq_save(flags);
281*e21279b7SBorislav Petkov (AMD) 
282*e21279b7SBorislav Petkov (AMD) 	call->caa = svsm_get_caa();
283*e21279b7SBorislav Petkov (AMD) 
284*e21279b7SBorislav Petkov (AMD) 	ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
285*e21279b7SBorislav Petkov (AMD) 	attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
286*e21279b7SBorislav Petkov (AMD) 
287*e21279b7SBorislav Petkov (AMD) 	*ac = *input;
288*e21279b7SBorislav Petkov (AMD) 
289*e21279b7SBorislav Petkov (AMD) 	/*
290*e21279b7SBorislav Petkov (AMD) 	 * Set input registers for the request and set RDX and R8 to known
291*e21279b7SBorislav Petkov (AMD) 	 * values in order to detect length values being returned in them.
292*e21279b7SBorislav Petkov (AMD) 	 */
293*e21279b7SBorislav Petkov (AMD) 	call->rax = call_id;
294*e21279b7SBorislav Petkov (AMD) 	call->rcx = attest_call_pa;
295*e21279b7SBorislav Petkov (AMD) 	call->rdx = -1;
296*e21279b7SBorislav Petkov (AMD) 	call->r8 = -1;
297*e21279b7SBorislav Petkov (AMD) 	ret = svsm_perform_call_protocol(call);
298*e21279b7SBorislav Petkov (AMD) 	update_attest_input(call, input);
299*e21279b7SBorislav Petkov (AMD) 
300*e21279b7SBorislav Petkov (AMD) 	local_irq_restore(flags);
301*e21279b7SBorislav Petkov (AMD) 
302*e21279b7SBorislav Petkov (AMD) 	return ret;
303*e21279b7SBorislav Petkov (AMD) }
304*e21279b7SBorislav Petkov (AMD) EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
305*e21279b7SBorislav Petkov (AMD) 
306*e21279b7SBorislav Petkov (AMD) /**
307*e21279b7SBorislav Petkov (AMD)  * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
308*e21279b7SBorislav Petkov (AMD)  * @buffer: A buffer used to both send the command and receive the response.
309*e21279b7SBorislav Petkov (AMD)  *
310*e21279b7SBorislav Petkov (AMD)  * Execute a SVSM_VTPM_CMD call as defined by
311*e21279b7SBorislav Petkov (AMD)  * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00
312*e21279b7SBorislav Petkov (AMD)  *
313*e21279b7SBorislav Petkov (AMD)  * All command request/response buffers have a common structure as specified by
314*e21279b7SBorislav Petkov (AMD)  * the following table:
315*e21279b7SBorislav Petkov (AMD)  *     Byte      Size       In/Out    Description
316*e21279b7SBorislav Petkov (AMD)  *     Offset    (Bytes)
317*e21279b7SBorislav Petkov (AMD)  *     0x000     4          In        Platform command
318*e21279b7SBorislav Petkov (AMD)  *                          Out       Platform command response size
319*e21279b7SBorislav Petkov (AMD)  *
320*e21279b7SBorislav Petkov (AMD)  * Each command can build upon this common request/response structure to create
321*e21279b7SBorislav Petkov (AMD)  * a structure specific to the command. See include/linux/tpm_svsm.h for more
322*e21279b7SBorislav Petkov (AMD)  * details.
323*e21279b7SBorislav Petkov (AMD)  *
324*e21279b7SBorislav Petkov (AMD)  * Return: 0 on success, -errno on failure
325*e21279b7SBorislav Petkov (AMD)  */
326*e21279b7SBorislav Petkov (AMD) int snp_svsm_vtpm_send_command(u8 *buffer)
327*e21279b7SBorislav Petkov (AMD) {
328*e21279b7SBorislav Petkov (AMD) 	struct svsm_call call = {};
329*e21279b7SBorislav Petkov (AMD) 
330*e21279b7SBorislav Petkov (AMD) 	call.caa = svsm_get_caa();
331*e21279b7SBorislav Petkov (AMD) 	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
332*e21279b7SBorislav Petkov (AMD) 	call.rcx = __pa(buffer);
333*e21279b7SBorislav Petkov (AMD) 
334*e21279b7SBorislav Petkov (AMD) 	return svsm_perform_call_protocol(&call);
335*e21279b7SBorislav Petkov (AMD) }
336*e21279b7SBorislav Petkov (AMD) EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);
337*e21279b7SBorislav Petkov (AMD) 
338*e21279b7SBorislav Petkov (AMD) /**
339*e21279b7SBorislav Petkov (AMD)  * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
340*e21279b7SBorislav Petkov (AMD)  *
341*e21279b7SBorislav Petkov (AMD)  * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND
342*e21279b7SBorislav Petkov (AMD)  * which is the only request used so far.
343*e21279b7SBorislav Petkov (AMD)  *
344*e21279b7SBorislav Petkov (AMD)  * Return: true if the platform provides a vTPM SVSM device, false otherwise.
345*e21279b7SBorislav Petkov (AMD)  */
346*e21279b7SBorislav Petkov (AMD) bool snp_svsm_vtpm_probe(void)
347*e21279b7SBorislav Petkov (AMD) {
348*e21279b7SBorislav Petkov (AMD) 	struct svsm_call call = {};
349*e21279b7SBorislav Petkov (AMD) 
350*e21279b7SBorislav Petkov (AMD) 	/* The vTPM device is available only if a SVSM is present */
351*e21279b7SBorislav Petkov (AMD) 	if (!snp_vmpl)
352*e21279b7SBorislav Petkov (AMD) 		return false;
353*e21279b7SBorislav Petkov (AMD) 
354*e21279b7SBorislav Petkov (AMD) 	call.caa = svsm_get_caa();
355*e21279b7SBorislav Petkov (AMD) 	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);
356*e21279b7SBorislav Petkov (AMD) 
357*e21279b7SBorislav Petkov (AMD) 	if (svsm_perform_call_protocol(&call))
358*e21279b7SBorislav Petkov (AMD) 		return false;
359*e21279b7SBorislav Petkov (AMD) 
360*e21279b7SBorislav Petkov (AMD) 	/* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */
361*e21279b7SBorislav Petkov (AMD) 	return call.rcx_out & BIT_ULL(8);
362*e21279b7SBorislav Petkov (AMD) }
363