xref: /linux/arch/x86/hyperv/ivm.c (revision 2174181019e4273e583a0f0a9795e9db38984784)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Hyper-V Isolation VM interface with paravisor and hypervisor
4  *
5  * Author:
6  *  Tianyu Lan <Tianyu.Lan@microsoft.com>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/hyperv.h>
11 #include <linux/types.h>
12 #include <linux/slab.h>
13 #include <asm/svm.h>
14 #include <asm/sev.h>
15 #include <asm/io.h>
16 #include <asm/coco.h>
17 #include <asm/mem_encrypt.h>
18 #include <asm/mshyperv.h>
19 #include <asm/hypervisor.h>
20 #include <asm/mtrr.h>
21 
22 #ifdef CONFIG_AMD_MEM_ENCRYPT
23 
24 #define GHCB_USAGE_HYPERV_CALL	1
25 
26 union hv_ghcb {
27 	struct ghcb ghcb;
28 	struct {
29 		u64 hypercalldata[509];
30 		u64 outputgpa;
31 		union {
32 			union {
33 				struct {
34 					u32 callcode        : 16;
35 					u32 isfast          : 1;
36 					u32 reserved1       : 14;
37 					u32 isnested        : 1;
38 					u32 countofelements : 12;
39 					u32 reserved2       : 4;
40 					u32 repstartindex   : 12;
41 					u32 reserved3       : 4;
42 				};
43 				u64 asuint64;
44 			} hypercallinput;
45 			union {
46 				struct {
47 					u16 callstatus;
48 					u16 reserved1;
49 					u32 elementsprocessed : 12;
50 					u32 reserved2         : 20;
51 				};
52 				u64 asuint64;
53 			} hypercalloutput;
54 		};
55 		u64 reserved2;
56 	} hypercall;
57 } __packed __aligned(HV_HYP_PAGE_SIZE);
58 
59 static u16 hv_ghcb_version __ro_after_init;
60 
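/*
 * hv_ghcb_hypercall - Issue a Hyper-V hypercall through the per-CPU GHCB
 * page using the Hyper-V specific GHCB usage (GHCB_USAGE_HYPERV_CALL).
 * The input data is copied into the page, the call code and output
 * address are filled in, and VMGEXIT hands the page to the hypervisor.
 * Returns the 16-bit hypercall call status; @input_size must fit within
 * the hypercalldata area (509 * 8 bytes).
 */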
61 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
62 {
63 	union hv_ghcb *hv_ghcb;
64 	void **ghcb_base;
65 	unsigned long flags;
66 	u64 status;
67 
68 	if (!hv_ghcb_pg)
69 		return -EFAULT;
70 
71 	WARN_ON(in_nmi());
72 
73 	local_irq_save(flags);
74 	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
75 	hv_ghcb = (union hv_ghcb *)*ghcb_base;
76 	if (!hv_ghcb) {
77 		local_irq_restore(flags);
78 		return -EFAULT;
79 	}
80 
81 	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
82 	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;
83 
84 	hv_ghcb->hypercall.outputgpa = (u64)output;
85 	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
86 	hv_ghcb->hypercall.hypercallinput.callcode = control;
87 
88 	if (input_size)
89 		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);
90 
91 	VMGEXIT();
92 
93 	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
94 	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
95 	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));
96 
97 	status = hv_ghcb->hypercall.hypercalloutput.callstatus;
98 
99 	local_irq_restore(flags);
100 
101 	return status;
102 }
103 
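/*
 * Raw accessors for the SEV-ES GHCB MSR (MSR_AMD64_SEV_ES_GHCB). The MSR
 * normally holds the guest physical address of the GHCB page and is also
 * used for the MSR-based GHCB protocol (SEV info and termination
 * requests below).
 */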
104 static inline u64 rd_ghcb_msr(void)
105 {
106 	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
107 }
108 
109 static inline void wr_ghcb_msr(u64 val)
110 {
111 	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
112 }
113 
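/*
 * Perform a single GHCB-protocol exit: record the exit code and info
 * fields in the GHCB, VMGEXIT to the hypervisor, and return ES_VMM_ERROR
 * if the hypervisor flagged an error in the low 32 bits of
 * sw_exit_info_1.
 */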
114 static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
115 				   u64 exit_info_1, u64 exit_info_2)
116 {
117 	/* Fill in protocol and format specifiers */
118 	ghcb->protocol_version = hv_ghcb_version;
119 	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;
120 
121 	ghcb_set_sw_exit_code(ghcb, exit_code);
122 	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
123 	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
124 
125 	VMGEXIT();
126 
127 	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
128 		return ES_VMM_ERROR;
129 	else
130 		return ES_OK;
131 }
132 
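/*
 * Request guest termination via the GHCB MSR protocol, passing a
 * reason-set/reason-code pair; this never returns.
 */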
133 void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
134 {
135 	u64 val = GHCB_MSR_TERM_REQ;
136 
137 	/* Tell the hypervisor what went wrong. */
138 	val |= GHCB_SEV_TERM_REASON(set, reason);
139 
140 	/* Request Guest Termination from Hypervisor */
141 	wr_ghcb_msr(val);
142 	VMGEXIT();
143 
144 	while (true)
145 		asm volatile("hlt\n" : : : "memory");
146 }
147 
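/*
 * Negotiate the GHCB protocol version using the MSR-based protocol:
 * query the hypervisor's supported range, verify it overlaps the range
 * the kernel supports, and settle on the highest mutually supported
 * version. The GHCB page GPA is saved and restored around the exchange
 * because the negotiation reuses the same MSR.
 */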
148 bool hv_ghcb_negotiate_protocol(void)
149 {
150 	u64 ghcb_gpa;
151 	u64 val;
152 
153 	/* Save ghcb page gpa. */
154 	ghcb_gpa = rd_ghcb_msr();
155 
156 	/* Do the GHCB protocol version negotiation */
157 	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
158 	VMGEXIT();
159 	val = rd_ghcb_msr();
160 
161 	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
162 		return false;
163 
164 	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
165 	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
166 		return false;
167 
168 	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
169 			     GHCB_PROTOCOL_MAX);
170 
171 	/* Restore the GHCB page GPA after negotiating the protocol. */
172 	wr_ghcb_msr(ghcb_gpa);
173 	VMGEXIT();
174 
175 	return true;
176 }
177 
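/*
 * Write an MSR through the GHCB. Mirroring the WRMSR register convention,
 * the MSR index goes in RCX and the value in RDX:RAX; exit_info_1 == 1
 * selects a write for the SVM_EXIT_MSR exit.
 */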
178 void hv_ghcb_msr_write(u64 msr, u64 value)
179 {
180 	union hv_ghcb *hv_ghcb;
181 	void **ghcb_base;
182 	unsigned long flags;
183 
184 	if (!hv_ghcb_pg)
185 		return;
186 
187 	WARN_ON(in_nmi());
188 
189 	local_irq_save(flags);
190 	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
191 	hv_ghcb = (union hv_ghcb *)*ghcb_base;
192 	if (!hv_ghcb) {
193 		local_irq_restore(flags);
194 		return;
195 	}
196 
197 	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
198 	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
199 	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
200 
201 	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
202 		pr_warn("Failed to write MSR 0x%llx via GHCB.\n", msr);
203 
204 	local_irq_restore(flags);
205 }
206 EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);
207 
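/*
 * Read an MSR through the GHCB. The MSR index goes in RCX and
 * exit_info_1 == 0 selects a read for the SVM_EXIT_MSR exit; on success
 * the value comes back in RDX:RAX, as with a native RDMSR.
 */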
208 void hv_ghcb_msr_read(u64 msr, u64 *value)
209 {
210 	union hv_ghcb *hv_ghcb;
211 	void **ghcb_base;
212 	unsigned long flags;
213 
214 	/* Compile-time check that union hv_ghcb is exactly one Hyper-V page. */
215 	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
216 
217 	if (!hv_ghcb_pg)
218 		return;
219 
220 	WARN_ON(in_nmi());
221 
222 	local_irq_save(flags);
223 	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
224 	hv_ghcb = (union hv_ghcb *)*ghcb_base;
225 	if (!hv_ghcb) {
226 		local_irq_restore(flags);
227 		return;
228 	}
229 
230 	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
231 	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
232 		pr_warn("Failed to read MSR 0x%llx via GHCB.\n", msr);
233 	else
234 		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
235 			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
236 	local_irq_restore(flags);
237 }
238 EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
239 
240 /*
241  * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
242  *
243  * In an Isolation VM, all guest memory is encrypted from the host, and
244  * the guest must make memory visible to the host via hypercall before
245  * sharing it with the host.
246  */
247 static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
248 			   enum hv_mem_host_visibility visibility)
249 {
250 	struct hv_gpa_range_for_visibility *input;
251 	u16 pages_processed;
252 	u64 hv_status;
253 	unsigned long flags;
254 
255 	/* no-op if partition isolation is not enabled */
256 	if (!hv_is_isolation_supported())
257 		return 0;
258 
259 	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
260 		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
261 			HV_MAX_MODIFY_GPA_REP_COUNT);
262 		return -EINVAL;
263 	}
264 
265 	local_irq_save(flags);
266 	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
267 
268 	if (unlikely(!input)) {
269 		local_irq_restore(flags);
270 		return -EINVAL;
271 	}
272 
273 	input->partition_id = HV_PARTITION_ID_SELF;
274 	input->host_visibility = visibility;
275 	input->reserved0 = 0;
276 	input->reserved1 = 0;
277 	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
278 	hv_status = hv_do_rep_hypercall(
279 			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
280 			0, input, &pages_processed);
281 	local_irq_restore(flags);
282 
283 	if (hv_result_success(hv_status))
284 		return 0;
285 	else
286 		return -EFAULT;
287 }
288 
289 /*
290  * hv_vtom_set_host_visibility - Set specified memory visible to host.
291  *
292  * In an Isolation VM, all guest memory is encrypted from the host, and
293  * the guest must make memory visible to the host via hypercall before
294  * sharing it with the host. This function is a wrapper around
295  * hv_mark_gpa_visibility() that takes a memory base and page count.
296  */
297 static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
298 {
299 	enum hv_mem_host_visibility visibility = enc ?
300 			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
301 	u64 *pfn_array;
302 	int ret = 0;
303 	bool result = true;
304 	int i, pfn;
305 
306 	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
307 	if (!pfn_array)
308 		return false;
309 
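	/*
	 * Convert the buffer to Hyper-V page frame numbers and change
	 * visibility in batches of at most HV_MAX_MODIFY_GPA_REP_COUNT
	 * pages, including the final partial batch.
	 */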
310 	for (i = 0, pfn = 0; i < pagecount; i++) {
311 		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
312 		pfn++;
313 
314 		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
315 			ret = hv_mark_gpa_visibility(pfn, pfn_array,
316 						     visibility);
317 			if (ret) {
318 				result = false;
319 				goto err_free_pfn_array;
320 			}
321 			pfn = 0;
322 		}
323 	}
324 
325  err_free_pfn_array:
326 	kfree(pfn_array);
327 	return result;
328 }
329 
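/*
 * Under vTOM, switching a page between private and shared changes the
 * guest physical address through which it is accessed (the shared alias
 * sits above the shared GPA boundary), so stale TLB entries must always
 * be flushed; no cache flush is reported as necessary for these
 * transitions.
 */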
330 static bool hv_vtom_tlb_flush_required(bool private)
331 {
332 	return true;
333 }
334 
335 static bool hv_vtom_cache_flush_required(void)
336 {
337 	return false;
338 }
339 
340 static bool hv_is_private_mmio(u64 addr)
341 {
342 	/*
343 	 * Hyper-V always provides a single IO-APIC in a guest VM.
344 	 * When a paravisor is used, it is emulated by the paravisor
345 	 * in the guest context and must be mapped private.
346 	 */
347 	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
348 	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
349 		return true;
350 
351 	/* Same for the vTPM */
352 	if (addr >= VTPM_BASE_ADDRESS &&
353 	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
354 		return true;
355 
356 	return false;
357 }
358 
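/*
 * hv_vtom_init - Set up a vTOM-based Isolation VM: mark the VM as
 * SNP-vTOM in sev_status, derive the CoCo encryption mask from the
 * shared GPA boundary, install the MMIO and encryption-status callbacks
 * above, and set write-back as the default MTRR cache mode.
 */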
359 void __init hv_vtom_init(void)
360 {
361 	/*
362 	 * By design, a VM using vTOM doesn't see the SEV setting,
363 	 * so SEV initialization is bypassed and sev_status isn't set.
364 	 * Set it here to indicate a vTOM VM.
365 	 */
366 	sev_status = MSR_AMD64_SNP_VTOM;
367 	cc_vendor = CC_VENDOR_AMD;
368 	cc_set_mask(ms_hyperv.shared_gpa_boundary);
369 	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;
370 
371 	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
372 	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
373 	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
374 	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
375 
376 	/* Set WB as the default cache mode. */
377 	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
378 }
379 
380 #endif /* CONFIG_AMD_MEM_ENCRYPT */
381 
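/*
 * hv_get_isolation_type - Return the isolation type reported by Hyper-V,
 * read from isolation_config_b when the HV_ISOLATION privilege bit is set.
 */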
382 enum hv_isolation_type hv_get_isolation_type(void)
383 {
384 	if (!(ms_hyperv.priv_high & HV_ISOLATION))
385 		return HV_ISOLATION_TYPE_NONE;
386 	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
387 }
388 EXPORT_SYMBOL_GPL(hv_get_isolation_type);
389 
390 /*
391  * hv_is_isolation_supported - Check whether the system runs in a
392  * Hyper-V isolation VM.
393  */
394 bool hv_is_isolation_supported(void)
395 {
396 	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
397 		return false;
398 
399 	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
400 		return false;
401 
402 	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
403 }
404 
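/*
 * Static branch enabled elsewhere during Hyper-V platform setup when the
 * reported isolation type is SEV-SNP, so the check below stays cheap on
 * the common non-SNP path.
 */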
405 DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
406 
407 /*
408  * hv_isolation_type_snp - Check whether the system runs in an AMD
409  * SEV-SNP based isolation VM.
410  */
411 bool hv_isolation_type_snp(void)
412 {
413 	return static_branch_unlikely(&isolation_type_snp);
414 }
415