// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode        : 16;
					u32 isfast          : 1;
					u32 reserved1       : 14;
					u32 isnested        : 1;
					u32 countofelements : 12;
					u32 reserved2       : 4;
					u32 repstartindex   : 12;
					u32 reserved3       : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2         : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
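
/*
 * Size check for the layout above: 509 * 8 bytes of hypercall data
 * (4072) + 8 (outputgpa) + 8 (input/output control union) + 8
 * (reserved2) = 4096 = HV_HYP_PAGE_SIZE, so the hypercall view
 * exactly fills the GHCB page. The BUILD_BUG_ON() in
 * hv_ghcb_msr_read() enforces this at compile time.
 */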

static u16 hv_ghcb_version __ro_after_init;

u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}
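
/*
 * Illustrative usage sketch (an assumed call site, for illustration
 * only; it does not appear in this file): a caller issuing a simple
 * non-rep hypercall through the GHCB path passes the hypercall code,
 * an input buffer, and its size, then checks the returned Hyper-V
 * status word:
 *
 *	struct hv_input_post_message *msg = ...;	// caller-owned buffer
 *	u64 status;
 *
 *	status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE, msg,
 *				   NULL, sizeof(*msg));
 *	if (status != HV_STATUS_SUCCESS)
 *		pr_err("GHCB hypercall failed: 0x%llx\n", status);
 */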

static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				   u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}
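
/*
 * Illustrative usage sketch (an assumed call site, shown for
 * illustration only): early boot code that cannot continue after a
 * failed GHCB protocol negotiation can request termination with the
 * generic reason set and a protocol-unsupported reason code:
 *
 *	if (!hv_ghcb_negotiate_protocol())
 *		hv_ghcb_terminate(SEV_TERM_SET_GEN,
 *				  GHCB_SEV_ES_PROT_UNSUPPORTED);
 */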

bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
			     GHCB_PROTOCOL_MAX);

	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}
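
/*
 * Worked example (illustrative values): an SEV information response
 * of 0x0002000100000001 decodes as GHCB_MSR_INFO() == 0x001
 * (GHCB_MSR_SEV_INFO_RESP), a maximum protocol version of 2 in bits
 * 63:48, and a minimum of 1 in bits 47:32. With GHCB_PROTOCOL_MAX of
 * 2, hv_ghcb_version above becomes min(2, 2) = 2.
 */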

void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write MSR %llx via GHCB.\n", msr);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);

void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* Check size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read MSR %llx via GHCB.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
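
/*
 * Illustrative usage sketch (assumed call site, for illustration
 * only): reading and then writing back a synthetic MSR through the
 * GHCB accessors exported above:
 *
 *	u64 guest_id = 0;
 *
 *	hv_ghcb_msr_read(HV_X64_MSR_GUEST_OS_ID, &guest_id);
 *	hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
 */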

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hypercall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so
 * the guest must make memory visible to the host via hypercall before
 * sharing it with the host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
			   enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility **input_pcpu, *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
			HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	local_irq_save(flags);
	input_pcpu = (struct hv_gpa_range_for_visibility **)
			this_cpu_ptr(hyperv_pcpu_input_arg);
	input = *input_pcpu;
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}

/*
 * hv_vtom_set_host_visibility - Set specified memory visible to the host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so
 * the guest must make memory visible to the host via hypercall before
 * sharing it with the host. This function is a wrapper around
 * hv_mark_gpa_visibility() that takes a memory base address and page
 * count.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	int ret = 0;
	bool result = true;
	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return false;

	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret) {
				result = false;
				goto err_free_pfn_array;
			}
			pfn = 0;
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
	return result;
}
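
/*
 * Illustrative call path (a sketch; the driver code below is an
 * assumed example, not taken from this file): callers normally reach
 * hv_vtom_set_host_visibility() through the generic set_memory API
 * rather than invoking it directly:
 *
 *	void *buf = (void *)__get_free_page(GFP_KERNEL);
 *
 *	if (set_memory_decrypted((unsigned long)buf, 1))
 *		return -EIO;	// visibility hypercall failed
 *
 * set_memory_decrypted() ends up calling the
 * x86_platform.guest.enc_status_change_finish() hook, which
 * hv_vtom_init() below points at this function.
 */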

static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* The same applies to a vTPM. */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

void __init hv_vtom_init(void)
{
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 */
	sev_status = MSR_AMD64_SNP_VTOM;
	cc_set_vendor(CC_VENDOR_AMD);
	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
}
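
/*
 * Worked example (illustrative boundary value): if
 * ms_hyperv.shared_gpa_boundary is 1ULL << 47, then cc_set_mask()
 * above makes decrypted (shared) mappings set bit 47, so the shared
 * alias of guest physical address X is X | (1ULL << 47), while
 * trimming physical_mask keeps private (encrypted) addresses below
 * the vTOM boundary.
 */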

#endif /* CONFIG_AMD_MEM_ENCRYPT */

enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check whether the system runs in a
 * Hyper-V Isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check whether the system runs in an AMD
 * SEV-SNP based Isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}