xref: /linux/arch/x86/hyperv/ivm.c (revision bf80eef2212a1e8451df13b52533f4bc31bb4f8e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode        : 16;
					u32 isfast          : 1;
					u32 reserved1       : 14;
					u32 isnested        : 1;
					u32 countofelements : 12;
					u32 reserved2       : 4;
					u32 repstartindex   : 12;
					u32 reserved3       : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2         : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
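
/*
 * Note: union hv_ghcb overlays Hyper-V's hypercall input/output layout on
 * the standard GHCB page, so a single per-CPU page serves both the GHCB
 * protocol and Hyper-V hypercalls. The union must span exactly one Hyper-V
 * page; hv_ghcb_msr_read() below has a BUILD_BUG_ON() enforcing this.
 */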

static u16 hv_ghcb_version __ro_after_init;

u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	/* Exit to the hypervisor to process the hypercall. */
	VMGEXIT();

	/* Invalidate the GHCB contents after the call completes. */
	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}
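
/*
 * Illustrative use only (the real call sites live outside this file, e.g.
 * the VMBus code posting messages in an SNP isolation VM):
 *
 *	struct hv_input_post_message *msg = ...;
 *	u64 status;
 *
 *	status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE, msg, NULL,
 *				   sizeof(*msg));
 *	if (status != HV_STATUS_SUCCESS)
 *		pr_err("POST_MESSAGE failed: 0x%llx\n", status);
 */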

static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				      u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	/* The lower 32 bits of sw_exit_info_1 hold the error code. */
	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

void hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request guest termination from the hypervisor. */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}
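
/*
 * Illustrative use only: a caller that cannot continue (e.g. after GHCB
 * protocol negotiation fails) might request termination with a generic
 * reason code from <asm/sev-common.h>:
 *
 *	hv_ghcb_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
 */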

bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save the GHCB page's GPA so it can be restored afterwards. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	/* Fail if the supported version ranges do not overlap. */
	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
				GHCB_PROTOCOL_MAX);

	/* Write the GHCB page's GPA back after negotiating the protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}
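
/*
 * Note: the negotiation above uses the GHCB MSR protocol defined in
 * <asm/sev-common.h>: the SEV_INFO response encodes the hypervisor's
 * minimum and maximum supported GHCB protocol versions, which are then
 * intersected with the guest's GHCB_PROTOCOL_MIN/GHCB_PROTOCOL_MAX range.
 */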

void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	/* WRMSR: RCX holds the MSR index, EDX:EAX the value; exit_info_1 = 1 selects a write. */
	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write MSR 0x%llx via GHCB.\n", msr);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);
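
/*
 * Illustrative use only (real callers, such as the MSR-based register
 * accessors for SNP guests, live outside this file):
 *
 *	hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
 */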

void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* union hv_ghcb must span exactly one Hyper-V page. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	/* RDMSR: RCX holds the MSR index; exit_info_1 = 0 selects a read. */
	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read MSR 0x%llx via GHCB.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
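
/*
 * Illustrative use only:
 *
 *	u64 value;
 *
 *	hv_ghcb_msr_read(HV_X64_MSR_TIME_REF_COUNT, &value);
 */
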
#endif /* CONFIG_AMD_MEM_ENCRYPT */

enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check whether the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check whether the system runs in an AMD SEV-SNP
 * based isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hypercall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must make memory visible to the host via this hypercall before
 * sharing it.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility **input_pcpu, *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count %d exceeds supported limit %lu\n",
		       count, HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	local_irq_save(flags);
	input_pcpu = (struct hv_gpa_range_for_visibility **)
			this_cpu_ptr(hyperv_pcpu_input_arg);
	input = *input_pcpu;
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}
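
/*
 * Illustrative use only: making a single page visible read/write to the
 * host (buf is a hypothetical page-aligned buffer):
 *
 *	u64 pfn = virt_to_hvpfn(buf);
 *
 *	ret = hv_mark_gpa_visibility(1, &pfn, VMBUS_PAGE_VISIBLE_READ_WRITE);
 */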

/*
 * hv_set_mem_host_visibility - Set specified memory visible to the host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must make memory visible to the host via hypercall before sharing
 * it. This function is a wrapper around hv_mark_gpa_visibility() that takes
 * a base address and page count.
 */
int hv_set_mem_host_visibility(unsigned long kbuffer, int pagecount, bool visible)
{
	enum hv_mem_host_visibility visibility = visible ?
			VMBUS_PAGE_VISIBLE_READ_WRITE : VMBUS_PAGE_NOT_VISIBLE;
	u64 *pfn_array;
	int ret = 0;
	int i, pfn;

	if (!hv_is_isolation_supported() || !hv_hypercall_pg)
		return 0;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return -ENOMEM;

	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		/* Flush a full batch, or the final partial one. */
		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret)
				goto err_free_pfn_array;
			pfn = 0;
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
	return ret;
}
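
/*
 * Illustrative use only: sharing a buffer with the host roughly looks like
 * this (names are hypothetical):
 *
 *	ret = hv_set_mem_host_visibility((unsigned long)ring_vaddr,
 *					 ring_size / HV_HYP_PAGE_SIZE, true);
 *	if (ret)
 *		return ret;
 */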

/*
 * hv_map_memory - Map memory into the extra address space above the shared
 * GPA boundary in an AMD SEV-SNP Isolation VM.
 */
void *hv_map_memory(void *addr, unsigned long size)
{
	unsigned long *pfns = kcalloc(size / PAGE_SIZE,
				      sizeof(unsigned long), GFP_KERNEL);
	void *vaddr;
	int i;

	if (!pfns)
		return NULL;

	/* Offset each PFN by the shared GPA boundary to reach its shared alias. */
	for (i = 0; i < size / PAGE_SIZE; i++)
		pfns[i] = vmalloc_to_pfn(addr + i * PAGE_SIZE) +
			(ms_hyperv.shared_gpa_boundary >> PAGE_SHIFT);

	vaddr = vmap_pfn(pfns, size / PAGE_SIZE, PAGE_KERNEL_IO);
	kfree(pfns);

	return vaddr;
}
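
/*
 * Illustrative use only: remapping a vmalloc'd buffer that was already made
 * host-visible, and releasing the alias later (names are hypothetical):
 *
 *	shared = hv_map_memory(ring_vaddr, ring_size);
 *	if (!shared)
 *		return -ENOMEM;
 *	...
 *	hv_unmap_memory(shared);
 */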

/* Undo a mapping created by hv_map_memory(). */
void hv_unmap_memory(void *addr)
{
	vunmap(addr);
}