xref: /linux/drivers/hv/mshv_root_hv_call.c (revision d31558c077d8be422b65e97974017c030b4bd91a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2023, Microsoft Corporation.
4  *
5  * Hypercall helper functions used by the mshv_root module.
6  *
7  * Authors: Microsoft Linux virtualization team
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12 #include <linux/export.h>
13 #include <asm/mshyperv.h>
14 
15 #include "mshv_root.h"
16 
17 /* Determined empirically */
18 #define HV_INIT_PARTITION_DEPOSIT_PAGES 208
19 #define HV_MAP_GPA_DEPOSIT_PAGES	256
20 #define HV_UMAP_GPA_PAGES		512
21 
22 #define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))
23 
24 #define HV_WITHDRAW_BATCH_SIZE	(HV_HYP_PAGE_SIZE / sizeof(u64))
25 #define HV_MAP_GPA_BATCH_SIZE	\
26 	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
27 		/ sizeof(u64))
28 #define HV_GET_VP_STATE_BATCH_SIZE	\
29 	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
30 		/ sizeof(u64))
31 #define HV_SET_VP_STATE_BATCH_SIZE	\
32 	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
33 		/ sizeof(u64))
34 #define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE	\
35 	((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
36 		/ sizeof(union hv_gpa_page_access_state))
37 #define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT		       \
38 	((HV_HYP_PAGE_SIZE -						       \
39 	  sizeof(struct hv_input_modify_sparse_spa_page_host_access)) /        \
40 	 sizeof(u64))
41 
/*
 * Withdraw up to @count previously deposited pages from @partition_id and
 * return them to the kernel page allocator.
 *
 * Stops early (and reports success) when the hypervisor runs out of
 * withdrawable pages (HV_STATUS_NO_RESOURCES).
 *
 * Returns 0 on success or a negative errno.
 */
int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
{
	struct hv_input_withdraw_memory *input_page;
	struct hv_output_withdraw_memory *output_page;
	struct page *page;
	u16 completed;
	unsigned long remaining = count;
	/*
	 * Initialize to success so a zero @count (nothing to withdraw)
	 * does not feed an uninitialized value to hv_result_to_errno().
	 */
	u64 status = HV_STATUS_SUCCESS;
	int i;
	unsigned long flags;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	output_page = page_address(page);

	while (remaining) {
		/*
		 * The per-cpu hypercall input page is shared; interrupts
		 * must stay disabled from claiming it until the hypercall
		 * returns.
		 */
		local_irq_save(flags);

		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		input_page->partition_id = partition_id;
		status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
					     min(remaining, HV_WITHDRAW_BATCH_SIZE),
					     0, input_page, output_page);

		local_irq_restore(flags);

		completed = hv_repcomp(status);

		/* Give each withdrawn page back to the kernel allocator */
		for (i = 0; i < completed; i++)
			__free_page(pfn_to_page(output_page->gpa_page_list[i]));

		if (!hv_result_success(status)) {
			/* Running dry is the expected terminating condition */
			if (hv_result(status) == HV_STATUS_NO_RESOURCES)
				status = HV_STATUS_SUCCESS;
			break;
		}

		remaining -= completed;
	}
	free_page((unsigned long)output_page);

	return hv_result_to_errno(status);
}
88 
/*
 * Create a child partition via HVCALL_CREATE_PARTITION.
 *
 * The creation and isolation property structs are copied by value into the
 * per-cpu hypercall input page, so interrupts must stay disabled from
 * claiming that page until the hypercall returns.  If the hypervisor
 * reports it needs memory, pages are deposited into the current (root)
 * partition and the call is retried.
 *
 * On success, *partition_id holds the new partition's id.
 * Returns 0 on success or a negative errno.
 */
int hv_call_create_partition(u64 flags,
			     struct hv_partition_creation_properties creation_properties,
			     union hv_partition_isolation_properties isolation_properties,
			     u64 *partition_id)
{
	struct hv_input_create_partition *input;
	struct hv_output_create_partition *output;
	u64 status;
	int ret;
	unsigned long irq_flags;

	do {
		local_irq_save(irq_flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->flags = flags;
		input->compatibility_version = HV_COMPATIBILITY_21_H2;

		memcpy(&input->partition_creation_properties, &creation_properties,
		       sizeof(creation_properties));

		memcpy(&input->isolation_properties, &isolation_properties,
		       sizeof(isolation_properties));

		status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
					 input, output);

		if (!hv_result_needs_memory(status)) {
			/* Read the output page before re-enabling interrupts */
			if (hv_result_success(status))
				*partition_id = output->partition_id;
			local_irq_restore(irq_flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(irq_flags);
		/*
		 * NOTE(review): the failing status is passed through so the
		 * deposit helper can size the deposit from it — confirm
		 * against hv_deposit_memory()'s contract.
		 */
		ret = hv_deposit_memory(hv_current_partition_id, status);
	} while (!ret);

	return ret;
}
131 
/*
 * Initialize a freshly created partition, pre-depositing the empirically
 * determined amount of memory it needs and retrying the hypercall with
 * further deposits whenever the hypervisor asks for more.
 *
 * Returns 0 on success or a negative errno.
 */
int hv_call_initialize_partition(u64 partition_id)
{
	struct hv_input_initialize_partition input = {
		.partition_id = partition_id,
	};
	u64 status;
	int err;

	/* Seed the partition with its initial memory deposit */
	err = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
				    HV_INIT_PARTITION_DEPOSIT_PAGES);
	if (err)
		return err;

	for (;;) {
		status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
					       *(u64 *)&input);

		if (!hv_result_needs_memory(status))
			return hv_result_to_errno(status);

		/* Hypervisor wants more memory: deposit and retry */
		err = hv_deposit_memory(partition_id, status);
		if (err)
			return err;
	}
}
158 
/* Finalize a partition so that no further configuration changes are made. */
int hv_call_finalize_partition(u64 partition_id)
{
	struct hv_input_finalize_partition input = {
		.partition_id = partition_id,
	};

	return hv_result_to_errno(hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
							*(u64 *)&input));
}
170 
/* Ask the hypervisor to tear down @partition_id. */
int hv_call_delete_partition(u64 partition_id)
{
	struct hv_input_delete_partition input = {
		.partition_id = partition_id,
	};

	return hv_result_to_errno(hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION,
							*(u64 *)&input));
}
181 
182 /* Ask the hypervisor to map guest ram pages or the guest mmio space */
hv_do_map_gpa_hcall(u64 partition_id,u64 gfn,u64 page_struct_count,u32 flags,struct page ** pages,u64 mmio_spa)183 static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
184 			       u32 flags, struct page **pages, u64 mmio_spa)
185 {
186 	struct hv_input_map_gpa_pages *input_page;
187 	u64 status, *pfnlist;
188 	unsigned long irq_flags, large_shift = 0;
189 	int ret = 0, done = 0;
190 	u64 page_count = page_struct_count;
191 
192 	if (page_count == 0 || (pages && mmio_spa))
193 		return -EINVAL;
194 
195 	if (flags & HV_MAP_GPA_LARGE_PAGE) {
196 		if (mmio_spa)
197 			return -EINVAL;
198 
199 		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
200 			return -EINVAL;
201 
202 		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
203 		page_count >>= large_shift;
204 	}
205 
206 	while (done < page_count) {
207 		ulong i, completed, remain = page_count - done;
208 		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
209 
210 		local_irq_save(irq_flags);
211 		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
212 
213 		input_page->target_partition_id = partition_id;
214 		input_page->target_gpa_base = gfn + (done << large_shift);
215 		input_page->map_flags = flags;
216 		pfnlist = input_page->source_gpa_page_list;
217 
218 		for (i = 0; i < rep_count; i++)
219 			if (flags & HV_MAP_GPA_NO_ACCESS) {
220 				pfnlist[i] = 0;
221 			} else if (pages) {
222 				u64 index = (done + i) << large_shift;
223 
224 				if (index >= page_struct_count) {
225 					ret = -EINVAL;
226 					break;
227 				}
228 				pfnlist[i] = page_to_pfn(pages[index]);
229 			} else {
230 				pfnlist[i] = mmio_spa + done + i;
231 			}
232 		if (ret)
233 			break;
234 
235 		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
236 					     input_page, NULL);
237 		local_irq_restore(irq_flags);
238 
239 		completed = hv_repcomp(status);
240 
241 		if (hv_result_needs_memory(status)) {
242 			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
243 						    HV_MAP_GPA_DEPOSIT_PAGES);
244 			if (ret)
245 				break;
246 
247 		} else if (!hv_result_success(status)) {
248 			ret = hv_result_to_errno(status);
249 			break;
250 		}
251 
252 		done += completed;
253 	}
254 
255 	if (ret && done) {
256 		u32 unmap_flags = 0;
257 
258 		if (flags & HV_MAP_GPA_LARGE_PAGE)
259 			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
260 		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
261 	}
262 
263 	return ret;
264 }
265 
266 /* Ask the hypervisor to map guest ram pages */
hv_call_map_gpa_pages(u64 partition_id,u64 gpa_target,u64 page_count,u32 flags,struct page ** pages)267 int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
268 			  u32 flags, struct page **pages)
269 {
270 	return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
271 				   flags, pages, 0);
272 }
273 
274 /* Ask the hypervisor to map guest mmio space */
hv_call_map_mmio_pages(u64 partition_id,u64 gfn,u64 mmio_spa,u64 numpgs)275 int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
276 {
277 	int i;
278 	u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
279 		    HV_MAP_GPA_NOT_CACHED;
280 
281 	for (i = 0; i < numpgs; i++)
282 		if (page_is_ram(mmio_spa + i))
283 			return -EINVAL;
284 
285 	return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
286 				   mmio_spa);
287 }
288 
/*
 * Unmap @page_count_4k guest physical pages starting at @gfn.
 *
 * With HV_UNMAP_GPA_LARGE_PAGE the count still arrives in 4K units but the
 * hypercall repetitions are 2M units, hence the large_shift scaling.
 * Batches are limited to HV_UMAP_GPA_PAGES repetitions per hypercall.
 *
 * Returns 0 on success or a negative errno.
 */
int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
			    u32 flags)
{
	struct hv_input_unmap_gpa_pages *input_page;
	u64 status, page_count = page_count_4k;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		/* Convert the 4K count into 2M-sized repetitions */
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong completed, remain = page_count - done;
		int rep_count = min(remain, HV_UMAP_GPA_PAGES);

		/* The per-cpu input page is only ours while IRQs are off */
		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->unmap_flags = flags;
		status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
					     0, input_page, NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);
		if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	return ret;
}
333 
/*
 * Read the access (accessed/dirty) state of @count guest pages starting at
 * @gpa_base_pfn into @states.
 *
 * The states are copied out of the per-cpu hypercall output page while
 * interrupts are still disabled, batch by batch.  *written_total reports
 * how many entries were filled in, even on partial failure.
 *
 * Returns 0 on success or a negative errno from the last hypercall status.
 */
int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
				  union hv_gpa_page_access_state_flags state_flags,
				  int *written_total,
				  union hv_gpa_page_access_state *states)
{
	struct hv_input_get_gpa_pages_access_state *input_page;
	union hv_gpa_page_access_state *output_page;
	int completed = 0;
	unsigned long remaining = count;
	int rep_count, i;
	/* status == 0 (success) if count is zero and the loop never runs */
	u64 status = 0;
	unsigned long flags;

	*written_total = 0;
	while (remaining) {
		local_irq_save(flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);

		/* Resume where the previous batch left off */
		input_page->partition_id = partition_id;
		input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
		input_page->flags = state_flags;
		rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);

		status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
					     0, input_page, output_page);
		if (!hv_result_success(status)) {
			local_irq_restore(flags);
			break;
		}
		completed = hv_repcomp(status);
		/* Copy out before re-enabling IRQs: the output page is per-cpu */
		for (i = 0; i < completed; ++i)
			states[i].as_uint8 = output_page[i].as_uint8;

		local_irq_restore(flags);
		states += completed;
		*written_total += completed;
		remaining -= completed;
	}

	return hv_result_to_errno(status);
}
376 
hv_call_assert_virtual_interrupt(u64 partition_id,u32 vector,u64 dest_addr,union hv_interrupt_control control)377 int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
378 				     u64 dest_addr,
379 				     union hv_interrupt_control control)
380 {
381 	struct hv_input_assert_virtual_interrupt *input;
382 	unsigned long flags;
383 	u64 status;
384 
385 	local_irq_save(flags);
386 	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
387 	memset(input, 0, sizeof(*input));
388 	input->partition_id = partition_id;
389 	input->vector = vector;
390 	/*
391 	 * NOTE: dest_addr only needs to be provided while asserting an
392 	 * interrupt on x86 platform
393 	 */
394 #if IS_ENABLED(CONFIG_X86)
395 	input->dest_addr = dest_addr;
396 #endif
397 	input->control = control;
398 	status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
399 	local_irq_restore(flags);
400 
401 	return hv_result_to_errno(status);
402 }
403 
/* Delete virtual processor @vp_index from partition @partition_id. */
int hv_call_delete_vp(u64 partition_id, u32 vp_index)
{
	union hv_input_delete_vp input = {
		.partition_id = partition_id,
		.vp_index = vp_index,
	};

	return hv_result_to_errno(hv_do_fast_hypercall16(HVCALL_DELETE_VP,
							 input.as_uint64[0],
							 input.as_uint64[1]));
}
EXPORT_SYMBOL_GPL(hv_call_delete_vp);
418 
/*
 * Retrieve VP state described by @state_data for @vp_index of @partition_id.
 *
 * The caller chooses exactly one output mechanism:
 *  - @pages/@page_count: the hypervisor writes into these caller pages
 *    (their pfns are passed in the variable-size hypercall header), or
 *  - @ret_output: the small fixed-size output is copied back here.
 *
 * Retries with memory deposits when the hypervisor asks for more.
 * Returns 0 on success or a negative errno.
 */
int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
			 struct hv_vp_state_data state_data,
			 /* Choose between pages and ret_output */
			 u64 page_count, struct page **pages,
			 union hv_output_get_vp_state *ret_output)
{
	struct hv_input_get_vp_state *input;
	union hv_output_get_vp_state *output;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;

	/* The pfn list must fit in the single hypercall input page */
	if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
		return -EINVAL;

	if (!page_count && !ret_output)
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);
		memset(input, 0, sizeof(*input));
		memset(output, 0, sizeof(*output));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		for (i = 0; i < page_count; i++)
			input->output_data_pfns[i] = page_to_pfn(pages[i]);

		/* Encode the variable-size header length in the control word */
		control = (HVCALL_GET_VP_STATE) |
			  (page_count << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, output);

		if (!hv_result_needs_memory(status)) {
			/* Copy the output page before re-enabling IRQs */
			if (hv_result_success(status) && ret_output)
				memcpy(ret_output, output, sizeof(*output));

			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_deposit_memory(partition_id, status);
	} while (!ret);

	return ret;
}
472 
/*
 * Set VP state described by @state_data for @vp_index of @partition_id.
 *
 * The caller chooses exactly one input mechanism:
 *  - @bytes/@num_bytes: inline data copied into the hypercall input page, or
 *  - @pages/@page_count: a list of pfns the hypervisor reads from.
 *
 * The variable-size header length is expressed in 8-byte units, hence the
 * round-up for the byte case.  Retries with memory deposits when asked.
 * Returns 0 on success or a negative errno.
 */
int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
			 /* Choose between pages and bytes */
			 struct hv_vp_state_data state_data, u64 page_count,
			 struct page **pages, u32 num_bytes, u8 *bytes)
{
	struct hv_input_set_vp_state *input;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;
	u16 varhead_sz;

	/* Both variants must fit in the single hypercall input page */
	if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
		return -EINVAL;
	if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
		return -EINVAL;

	if (num_bytes)
		/* round up to 8 and divide by 8 */
		varhead_sz = (num_bytes + 7) >> 3;
	else if (page_count)
		varhead_sz = page_count;
	else
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		if (num_bytes) {
			memcpy((u8 *)input->data, bytes, num_bytes);
		} else {
			for (i = 0; i < page_count; i++)
				input->data[i].pfns = page_to_pfn(pages[i]);
		}

		control = (HVCALL_SET_VP_STATE) |
			  (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, NULL);

		if (!hv_result_needs_memory(status)) {
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_deposit_memory(partition_id, status);
	} while (!ret);

	return ret;
}
531 
/*
 * Map a VP state page of the given @type for @vp_index of @partition_id.
 *
 * If *state_page is non-NULL on entry, that page is offered to the
 * hypervisor as the map location (overlay-GPFN mode); otherwise the
 * hypervisor picks the location and *state_page is set from the output.
 * Retries with memory deposits when the hypervisor asks for more.
 *
 * Returns 0 on success or a negative errno.
 */
static int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
				     union hv_input_vtl input_vtl,
				     struct page **state_page)
{
	struct hv_input_map_vp_state_page *input;
	struct hv_output_map_vp_state_page *output;
	u64 status;
	int ret;
	unsigned long flags;

	do {
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->type = type;
		input->input_vtl = input_vtl;

		/* Caller-provided page: tell the hypervisor where to map */
		if (*state_page) {
			input->flags.map_location_provided = 1;
			input->requested_map_location =
				page_to_pfn(*state_page);
		}

		status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input,
					 output);

		if (!hv_result_needs_memory(status)) {
			/* Read the output page before re-enabling IRQs */
			if (hv_result_success(status))
				*state_page = pfn_to_page(output->map_location);
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}

		local_irq_restore(flags);

		ret = hv_deposit_memory(partition_id, status);
	} while (!ret);

	return ret;
}
578 
mshv_use_overlay_gpfn(void)579 static bool mshv_use_overlay_gpfn(void)
580 {
581 	return hv_l1vh_partition() &&
582 	       mshv_root.vmm_caps.vmm_can_provide_overlay_gpfn;
583 }
584 
/*
 * Map a VP state page, allocating the backing page ourselves when overlay
 * GPFNs are in use and letting the hypervisor choose otherwise.
 *
 * On success *state_page refers to the mapped page; on failure it is NULL
 * and any page we allocated is freed.  Returns 0 or a negative errno.
 */
int hv_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			 union hv_input_vtl input_vtl,
			 struct page **state_page)
{
	struct page *overlay = NULL;
	int ret;

	if (mshv_use_overlay_gpfn()) {
		overlay = alloc_page(GFP_KERNEL);
		if (!overlay)
			return -ENOMEM;
	}
	/* NULL tells the hypercall helper to let the hypervisor pick */
	*state_page = overlay;

	ret = hv_call_map_vp_state_page(partition_id, vp_index, type, input_vtl,
					state_page);
	if (!ret)
		return 0;

	if (overlay) {
		__free_page(overlay);
		*state_page = NULL;
	}

	return ret;
}
611 
/* Issue HVCALL_UNMAP_VP_STATE_PAGE for the given VP state page @type. */
static int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
				       union hv_input_vtl input_vtl)
{
	struct hv_input_unmap_vp_state_page *input;
	unsigned long irq_flags;
	u64 status;

	local_irq_save(irq_flags);

	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));

	input->partition_id = partition_id;
	input->vp_index = vp_index;
	input->type = type;
	input->input_vtl = input_vtl;

	status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);

	local_irq_restore(irq_flags);

	return hv_result_to_errno(status);
}
636 
/*
 * Unmap a VP state page and, in overlay-GPFN mode, return the backing
 * page (allocated by hv_map_vp_state_page()) to the allocator.
 */
int hv_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			   struct page *state_page, union hv_input_vtl input_vtl)
{
	int ret;

	ret = hv_call_unmap_vp_state_page(partition_id, vp_index, type,
					  input_vtl);

	if (mshv_use_overlay_gpfn() && state_page)
		__free_page(state_page);

	return ret;
}
647 
/*
 * Fetch an extended partition property into @property_value.
 *
 * @property_value_sz bytes are copied out of the hypercall output page, so
 * it is bounded against the output's property_value field to prevent a
 * caller-controlled over-read of the per-cpu page.
 *
 * Returns 0 on success or a negative errno.
 */
int hv_call_get_partition_property_ex(u64 partition_id, u64 property_code,
				      u64 arg, void *property_value,
				      size_t property_value_sz)
{
	u64 status;
	unsigned long flags;
	struct hv_input_get_partition_property_ex *input;
	struct hv_output_get_partition_property_ex *output;

	/* Never copy more than the hypervisor's output buffer holds */
	if (property_value_sz >
	    sizeof_field(struct hv_output_get_partition_property_ex,
			 property_value))
		return -EINVAL;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = *this_cpu_ptr(hyperv_pcpu_output_arg);

	memset(input, 0, sizeof(*input));
	input->partition_id = partition_id;
	input->property_code = property_code;
	input->arg = arg;
	status = hv_do_hypercall(HVCALL_GET_PARTITION_PROPERTY_EX, input, output);

	if (!hv_result_success(status)) {
		local_irq_restore(flags);
		hv_status_debug(status, "\n");
		return hv_result_to_errno(status);
	}
	/* Copy out before re-enabling IRQs: the output page is per-cpu */
	memcpy(property_value, &output->property_value, property_value_sz);

	local_irq_restore(flags);

	return 0;
}
678 
679 int
hv_call_clear_virtual_interrupt(u64 partition_id)680 hv_call_clear_virtual_interrupt(u64 partition_id)
681 {
682 	int status;
683 
684 	status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
685 				       partition_id);
686 
687 	return hv_result_to_errno(status);
688 }
689 
690 int
hv_call_create_port(u64 port_partition_id,union hv_port_id port_id,u64 connection_partition_id,struct hv_port_info * port_info,u8 port_vtl,u8 min_connection_vtl,int node)691 hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
692 		    u64 connection_partition_id,
693 		    struct hv_port_info *port_info,
694 		    u8 port_vtl, u8 min_connection_vtl, int node)
695 {
696 	struct hv_input_create_port *input;
697 	unsigned long flags;
698 	int ret = 0;
699 	int status;
700 
701 	do {
702 		local_irq_save(flags);
703 		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
704 		memset(input, 0, sizeof(*input));
705 
706 		input->port_partition_id = port_partition_id;
707 		input->port_id = port_id;
708 		input->connection_partition_id = connection_partition_id;
709 		input->port_info = *port_info;
710 		input->port_vtl = port_vtl;
711 		input->min_connection_vtl = min_connection_vtl;
712 		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
713 		status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
714 		local_irq_restore(flags);
715 		if (hv_result_success(status))
716 			break;
717 
718 		if (!hv_result_needs_memory(status)) {
719 			ret = hv_result_to_errno(status);
720 			break;
721 		}
722 		ret = hv_deposit_memory(port_partition_id, status);
723 	} while (!ret);
724 
725 	return ret;
726 }
727 
728 int
hv_call_delete_port(u64 port_partition_id,union hv_port_id port_id)729 hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
730 {
731 	union hv_input_delete_port input = { 0 };
732 	int status;
733 
734 	input.port_partition_id = port_partition_id;
735 	input.port_id = port_id;
736 	status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
737 					input.as_uint64[0],
738 					input.as_uint64[1]);
739 
740 	return hv_result_to_errno(status);
741 }
742 
743 int
hv_call_connect_port(u64 port_partition_id,union hv_port_id port_id,u64 connection_partition_id,union hv_connection_id connection_id,struct hv_connection_info * connection_info,u8 connection_vtl,int node)744 hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
745 		     u64 connection_partition_id,
746 		     union hv_connection_id connection_id,
747 		     struct hv_connection_info *connection_info,
748 		     u8 connection_vtl, int node)
749 {
750 	struct hv_input_connect_port *input;
751 	unsigned long flags;
752 	int ret = 0, status;
753 
754 	do {
755 		local_irq_save(flags);
756 		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
757 		memset(input, 0, sizeof(*input));
758 		input->port_partition_id = port_partition_id;
759 		input->port_id = port_id;
760 		input->connection_partition_id = connection_partition_id;
761 		input->connection_id = connection_id;
762 		input->connection_info = *connection_info;
763 		input->connection_vtl = connection_vtl;
764 		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
765 		status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);
766 
767 		local_irq_restore(flags);
768 		if (hv_result_success(status))
769 			break;
770 
771 		if (!hv_result_needs_memory(status)) {
772 			ret = hv_result_to_errno(status);
773 			break;
774 		}
775 		ret = hv_deposit_memory(connection_partition_id, status);
776 	} while (!ret);
777 
778 	return ret;
779 }
780 
781 int
hv_call_disconnect_port(u64 connection_partition_id,union hv_connection_id connection_id)782 hv_call_disconnect_port(u64 connection_partition_id,
783 			union hv_connection_id connection_id)
784 {
785 	union hv_input_disconnect_port input = { 0 };
786 	int status;
787 
788 	input.connection_partition_id = connection_partition_id;
789 	input.connection_id = connection_id;
790 	input.is_doorbell = 1;
791 	status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
792 					input.as_uint64[0],
793 					input.as_uint64[1]);
794 
795 	return hv_result_to_errno(status);
796 }
797 
798 int
hv_call_notify_port_ring_empty(u32 sint_index)799 hv_call_notify_port_ring_empty(u32 sint_index)
800 {
801 	union hv_input_notify_port_ring_empty input = { 0 };
802 	int status;
803 
804 	input.sint_index = sint_index;
805 	status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
806 				       input.as_uint64);
807 
808 	return hv_result_to_errno(status);
809 }
810 
/*
 * Equivalent of hv_call_map_stats_page() for cases when the caller provides
 * the map location.
 *
 * NOTE: This is a newer hypercall that always supports SELF and PARENT stats
 * areas, unlike hv_call_map_stats_page().
 */
static int hv_call_map_stats_page2(enum hv_stats_object_type type,
				   const union hv_stats_object_identity *identity,
				   u64 map_location)
{
	unsigned long flags;
	struct hv_input_map_stats_page2 *input;
	u64 status;
	int ret;

	/* Only valid in overlay-GPFN mode and with a caller-chosen pfn */
	if (!map_location || !mshv_use_overlay_gpfn())
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;
		input->map_location = map_location;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE2, input, NULL);

		local_irq_restore(flags);

		ret = hv_result_to_errno(status);

		if (!ret)
			break;

		/* Any failure other than "needs memory" is final */
		if (!hv_result_needs_memory(status)) {
			hv_status_debug(status, "\n");
			break;
		}

		ret = hv_deposit_memory(hv_current_partition_id, status);
	} while (!ret);

	return ret;
}
858 
859 static int
hv_stats_get_area_type(enum hv_stats_object_type type,const union hv_stats_object_identity * identity)860 hv_stats_get_area_type(enum hv_stats_object_type type,
861 		       const union hv_stats_object_identity *identity)
862 {
863 	switch (type) {
864 	case HV_STATS_OBJECT_HYPERVISOR:
865 		return identity->hv.stats_area_type;
866 	case HV_STATS_OBJECT_LOGICAL_PROCESSOR:
867 		return identity->lp.stats_area_type;
868 	case HV_STATS_OBJECT_PARTITION:
869 		return identity->partition.stats_area_type;
870 	case HV_STATS_OBJECT_VP:
871 		return identity->vp.stats_area_type;
872 	}
873 
874 	return -EINVAL;
875 }
876 
/*
 * Map a stats page, where the page location is provided by the hypervisor.
 *
 * NOTE: The concept of separate SELF and PARENT stats areas does not exist on
 * older hypervisor versions. All the available stats information can be found
 * on the SELF page. When attempting to map the PARENT area on a hypervisor
 * that doesn't support it, return "success" but with a NULL address. The
 * caller should check for this case and instead fallback to the SELF area
 * alone.
 */
static int
hv_call_map_stats_page(enum hv_stats_object_type type,
		       const union hv_stats_object_identity *identity,
		       struct hv_stats_page **addr)
{
	unsigned long flags;
	struct hv_input_map_stats_page *input;
	struct hv_output_map_stats_page *output;
	u64 status, pfn;
	int ret = 0;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
		/*
		 * Snapshot the output pfn while IRQs are still off; it is
		 * only consumed after the loop exits on success.
		 */
		pfn = output->map_location;

		local_irq_restore(flags);

		if (!hv_result_needs_memory(status)) {
			if (hv_result_success(status))
				break;

			/* PARENT unsupported on older hypervisors: see NOTE */
			if (hv_stats_get_area_type(type, identity) == HV_STATS_AREA_PARENT &&
			    hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
				*addr = NULL;
				return 0;
			}

			hv_status_debug(status, "\n");
			return hv_result_to_errno(status);
		}

		ret = hv_deposit_memory(hv_current_partition_id, status);
		if (ret)
			return ret;
	} while (!ret);

	*addr = page_address(pfn_to_page(pfn));

	return ret;
}
935 
hv_map_stats_page(enum hv_stats_object_type type,const union hv_stats_object_identity * identity,struct hv_stats_page ** addr)936 int hv_map_stats_page(enum hv_stats_object_type type,
937 		      const union hv_stats_object_identity *identity,
938 		      struct hv_stats_page **addr)
939 {
940 	int ret;
941 	struct page *allocated_page = NULL;
942 
943 	if (!addr)
944 		return -EINVAL;
945 
946 	if (mshv_use_overlay_gpfn()) {
947 		allocated_page = alloc_page(GFP_KERNEL);
948 		if (!allocated_page)
949 			return -ENOMEM;
950 
951 		ret = hv_call_map_stats_page2(type, identity,
952 					      page_to_pfn(allocated_page));
953 		*addr = page_address(allocated_page);
954 	} else {
955 		ret = hv_call_map_stats_page(type, identity, addr);
956 	}
957 
958 	if (ret && allocated_page) {
959 		__free_page(allocated_page);
960 		*addr = NULL;
961 	}
962 
963 	return ret;
964 }
965 
hv_call_unmap_stats_page(enum hv_stats_object_type type,const union hv_stats_object_identity * identity)966 static int hv_call_unmap_stats_page(enum hv_stats_object_type type,
967 				    const union hv_stats_object_identity *identity)
968 {
969 	unsigned long flags;
970 	struct hv_input_unmap_stats_page *input;
971 	u64 status;
972 
973 	local_irq_save(flags);
974 	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
975 
976 	memset(input, 0, sizeof(*input));
977 	input->type = type;
978 	input->identity = *identity;
979 
980 	status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
981 	local_irq_restore(flags);
982 
983 	return hv_result_to_errno(status);
984 }
985 
/*
 * Unmap a stats page and, in overlay-GPFN mode, free the backing page
 * that hv_map_stats_page() allocated (identified via its virtual address).
 */
int hv_unmap_stats_page(enum hv_stats_object_type type,
			struct hv_stats_page *page_addr,
			const union hv_stats_object_identity *identity)
{
	int ret = hv_call_unmap_stats_page(type, identity);

	if (mshv_use_overlay_gpfn() && page_addr)
		__free_page(virt_to_page(page_addr));

	return ret;
}
999 
/*
 * Acquire or release host access to a sparse list of system physical
 * pages (@pages), batching the pfn list into per-hypercall chunks.
 *
 * With the LARGE_PAGE flag, @page_struct_count counts 4K struct pages but
 * the hypercall repetitions are 2M units (large_shift scaling).
 *
 * Returns 0 on success or a negative errno.
 */
int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
				   u64 page_struct_count, u32 host_access,
				   u32 flags, u8 acquire)
{
	struct hv_input_modify_sparse_spa_page_host_access *input_page;
	u64 status;
	u64 done = 0;
	unsigned long irq_flags, large_shift = 0;
	u64 page_count = page_struct_count;
	u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
			     HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain,
				    HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);
		int ret = 0;

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		/* Only set the partition id if you are making the pages
		 * exclusive
		 */
		if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
			input_page->partition_id = partition_id;
		input_page->flags = flags;
		input_page->host_access = host_access;

		for (i = 0; i < rep_count; i++) {
			u64 index = (done + i) << large_shift;

			if (index >= page_struct_count) {
				ret = -EINVAL;
				break;
			}

			input_page->spa_page_list[i] =
						page_to_pfn(pages[index]);
		}
		if (ret) {
			/*
			 * The old code returned here with interrupts still
			 * disabled; restore them before bailing out.
			 */
			local_irq_restore(irq_flags);
			return ret;
		}

		status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
					     NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);

		if (!hv_result_success(status))
			return hv_result_to_errno(status);

		done += completed;
	}

	return 0;
}
1063