// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Microsoft Corporation.
 *
 * Hypercall helper functions used by the mshv_root module.
 *
 * Authors: Microsoft Linux virtualization team
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/mshyperv.h>

#include "mshv_root.h"

/* Determined empirically */
#define HV_INIT_PARTITION_DEPOSIT_PAGES 208
#define HV_MAP_GPA_DEPOSIT_PAGES	256
#define HV_UMAP_GPA_PAGES		512

#define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))

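/*
 * Rep hypercall batch sizes: the number of rep-list entries that fit in a
 * single hypercall input (or output) page, less the fixed input header
 * where there is one.
 */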
#define HV_WITHDRAW_BATCH_SIZE	(HV_HYP_PAGE_SIZE / sizeof(u64))
#define HV_MAP_GPA_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
		/ sizeof(u64))
#define HV_GET_VP_STATE_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
		/ sizeof(u64))
#define HV_SET_VP_STATE_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
		/ sizeof(u64))
#define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
		/ sizeof(union hv_gpa_page_access_state))
#define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT		       \
	((HV_HYP_PAGE_SIZE -						       \
	  sizeof(struct hv_input_modify_sparse_spa_page_host_access)) /       \
	 sizeof(u64))

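/*
 * Withdraw up to @count previously deposited pages from @partition_id and
 * return them to the kernel page allocator. HV_STATUS_NO_RESOURCES means
 * the partition has nothing left to withdraw, so it is treated as success.
 */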
int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
{
	struct hv_input_withdraw_memory *input_page;
	struct hv_output_withdraw_memory *output_page;
	struct page *page;
	u16 completed;
	unsigned long remaining = count;
	u64 status = HV_STATUS_SUCCESS;	/* in case count is 0 */
	int i;
	unsigned long flags;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	output_page = page_address(page);

	while (remaining) {
		local_irq_save(flags);

		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		input_page->partition_id = partition_id;
		status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
					     min(remaining, HV_WITHDRAW_BATCH_SIZE),
					     0, input_page, output_page);

		local_irq_restore(flags);

		completed = hv_repcomp(status);

		for (i = 0; i < completed; i++)
			__free_page(pfn_to_page(output_page->gpa_page_list[i]));

		if (!hv_result_success(status)) {
			if (hv_result(status) == HV_STATUS_NO_RESOURCES)
				status = HV_STATUS_SUCCESS;
			break;
		}

		remaining -= completed;
	}
	free_page((unsigned long)output_page);

	return hv_result_to_errno(status);
}

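/*
 * Create a child partition. Like most helpers in this file, on
 * HV_STATUS_INSUFFICIENT_MEMORY the hypervisor is topped up with
 * hv_call_deposit_pages() and the hypercall is retried.
 */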
int hv_call_create_partition(u64 flags,
			     struct hv_partition_creation_properties creation_properties,
			     union hv_partition_isolation_properties isolation_properties,
			     u64 *partition_id)
{
	struct hv_input_create_partition *input;
	struct hv_output_create_partition *output;
	u64 status;
	int ret;
	unsigned long irq_flags;

	do {
		local_irq_save(irq_flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->flags = flags;
		input->compatibility_version = HV_COMPATIBILITY_21_H2;

		memcpy(&input->partition_creation_properties, &creation_properties,
		       sizeof(creation_properties));

		memcpy(&input->isolation_properties, &isolation_properties,
		       sizeof(isolation_properties));

		status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
					 input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (hv_result_success(status))
				*partition_id = output->partition_id;
			local_irq_restore(irq_flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(irq_flags);
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    hv_current_partition_id, 1);
	} while (!ret);

	return ret;
}

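/*
 * Initialize a newly created partition. An initial deposit of
 * HV_INIT_PARTITION_DEPOSIT_PAGES (determined empirically, see above) is
 * made up front, with single-page top-ups on retry.
 */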
int hv_call_initialize_partition(u64 partition_id)
{
	struct hv_input_initialize_partition input;
	u64 status;
	int ret;

	input.partition_id = partition_id;

	ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
				    HV_INIT_PARTITION_DEPOSIT_PAGES);
	if (ret)
		return ret;

	do {
		status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
					       *(u64 *)&input);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			break;
		}
		ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_finalize_partition(u64 partition_id)
{
	struct hv_input_finalize_partition input;
	u64 status;

	input.partition_id = partition_id;
	status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
				       *(u64 *)&input);

	return hv_result_to_errno(status);
}

int hv_call_delete_partition(u64 partition_id)
{
	struct hv_input_delete_partition input;
	u64 status;

	input.partition_id = partition_id;
	status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION, *(u64 *)&input);

	return hv_result_to_errno(status);
}

/* Ask the hypervisor to map guest RAM pages or guest MMIO space */
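/*
 * Pages are mapped in batches of HV_MAP_GPA_BATCH_SIZE. If a later batch
 * fails, everything mapped so far is unmapped again, so the operation is
 * all-or-nothing from the caller's point of view.
 */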
static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
			       u32 flags, struct page **pages, u64 mmio_spa)
{
	struct hv_input_map_gpa_pages *input_page;
	u64 status, *pfnlist;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;
	u64 page_count = page_struct_count;

	if (page_count == 0 || (pages && mmio_spa))
		return -EINVAL;

	if (flags & HV_MAP_GPA_LARGE_PAGE) {
		if (mmio_spa)
			return -EINVAL;

		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->map_flags = flags;
		pfnlist = input_page->source_gpa_page_list;

		for (i = 0; i < rep_count; i++)
			if (flags & HV_MAP_GPA_NO_ACCESS) {
				pfnlist[i] = 0;
			} else if (pages) {
				u64 index = (done + i) << large_shift;

				if (index >= page_struct_count) {
					ret = -EINVAL;
					break;
				}
				pfnlist[i] = page_to_pfn(pages[index]);
			} else {
				pfnlist[i] = mmio_spa + done + i;
			}
		if (ret) {
			/* Don't leave interrupts disabled on the error path */
			local_irq_restore(irq_flags);
			break;
		}

		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
					     input_page, NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);

		if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
						    HV_MAP_GPA_DEPOSIT_PAGES);
			if (ret)
				break;

		} else if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	if (ret && done) {
		u32 unmap_flags = 0;

		if (flags & HV_MAP_GPA_LARGE_PAGE)
			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
	}

	return ret;
}

/* Ask the hypervisor to map guest RAM pages */
int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
			  u32 flags, struct page **pages)
{
	return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
				   flags, pages, 0);
}

/* Ask the hypervisor to map guest MMIO space */
int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
{
	int i;
	u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
		    HV_MAP_GPA_NOT_CACHED;

	for (i = 0; i < numpgs; i++)
		if (page_is_ram(mmio_spa + i))
			return -EINVAL;

	return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
				   mmio_spa);
}

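/*
 * Unmap @page_count_4k guest pages starting at @gfn. With
 * HV_UNMAP_GPA_LARGE_PAGE set, the count must be 2M-aligned and is
 * converted to a count of large pages before issuing the hypercall.
 */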
int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
			    u32 flags)
{
	struct hv_input_unmap_gpa_pages *input_page;
	u64 status, page_count = page_count_4k;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong completed, remain = page_count - done;
		int rep_count = min(remain, HV_UMAP_GPA_PAGES);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->unmap_flags = flags;
		status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
					     0, input_page, NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);
		if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	return ret;
}

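/*
 * Read per-page access state for @count guest pages starting at
 * @gpa_base_pfn. *written_total reports how many entries of @states were
 * filled in, which may be short of @count on error.
 */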
int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
				  union hv_gpa_page_access_state_flags state_flags,
				  int *written_total,
				  union hv_gpa_page_access_state *states)
{
	struct hv_input_get_gpa_pages_access_state *input_page;
	union hv_gpa_page_access_state *output_page;
	int completed = 0;
	unsigned long remaining = count;
	int rep_count, i;
	u64 status = 0;
	unsigned long flags;

	*written_total = 0;
	while (remaining) {
		local_irq_save(flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input_page->partition_id = partition_id;
		input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
		input_page->flags = state_flags;
		rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);

		status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
					     0, input_page, output_page);
		if (!hv_result_success(status)) {
			local_irq_restore(flags);
			break;
		}
		completed = hv_repcomp(status);
		for (i = 0; i < completed; ++i)
			states[i].as_uint8 = output_page[i].as_uint8;

		local_irq_restore(flags);
		states += completed;
		*written_total += completed;
		remaining -= completed;
	}

	return hv_result_to_errno(status);
}

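/* Assert (inject) a virtual interrupt in the target partition */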
int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
				     u64 dest_addr,
				     union hv_interrupt_control control)
{
	struct hv_input_assert_virtual_interrupt *input;
	unsigned long flags;
	u64 status;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));
	input->partition_id = partition_id;
	input->vector = vector;
	/*
	 * NOTE: dest_addr only needs to be provided when asserting an
	 * interrupt on x86 platforms
	 */
#if IS_ENABLED(CONFIG_X86)
	input->dest_addr = dest_addr;
#endif
	input->control = control;
	status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
	local_irq_restore(flags);

	return hv_result_to_errno(status);
}

int hv_call_delete_vp(u64 partition_id, u32 vp_index)
{
	union hv_input_delete_vp input = {};
	u64 status;

	input.partition_id = partition_id;
	input.vp_index = vp_index;

	status = hv_do_fast_hypercall16(HVCALL_DELETE_VP,
					input.as_uint64[0], input.as_uint64[1]);

	return hv_result_to_errno(status);
}
EXPORT_SYMBOL_GPL(hv_call_delete_vp);

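/*
 * Fetch VP state described by @state_data. The output goes either to the
 * caller-provided @pages or to @ret_output, never both.
 */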
int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
			 struct hv_vp_state_data state_data,
			 /* Choose between pages and ret_output */
			 u64 page_count, struct page **pages,
			 union hv_output_get_vp_state *ret_output)
{
	struct hv_input_get_vp_state *input;
	union hv_output_get_vp_state *output;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;

	if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
		return -EINVAL;

	if (!page_count && !ret_output)
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);
		memset(input, 0, sizeof(*input));
		memset(output, 0, sizeof(*output));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		for (i = 0; i < page_count; i++)
			input->output_data_pfns[i] = page_to_pfn(pages[i]);

		control = (HVCALL_GET_VP_STATE) |
			  (page_count << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (hv_result_success(status) && ret_output)
				memcpy(ret_output, output, sizeof(*output));

			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    partition_id, 1);
	} while (!ret);

	return ret;
}

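/*
 * Set VP state from either a raw byte buffer or a list of pages. The
 * variable header size is expressed in 8-byte units, hence the rounding
 * of num_bytes below.
 */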
int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
			 /* Choose between pages and bytes */
			 struct hv_vp_state_data state_data, u64 page_count,
			 struct page **pages, u32 num_bytes, u8 *bytes)
{
	struct hv_input_set_vp_state *input;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;
	u16 varhead_sz;

	if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
		return -EINVAL;
	if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
		return -EINVAL;

	if (num_bytes)
		/* round up to 8 and divide by 8 */
		varhead_sz = (num_bytes + 7) >> 3;
	else if (page_count)
		varhead_sz = page_count;
	else
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		if (num_bytes) {
			memcpy((u8 *)input->data, bytes, num_bytes);
		} else {
			for (i = 0; i < page_count; i++)
				input->data[i].pfns = page_to_pfn(pages[i]);
		}

		control = (HVCALL_SET_VP_STATE) |
			  (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, NULL);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    partition_id, 1);
	} while (!ret);

	return ret;
}

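/*
 * Map one of the VP's state pages. If *state_page is non-NULL the caller
 * supplies the backing page; otherwise the hypervisor picks the location
 * and *state_page is set from the output on success.
 */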
static int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
				     union hv_input_vtl input_vtl,
				     struct page **state_page)
{
	struct hv_input_map_vp_state_page *input;
	struct hv_output_map_vp_state_page *output;
	u64 status;
	int ret;
	unsigned long flags;

	do {
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->type = type;
		input->input_vtl = input_vtl;

		if (*state_page) {
			input->flags.map_location_provided = 1;
			input->requested_map_location =
				page_to_pfn(*state_page);
		}

		status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input,
					 output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (hv_result_success(status))
				*state_page = pfn_to_page(output->map_location);
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}

		local_irq_restore(flags);

		ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
	} while (!ret);

	return ret;
}

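/*
 * On an L1VH partition whose VMM advertises the capability, overlay pages
 * (VP state pages, stats pages) are backed by memory the root allocates
 * itself rather than by a hypervisor-chosen page.
 */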
static bool mshv_use_overlay_gpfn(void)
{
	return hv_l1vh_partition() &&
	       mshv_root.vmm_caps.vmm_can_provide_overlay_gpfn;
}

int hv_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			 union hv_input_vtl input_vtl,
			 struct page **state_page)
{
	int ret = 0;
	struct page *allocated_page = NULL;

	if (mshv_use_overlay_gpfn()) {
		allocated_page = alloc_page(GFP_KERNEL);
		if (!allocated_page)
			return -ENOMEM;
		*state_page = allocated_page;
	} else {
		*state_page = NULL;
	}

	ret = hv_call_map_vp_state_page(partition_id, vp_index, type, input_vtl,
					state_page);

	if (ret && allocated_page) {
		__free_page(allocated_page);
		*state_page = NULL;
	}

	return ret;
}

static int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
				       union hv_input_vtl input_vtl)
{
	unsigned long flags;
	u64 status;
	struct hv_input_unmap_vp_state_page *input;

	local_irq_save(flags);

	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	memset(input, 0, sizeof(*input));

	input->partition_id = partition_id;
	input->vp_index = vp_index;
	input->type = type;
	input->input_vtl = input_vtl;

	status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);

	local_irq_restore(flags);

	return hv_result_to_errno(status);
}

int hv_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			   struct page *state_page, union hv_input_vtl input_vtl)
{
	int ret = hv_call_unmap_vp_state_page(partition_id, vp_index, type, input_vtl);

	if (mshv_use_overlay_gpfn() && state_page)
		__free_page(state_page);

	return ret;
}

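/*
 * Query an extended partition property. @property_value_sz bytes are
 * copied from the hypercall output, so the buffer must be sized for the
 * requested @property_code.
 */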
int hv_call_get_partition_property_ex(u64 partition_id, u64 property_code,
				      u64 arg, void *property_value,
				      size_t property_value_sz)
{
	u64 status;
	unsigned long flags;
	struct hv_input_get_partition_property_ex *input;
	struct hv_output_get_partition_property_ex *output;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = *this_cpu_ptr(hyperv_pcpu_output_arg);

	memset(input, 0, sizeof(*input));
	input->partition_id = partition_id;
	input->property_code = property_code;
	input->arg = arg;
	status = hv_do_hypercall(HVCALL_GET_PARTITION_PROPERTY_EX, input, output);

	if (!hv_result_success(status)) {
		local_irq_restore(flags);
		hv_status_debug(status, "\n");
		return hv_result_to_errno(status);
	}
	memcpy(property_value, &output->property_value, property_value_sz);

	local_irq_restore(flags);

	return 0;
}

int
hv_call_clear_virtual_interrupt(u64 partition_id)
{
	int status;

	status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
				       partition_id);

	return hv_result_to_errno(status);
}

int
hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
		    u64 connection_partition_id,
		    struct hv_port_info *port_info,
		    u8 port_vtl, u8 min_connection_vtl, int node)
{
	struct hv_input_create_port *input;
	unsigned long flags;
	int ret = 0;
	int status;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));

		input->port_partition_id = port_partition_id;
		input->port_id = port_id;
		input->connection_partition_id = connection_partition_id;
		input->port_info = *port_info;
		input->port_vtl = port_vtl;
		input->min_connection_vtl = min_connection_vtl;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
		local_irq_restore(flags);
		if (hv_result_success(status))
			break;

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			break;
		}
		ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);

	} while (!ret);

	return ret;
}

int
hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
{
	union hv_input_delete_port input = { 0 };
	int status;

	input.port_partition_id = port_partition_id;
	input.port_id = port_id;
	status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
					input.as_uint64[0],
					input.as_uint64[1]);

	return hv_result_to_errno(status);
}

int
hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
		     u64 connection_partition_id,
		     union hv_connection_id connection_id,
		     struct hv_connection_info *connection_info,
		     u8 connection_vtl, int node)
{
	struct hv_input_connect_port *input;
	unsigned long flags;
	int ret = 0, status;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));
		input->port_partition_id = port_partition_id;
		input->port_id = port_id;
		input->connection_partition_id = connection_partition_id;
		input->connection_id = connection_id;
		input->connection_info = *connection_info;
		input->connection_vtl = connection_vtl;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);

		local_irq_restore(flags);
		if (hv_result_success(status))
			break;

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			break;
		}
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    connection_partition_id, 1);
	} while (!ret);

	return ret;
}

int
hv_call_disconnect_port(u64 connection_partition_id,
			union hv_connection_id connection_id)
{
	union hv_input_disconnect_port input = { 0 };
	int status;

	input.connection_partition_id = connection_partition_id;
	input.connection_id = connection_id;
	input.is_doorbell = 1;
	status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
					input.as_uint64[0],
					input.as_uint64[1]);

	return hv_result_to_errno(status);
}

int
hv_call_notify_port_ring_empty(u32 sint_index)
{
	union hv_input_notify_port_ring_empty input = { 0 };
	int status;

	input.sint_index = sint_index;
	status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
				       input.as_uint64);

	return hv_result_to_errno(status);
}

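/*
 * Overlay-GPFN variant of the stats page mapping hypercall: the root
 * supplies @map_location itself, so this is only valid when
 * mshv_use_overlay_gpfn() is true.
 */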
static int hv_call_map_stats_page2(enum hv_stats_object_type type,
				   const union hv_stats_object_identity *identity,
				   u64 map_location)
{
	unsigned long flags;
	struct hv_input_map_stats_page2 *input;
	u64 status;
	int ret;

	if (!map_location || !mshv_use_overlay_gpfn())
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;
		input->map_location = map_location;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE2, input, NULL);

		local_irq_restore(flags);

		ret = hv_result_to_errno(status);

		if (!ret)
			break;

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			hv_status_debug(status, "\n");
			break;
		}

		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    hv_current_partition_id, 1);
	} while (!ret);

	return ret;
}

static int hv_call_map_stats_page(enum hv_stats_object_type type,
				  const union hv_stats_object_identity *identity,
				  void **addr)
{
	unsigned long flags;
	struct hv_input_map_stats_page *input;
	struct hv_output_map_stats_page *output;
	u64 status, pfn;
	int ret = 0;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
		pfn = output->map_location;

		local_irq_restore(flags);
		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			if (hv_result_success(status))
				break;
			return ret;
		}

		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    hv_current_partition_id, 1);
		if (ret)
			return ret;
	} while (!ret);

	*addr = page_address(pfn_to_page(pfn));

	return ret;
}

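/*
 * Map a stats page, choosing between the overlay-GPFN flavor (the root
 * allocates the page) and the classic flavor (the hypervisor picks it).
 */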
int hv_map_stats_page(enum hv_stats_object_type type,
		      const union hv_stats_object_identity *identity,
		      void **addr)
{
	int ret;
	struct page *allocated_page = NULL;

	if (!addr)
		return -EINVAL;

	if (mshv_use_overlay_gpfn()) {
		allocated_page = alloc_page(GFP_KERNEL);
		if (!allocated_page)
			return -ENOMEM;

		ret = hv_call_map_stats_page2(type, identity,
					      page_to_pfn(allocated_page));
		*addr = page_address(allocated_page);
	} else {
		ret = hv_call_map_stats_page(type, identity, addr);
	}

	if (ret && allocated_page) {
		__free_page(allocated_page);
		*addr = NULL;
	}

	return ret;
}

static int hv_call_unmap_stats_page(enum hv_stats_object_type type,
				    const union hv_stats_object_identity *identity)
{
	unsigned long flags;
	struct hv_input_unmap_stats_page *input;
	u64 status;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	memset(input, 0, sizeof(*input));
	input->type = type;
	input->identity = *identity;

	status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
	local_irq_restore(flags);

	return hv_result_to_errno(status);
}

int hv_unmap_stats_page(enum hv_stats_object_type type, void *page_addr,
			const union hv_stats_object_identity *identity)
{
	int ret;

	ret = hv_call_unmap_stats_page(type, identity);

	if (mshv_use_overlay_gpfn() && page_addr)
		__free_page(virt_to_page(page_addr));

	return ret;
}

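/*
 * Acquire or release host access to a sparse list of SPA pages. Batched
 * like the GPA mapping calls; @acquire selects between the acquire and
 * release hypercall codes.
 */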
int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
				   u64 page_struct_count, u32 host_access,
				   u32 flags, u8 acquire)
{
	struct hv_input_modify_sparse_spa_page_host_access *input_page;
	u64 status;
	int done = 0;
	unsigned long irq_flags, large_shift = 0;
	u64 page_count = page_struct_count;
	u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
			     HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain,
				    HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		/*
		 * Only set the partition id when making the pages exclusive
		 */
		if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
			input_page->partition_id = partition_id;
		input_page->flags = flags;
		input_page->host_access = host_access;

		for (i = 0; i < rep_count; i++) {
			u64 index = (done + i) << large_shift;

			if (index >= page_struct_count) {
				/* Don't return with interrupts disabled */
				local_irq_restore(irq_flags);
				return -EINVAL;
			}

			input_page->spa_page_list[i] =
						page_to_pfn(pages[index]);
		}

		status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
					     NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);

		if (!hv_result_success(status))
			return hv_result_to_errno(status);

		done += completed;
	}

	return 0;
}