1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2023, Microsoft Corporation.
4 *
5 * Hypercall helper functions used by the mshv_root module.
6 *
7 * Authors: Microsoft Linux virtualization team
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12 #include <linux/export.h>
13 #include <asm/mshyperv.h>
14
15 #include "mshv_root.h"
16
17 /* Determined empirically */
18 #define HV_INIT_PARTITION_DEPOSIT_PAGES 208
19 #define HV_MAP_GPA_DEPOSIT_PAGES 256
20 #define HV_UMAP_GPA_PAGES 512
21
22 #define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))
23
24 #define HV_WITHDRAW_BATCH_SIZE (HV_HYP_PAGE_SIZE / sizeof(u64))
25 #define HV_MAP_GPA_BATCH_SIZE \
26 ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
27 / sizeof(u64))
28 #define HV_GET_VP_STATE_BATCH_SIZE \
29 ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
30 / sizeof(u64))
31 #define HV_SET_VP_STATE_BATCH_SIZE \
32 ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
33 / sizeof(u64))
34 #define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE \
35 ((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
36 / sizeof(union hv_gpa_page_access_state))
37 #define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT \
38 ((HV_HYP_PAGE_SIZE - \
39 sizeof(struct hv_input_modify_sparse_spa_page_host_access)) / \
40 sizeof(u64))
41
/*
 * Withdraw up to @count pages previously deposited with a partition and
 * return them to the kernel page allocator.
 *
 * @count:		maximum number of 4K pages to withdraw
 * @node:		NUMA node hint (currently unused by this helper)
 * @partition_id:	partition to withdraw the memory from
 *
 * Returns 0 on success (including the HV_STATUS_NO_RESOURCES case, which
 * means the partition simply has no more pages to give back), negative
 * errno otherwise.
 */
int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
{
	struct hv_input_withdraw_memory *input_page;
	struct hv_output_withdraw_memory *output_page;
	struct page *page;
	u16 completed;
	u64 status, withdrawn = 0;
	int i;
	unsigned long flags;

	/*
	 * The output buffer (list of withdrawn PFNs) must survive across
	 * iterations, so allocate a private page rather than using the
	 * per-cpu output arg.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	output_page = page_address(page);

	while (withdrawn < count) {
		/* The per-cpu input page requires interrupts disabled */
		local_irq_save(flags);

		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		input_page->partition_id = partition_id;
		status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
					     min(count - withdrawn, HV_WITHDRAW_BATCH_SIZE),
					     0, input_page, output_page);

		local_irq_restore(flags);

		completed = hv_repcomp(status);

		/* Hand every withdrawn page back to the allocator */
		for (i = 0; i < completed; i++)
			__free_page(pfn_to_page(output_page->gpa_page_list[i]));

		if (!hv_result_success(status)) {
			/*
			 * Nothing left to withdraw is not an error for the
			 * caller; report success in that case.
			 */
			if (hv_result(status) == HV_STATUS_NO_RESOURCES)
				status = HV_STATUS_SUCCESS;
			break;
		}

		withdrawn += completed;
	}
	free_page((unsigned long)output_page);

	trace_mshv_hvcall_withdraw_memory(partition_id, withdrawn, status);

	return hv_result_to_errno(status);
}
89
/*
 * Ask the hypervisor to create a new partition.
 *
 * @flags:			HV_PARTITION_CREATION_FLAG_* bits
 * @creation_properties:	architecture/feature properties for the new
 *				partition (copied by value into the input page)
 * @isolation_properties:	isolation configuration
 * @partition_id:		out: id of the newly created partition
 *
 * If the hypervisor reports it needs memory, deposit pages into the
 * current (root) partition and retry until either the call succeeds or
 * the deposit fails. Returns 0 on success, negative errno otherwise.
 */
int hv_call_create_partition(u64 flags,
			     struct hv_partition_creation_properties creation_properties,
			     union hv_partition_isolation_properties isolation_properties,
			     u64 *partition_id)
{
	struct hv_input_create_partition *input;
	struct hv_output_create_partition *output;
	u64 status;
	int ret;
	unsigned long irq_flags;

	do {
		/* Per-cpu hypercall args require interrupts disabled */
		local_irq_save(irq_flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->flags = flags;
		input->compatibility_version = HV_COMPATIBILITY_21_H2;

		memcpy(&input->partition_creation_properties, &creation_properties,
		       sizeof(creation_properties));

		memcpy(&input->isolation_properties, &isolation_properties,
		       sizeof(isolation_properties));

		status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
					 input, output);

		if (!hv_result_needs_memory(status)) {
			/* Read the output before re-enabling interrupts */
			if (hv_result_success(status))
				*partition_id = output->partition_id;
			local_irq_restore(irq_flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(irq_flags);
		/* Hypervisor is out of memory: deposit into root and retry */
		ret = hv_deposit_memory(hv_current_partition_id, status);
	} while (!ret);

	trace_mshv_hvcall_create_partition(flags, ret ? ret : *partition_id);

	return ret;
}
134
/*
 * Initialize a freshly created partition.
 *
 * Pre-deposits HV_INIT_PARTITION_DEPOSIT_PAGES pages (an empirically
 * determined amount, see the #define above) into the partition before
 * issuing the hypercall, then keeps depositing and retrying for as long
 * as the hypervisor reports it needs more memory.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hv_call_initialize_partition(u64 partition_id)
{
	struct hv_input_initialize_partition input;
	u64 status;
	int ret;

	input.partition_id = partition_id;

	ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
				    HV_INIT_PARTITION_DEPOSIT_PAGES);
	if (ret)
		return ret;

	do {
		/* The input fits in a register, so use a fast hypercall */
		status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
					       *(u64 *)&input);

		if (!hv_result_needs_memory(status)) {
			ret = hv_result_to_errno(status);
			break;
		}
		ret = hv_deposit_memory(partition_id, status);
	} while (!ret);

	trace_mshv_hvcall_initialize_partition(partition_id, status);

	return ret;
}
163
/*
 * Tell the hypervisor to finalize @partition_id.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hv_call_finalize_partition(u64 partition_id)
{
	struct hv_input_finalize_partition input = {
		.partition_id = partition_id,
	};
	u64 status;

	/* Single-u64 input, so a fast hypercall suffices */
	status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
				       *(u64 *)&input);

	trace_mshv_hvcall_finalize_partition(partition_id, status);

	return hv_result_to_errno(status);
}
177
/*
 * Tell the hypervisor to delete @partition_id.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hv_call_delete_partition(u64 partition_id)
{
	struct hv_input_delete_partition input = {
		.partition_id = partition_id,
	};
	u64 status;

	/* Single-u64 input, so a fast hypercall suffices */
	status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION, *(u64 *)&input);

	trace_mshv_hvcall_delete_partition(partition_id, status);

	return hv_result_to_errno(status);
}
190
191 /* Ask the hypervisor to map guest ram pages or the guest mmio space */
static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
			       u32 flags, struct page **pages, u64 mmio_spa)
{
	struct hv_input_map_gpa_pages *input_page;
	u64 status, *pfnlist;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;
	u64 page_count = page_struct_count;

	/* Exactly one of @pages (guest ram) or @mmio_spa (mmio) may be used */
	if (page_count == 0 || (pages && mmio_spa))
		return -EINVAL;

	if (flags & HV_MAP_GPA_LARGE_PAGE) {
		/* Large-page mappings are only supported for ram, not mmio */
		if (mmio_spa)
			return -EINVAL;

		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		/* Convert the 4K page count into a 2M large-page count */
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	/* Map in batches limited by the size of the per-cpu input page */
	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		/* done counts large pages when large_shift != 0 */
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->map_flags = flags;
		pfnlist = input_page->source_gpa_page_list;

		for (i = 0; i < rep_count; i++)
			if (flags & HV_MAP_GPA_NO_ACCESS) {
				/* No backing needed for inaccessible pages */
				pfnlist[i] = 0;
			} else if (pages) {
				u64 index = (done + i) << large_shift;

				if (index >= page_struct_count) {
					ret = -EINVAL;
					break;
				}
				pfnlist[i] = page_to_pfn(pages[index]);
			} else {
				pfnlist[i] = mmio_spa + done + i;
			}
		/* Bail out (after irq restore below would be too late) */
		if (ret)
			break;

		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
					     input_page, NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);

		if (hv_result_needs_memory(status)) {
			/* Deposit and retry the incomplete tail of the batch */
			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
						    HV_MAP_GPA_DEPOSIT_PAGES);
			if (ret)
				break;

		} else if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	/* Roll back any partially established mapping on failure */
	if (ret && done) {
		u32 unmap_flags = 0;

		if (flags & HV_MAP_GPA_LARGE_PAGE)
			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
	}

	return ret;
}
274
275 /* Ask the hypervisor to map guest ram pages */
/* mmio_spa == 0 selects the guest-ram path in hv_do_map_gpa_hcall() */
int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
			  u32 flags, struct page **pages)
{
	return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
				   flags, pages, 0);
}
282
283 /* Ask the hypervisor to map guest mmio space */
/*
 * Map host mmio space into a guest's physical address space.
 *
 * @partition_id:	target partition
 * @gfn:		guest frame number to map at
 * @mmio_spa:		first system PFN of the mmio range
 * @numpgs:		number of 4K pages to map
 *
 * Rejects ranges that overlap host ram. Returns 0 on success, negative
 * errno otherwise.
 */
int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
{
	/*
	 * Use a u64 counter: numpgs is u64, and an int counter would never
	 * terminate (and overflow, which is UB) for counts above INT_MAX.
	 */
	u64 i;
	u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
		    HV_MAP_GPA_NOT_CACHED;

	for (i = 0; i < numpgs; i++)
		if (page_is_ram(mmio_spa + i))
			return -EINVAL;

	return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
				   mmio_spa);
}
297
/*
 * Unmap guest physical pages previously mapped with HVCALL_MAP_GPA_PAGES.
 *
 * @partition_id:	target partition
 * @gfn:		first guest frame number to unmap
 * @page_count_4k:	count in 4K pages (must be 2M aligned when
 *			HV_UNMAP_GPA_LARGE_PAGE is set)
 * @flags:		HV_UNMAP_GPA_* flags
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
			    u32 flags)
{
	struct hv_input_unmap_gpa_pages *input_page;
	u64 status, page_count = page_count_4k;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		/* Convert the 4K page count into a 2M large-page count */
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	/* Unmap in batches of at most HV_UMAP_GPA_PAGES repetitions */
	while (done < page_count) {
		ulong completed, remain = page_count - done;
		int rep_count = min(remain, HV_UMAP_GPA_PAGES);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		/* done counts large pages when large_shift != 0 */
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->unmap_flags = flags;
		status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
					     0, input_page, NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);
		if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	return ret;
}
342
/*
 * Query the access (e.g. accessed/dirty) state of a range of guest pages.
 *
 * @partition_id:	partition to query
 * @count:		number of pages to query
 * @gpa_base_pfn:	first guest PFN of the range
 * @state_flags:	which states to query / whether to clear them
 * @written_total:	out: number of state entries actually written
 * @states:		out: caller buffer, at least @count entries
 *
 * Returns 0 on success, negative errno otherwise. On partial failure,
 * @written_total reflects the entries retrieved before the error.
 */
int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
				  union hv_gpa_page_access_state_flags state_flags,
				  int *written_total,
				  union hv_gpa_page_access_state *states)
{
	struct hv_input_get_gpa_pages_access_state *input_page;
	union hv_gpa_page_access_state *output_page;
	int completed = 0;
	unsigned long remaining = count;
	int rep_count, i;
	u64 status = 0;
	unsigned long flags;

	*written_total = 0;
	/* Batch by how many one-byte states fit in the output page */
	while (remaining) {
		local_irq_save(flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input_page->partition_id = partition_id;
		/* Resume where the previous batch left off */
		input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
		input_page->flags = state_flags;
		rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);

		status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
					     0, input_page, output_page);
		if (!hv_result_success(status)) {
			local_irq_restore(flags);
			break;
		}
		completed = hv_repcomp(status);
		/* Copy out of the per-cpu page before irqs are re-enabled */
		for (i = 0; i < completed; ++i)
			states[i].as_uint8 = output_page[i].as_uint8;

		local_irq_restore(flags);
		states += completed;
		*written_total += completed;
		remaining -= completed;
	}

	return hv_result_to_errno(status);
}
385
/*
 * Assert a virtual interrupt in a guest partition.
 *
 * @partition_id:	target partition
 * @vector:		interrupt vector to assert
 * @dest_addr:		destination address; only consumed on x86 (see below)
 * @control:		delivery mode / destination mode / trigger mode bits
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
				     u64 dest_addr,
				     union hv_interrupt_control control)
{
	struct hv_input_assert_virtual_interrupt *input;
	unsigned long flags;
	u64 status;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));
	input->partition_id = partition_id;
	input->vector = vector;
	/*
	 * NOTE: dest_addr only needs to be provided while asserting an
	 * interrupt on x86 platform
	 */
#if IS_ENABLED(CONFIG_X86)
	input->dest_addr = dest_addr;
#endif
	input->control = control;
	status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
	local_irq_restore(flags);

	return hv_result_to_errno(status);
}
412
/*
 * Delete a virtual processor from a partition.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hv_call_delete_vp(u64 partition_id, u32 vp_index)
{
	u64 status;
	union hv_input_delete_vp input = {};

	input.vp_index = vp_index;
	input.partition_id = partition_id;

	/* 16-byte input, so a two-register fast hypercall suffices */
	status = hv_do_fast_hypercall16(HVCALL_DELETE_VP,
					input.as_uint64[0], input.as_uint64[1]);

	return hv_result_to_errno(status);
}
EXPORT_SYMBOL_GPL(hv_call_delete_vp);
427
/*
 * Retrieve VP state from the hypervisor.
 *
 * The state is returned either through caller-provided @pages (for bulky
 * state) or through @ret_output (for small state) - exactly one of the
 * two mechanisms must be selected: either page_count > 0 or ret_output
 * non-NULL.
 *
 * Returns 0 on success, negative errno otherwise. Deposits memory and
 * retries when the hypervisor asks for more.
 */
int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
			 struct hv_vp_state_data state_data,
			 /* Choose between pages and ret_output */
			 u64 page_count, struct page **pages,
			 union hv_output_get_vp_state *ret_output)
{
	struct hv_input_get_vp_state *input;
	union hv_output_get_vp_state *output;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;

	/* The PFN list must fit in the single input page */
	if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
		return -EINVAL;

	if (!page_count && !ret_output)
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);
		memset(input, 0, sizeof(*input));
		memset(output, 0, sizeof(*output));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		for (i = 0; i < page_count; i++)
			input->output_data_pfns[i] = page_to_pfn(pages[i]);

		/* Encode the variable-size PFN list in the control word */
		control = (HVCALL_GET_VP_STATE) |
			  (page_count << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, output);

		if (!hv_result_needs_memory(status)) {
			/* Copy from the per-cpu page before enabling irqs */
			if (hv_result_success(status) && ret_output)
				memcpy(ret_output, output, sizeof(*output));

			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_deposit_memory(partition_id, status);
	} while (!ret);

	return ret;
}
481
/*
 * Push VP state to the hypervisor.
 *
 * The state is supplied either as raw @bytes (small state, copied inline
 * into the input page) or via @pages (bulky state, passed as PFNs) -
 * exactly one of num_bytes or page_count must be non-zero.
 *
 * Returns 0 on success, negative errno otherwise. Deposits memory and
 * retries when the hypervisor asks for more.
 */
int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
			 /* Choose between pages and bytes */
			 struct hv_vp_state_data state_data, u64 page_count,
			 struct page **pages, u32 num_bytes, u8 *bytes)
{
	struct hv_input_set_vp_state *input;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;
	u16 varhead_sz;

	/* Both variants must fit within the single input page */
	if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
		return -EINVAL;
	if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
		return -EINVAL;

	/* varhead_sz is in 8-byte units per the hypercall ABI */
	if (num_bytes)
		/* round up to 8 and divide by 8 */
		varhead_sz = (num_bytes + 7) >> 3;
	else if (page_count)
		varhead_sz = page_count;
	else
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		if (num_bytes) {
			memcpy((u8 *)input->data, bytes, num_bytes);
		} else {
			for (i = 0; i < page_count; i++)
				input->data[i].pfns = page_to_pfn(pages[i]);
		}

		/* Encode the variable-size payload in the control word */
		control = (HVCALL_SET_VP_STATE) |
			  (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, NULL);

		if (!hv_result_needs_memory(status)) {
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_deposit_memory(partition_id, status);
	} while (!ret);

	return ret;
}
540
/*
 * Map a VP state page (e.g. register page, intercept message page).
 *
 * @state_page: in/out. If *state_page is non-NULL on entry, its PFN is
 * offered to the hypervisor as the requested map location (overlay GPFN
 * mode). On success, *state_page is set to the page the hypervisor
 * actually mapped.
 *
 * Returns 0 on success, negative errno otherwise. Deposits memory and
 * retries when the hypervisor asks for more.
 */
static int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
				     union hv_input_vtl input_vtl,
				     struct page **state_page)
{
	struct hv_input_map_vp_state_page *input;
	struct hv_output_map_vp_state_page *output;
	u64 status;
	int ret;
	unsigned long flags;

	do {
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->type = type;
		input->input_vtl = input_vtl;

		if (*state_page) {
			/* Caller supplied the backing page (overlay mode) */
			input->flags.map_location_provided = 1;
			input->requested_map_location =
				page_to_pfn(*state_page);
		}

		status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input,
					 output);

		if (!hv_result_needs_memory(status)) {
			/* Read the output before re-enabling interrupts */
			if (hv_result_success(status))
				*state_page = pfn_to_page(output->map_location);
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}

		local_irq_restore(flags);

		ret = hv_deposit_memory(partition_id, status);
	} while (!ret);

	trace_mshv_hvcall_map_vp_state_page(partition_id, vp_index,
					    type, status);

	return ret;
}
590
mshv_use_overlay_gpfn(void)591 static bool mshv_use_overlay_gpfn(void)
592 {
593 return hv_l1vh_partition() &&
594 mshv_root.vmm_caps.vmm_can_provide_overlay_gpfn;
595 }
596
/*
 * Map a VP state page, allocating the backing page ourselves when the
 * overlay-GPFN scheme is in use.
 *
 * On success *state_page points at the mapped page; on failure any page
 * allocated here is freed and *state_page is NULL.
 */
int hv_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			 union hv_input_vtl input_vtl,
			 struct page **state_page)
{
	struct page *overlay = NULL;
	int ret;

	*state_page = NULL;
	if (mshv_use_overlay_gpfn()) {
		/* We must provide the backing page to the hypervisor */
		overlay = alloc_page(GFP_KERNEL);
		if (!overlay)
			return -ENOMEM;
		*state_page = overlay;
	}

	ret = hv_call_map_vp_state_page(partition_id, vp_index, type,
					input_vtl, state_page);
	if (ret && overlay) {
		__free_page(overlay);
		*state_page = NULL;
	}

	return ret;
}
623
/*
 * Ask the hypervisor to unmap a previously mapped VP state page.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
					union hv_input_vtl input_vtl)
{
	struct hv_input_unmap_vp_state_page *input;
	unsigned long irq_flags;
	u64 status;

	/* The per-cpu input page requires interrupts disabled */
	local_irq_save(irq_flags);

	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));

	input->partition_id = partition_id;
	input->vp_index = vp_index;
	input->type = type;
	input->input_vtl = input_vtl;

	status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);

	local_irq_restore(irq_flags);

	return hv_result_to_errno(status);
}
648
/*
 * Unmap a VP state page and, in overlay-GPFN mode, release the backing
 * page that hv_map_vp_state_page() allocated.
 */
int hv_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			   struct page *state_page, union hv_input_vtl input_vtl)
{
	int ret;

	ret = hv_call_unmap_vp_state_page(partition_id, vp_index, type,
					  input_vtl);

	if (mshv_use_overlay_gpfn() && state_page)
		__free_page(state_page);

	return ret;
}
659
/*
 * Read an extended partition property.
 *
 * @partition_id:	partition to query
 * @property_code:	HV_PARTITION_PROPERTY_* code
 * @arg:		property-specific argument
 * @property_value:	out: caller buffer for the property value
 * @property_value_sz:	bytes to copy into @property_value
 *
 * NOTE(review): property_value_sz is not validated against the size of
 * the hypercall output structure here - callers are presumably expected
 * to pass a size matching the property; confirm against callers.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hv_call_get_partition_property_ex(u64 partition_id, u64 property_code,
				      u64 arg, void *property_value,
				      size_t property_value_sz)
{
	u64 status;
	unsigned long flags;
	struct hv_input_get_partition_property_ex *input;
	struct hv_output_get_partition_property_ex *output;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = *this_cpu_ptr(hyperv_pcpu_output_arg);

	memset(input, 0, sizeof(*input));
	input->partition_id = partition_id;
	input->property_code = property_code;
	input->arg = arg;
	status = hv_do_hypercall(HVCALL_GET_PARTITION_PROPERTY_EX, input, output);

	if (!hv_result_success(status)) {
		local_irq_restore(flags);
		hv_status_debug(status, "\n");
		return hv_result_to_errno(status);
	}
	/* Copy from the per-cpu output page before re-enabling interrupts */
	memcpy(property_value, &output->property_value, property_value_sz);

	local_irq_restore(flags);

	return 0;
}
690
691 int
hv_call_clear_virtual_interrupt(u64 partition_id)692 hv_call_clear_virtual_interrupt(u64 partition_id)
693 {
694 int status;
695
696 status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
697 partition_id);
698
699 return hv_result_to_errno(status);
700 }
701
702 int
hv_call_create_port(u64 port_partition_id,union hv_port_id port_id,u64 connection_partition_id,struct hv_port_info * port_info,u8 port_vtl,u8 min_connection_vtl,int node)703 hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
704 u64 connection_partition_id,
705 struct hv_port_info *port_info,
706 u8 port_vtl, u8 min_connection_vtl, int node)
707 {
708 struct hv_input_create_port *input;
709 unsigned long flags;
710 int ret = 0;
711 int status;
712
713 do {
714 local_irq_save(flags);
715 input = *this_cpu_ptr(hyperv_pcpu_input_arg);
716 memset(input, 0, sizeof(*input));
717
718 input->port_partition_id = port_partition_id;
719 input->port_id = port_id;
720 input->connection_partition_id = connection_partition_id;
721 input->port_info = *port_info;
722 input->port_vtl = port_vtl;
723 input->min_connection_vtl = min_connection_vtl;
724 input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
725 status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
726 local_irq_restore(flags);
727 if (hv_result_success(status))
728 break;
729
730 if (!hv_result_needs_memory(status)) {
731 ret = hv_result_to_errno(status);
732 break;
733 }
734 ret = hv_deposit_memory(port_partition_id, status);
735 } while (!ret);
736
737 return ret;
738 }
739
740 int
hv_call_delete_port(u64 port_partition_id,union hv_port_id port_id)741 hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
742 {
743 union hv_input_delete_port input = { 0 };
744 int status;
745
746 input.port_partition_id = port_partition_id;
747 input.port_id = port_id;
748 status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
749 input.as_uint64[0],
750 input.as_uint64[1]);
751
752 return hv_result_to_errno(status);
753 }
754
755 int
hv_call_connect_port(u64 port_partition_id,union hv_port_id port_id,u64 connection_partition_id,union hv_connection_id connection_id,struct hv_connection_info * connection_info,u8 connection_vtl,int node)756 hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
757 u64 connection_partition_id,
758 union hv_connection_id connection_id,
759 struct hv_connection_info *connection_info,
760 u8 connection_vtl, int node)
761 {
762 struct hv_input_connect_port *input;
763 unsigned long flags;
764 int ret = 0, status;
765
766 do {
767 local_irq_save(flags);
768 input = *this_cpu_ptr(hyperv_pcpu_input_arg);
769 memset(input, 0, sizeof(*input));
770 input->port_partition_id = port_partition_id;
771 input->port_id = port_id;
772 input->connection_partition_id = connection_partition_id;
773 input->connection_id = connection_id;
774 input->connection_info = *connection_info;
775 input->connection_vtl = connection_vtl;
776 input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
777 status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);
778
779 local_irq_restore(flags);
780 if (hv_result_success(status))
781 break;
782
783 if (!hv_result_needs_memory(status)) {
784 ret = hv_result_to_errno(status);
785 break;
786 }
787 ret = hv_deposit_memory(connection_partition_id, status);
788 } while (!ret);
789
790 return ret;
791 }
792
793 int
hv_call_disconnect_port(u64 connection_partition_id,union hv_connection_id connection_id)794 hv_call_disconnect_port(u64 connection_partition_id,
795 union hv_connection_id connection_id)
796 {
797 union hv_input_disconnect_port input = { 0 };
798 int status;
799
800 input.connection_partition_id = connection_partition_id;
801 input.connection_id = connection_id;
802 input.is_doorbell = 1;
803 status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
804 input.as_uint64[0],
805 input.as_uint64[1]);
806
807 return hv_result_to_errno(status);
808 }
809
810 int
hv_call_notify_port_ring_empty(u32 sint_index)811 hv_call_notify_port_ring_empty(u32 sint_index)
812 {
813 union hv_input_notify_port_ring_empty input = { 0 };
814 int status;
815
816 input.sint_index = sint_index;
817 status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
818 input.as_uint64);
819
820 return hv_result_to_errno(status);
821 }
822
823 /*
824 * Equivalent of hv_call_map_stats_page() for cases when the caller provides
825 * the map location.
826 *
827 * NOTE: This is a newer hypercall that always supports SELF and PARENT stats
828 * areas, unlike hv_call_map_stats_page().
829 */
static int hv_call_map_stats_page2(enum hv_stats_object_type type,
				   const union hv_stats_object_identity *identity,
				   u64 map_location)
{
	unsigned long flags;
	struct hv_input_map_stats_page2 *input;
	u64 status;
	int ret;

	/* This variant is only valid in overlay-GPFN mode with a location */
	if (!map_location || !mshv_use_overlay_gpfn())
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;
		input->map_location = map_location;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE2, input, NULL);

		local_irq_restore(flags);

		ret = hv_result_to_errno(status);

		if (!ret)
			break;

		/* Any failure other than "needs memory" is final */
		if (!hv_result_needs_memory(status)) {
			hv_status_debug(status, "\n");
			break;
		}

		/* Deposit into the root partition and retry */
		ret = hv_deposit_memory(hv_current_partition_id, status);
	} while (!ret);

	return ret;
}
870
871 static int
hv_stats_get_area_type(enum hv_stats_object_type type,const union hv_stats_object_identity * identity)872 hv_stats_get_area_type(enum hv_stats_object_type type,
873 const union hv_stats_object_identity *identity)
874 {
875 switch (type) {
876 case HV_STATS_OBJECT_HYPERVISOR:
877 return identity->hv.stats_area_type;
878 case HV_STATS_OBJECT_LOGICAL_PROCESSOR:
879 return identity->lp.stats_area_type;
880 case HV_STATS_OBJECT_PARTITION:
881 return identity->partition.stats_area_type;
882 case HV_STATS_OBJECT_VP:
883 return identity->vp.stats_area_type;
884 }
885
886 return -EINVAL;
887 }
888
889 /*
890 * Map a stats page, where the page location is provided by the hypervisor.
891 *
892 * NOTE: The concept of separate SELF and PARENT stats areas does not exist on
893 * older hypervisor versions. All the available stats information can be found
894 * on the SELF page. When attempting to map the PARENT area on a hypervisor
895 * that doesn't support it, return "success" but with a NULL address. The
896 * caller should check for this case and instead fallback to the SELF area
897 * alone.
898 */
static int
hv_call_map_stats_page(enum hv_stats_object_type type,
		       const union hv_stats_object_identity *identity,
		       struct hv_stats_page **addr)
{
	unsigned long flags;
	struct hv_input_map_stats_page *input;
	struct hv_output_map_stats_page *output;
	u64 status, pfn;
	int ret = 0;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
		/* Capture the PFN before the per-cpu page can be reused */
		pfn = output->map_location;

		local_irq_restore(flags);

		if (!hv_result_needs_memory(status)) {
			/* Success: fall out of the loop and publish *addr */
			if (hv_result_success(status))
				break;

			/*
			 * Older hypervisors have no PARENT area and reject
			 * the request; report success with a NULL address
			 * so the caller can fall back to SELF (see the
			 * comment above this function).
			 */
			if (hv_stats_get_area_type(type, identity) == HV_STATS_AREA_PARENT &&
			    hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
				*addr = NULL;
				return 0;
			}

			hv_status_debug(status, "\n");
			return hv_result_to_errno(status);
		}

		/* Hypervisor needs memory: deposit into root and retry */
		ret = hv_deposit_memory(hv_current_partition_id, status);
		if (ret)
			return ret;
	} while (!ret);

	/* Only reached on success; pfn refers to the mapped stats page */
	*addr = page_address(pfn_to_page(pfn));

	return ret;
}
947
/*
 * Map a stats page, choosing between the overlay-GPFN variant (we
 * allocate and provide the page) and the legacy variant (the hypervisor
 * provides the page).
 *
 * On success *addr points at the stats page (may be NULL for the legacy
 * PARENT-area fallback, see hv_call_map_stats_page()). On failure any
 * page allocated here is freed and *addr is NULL.
 */
int hv_map_stats_page(enum hv_stats_object_type type,
		      const union hv_stats_object_identity *identity,
		      struct hv_stats_page **addr)
{
	int ret;
	struct page *allocated_page = NULL;

	if (!addr)
		return -EINVAL;

	if (mshv_use_overlay_gpfn()) {
		/* We must supply the backing page ourselves */
		allocated_page = alloc_page(GFP_KERNEL);
		if (!allocated_page)
			return -ENOMEM;

		ret = hv_call_map_stats_page2(type, identity,
					      page_to_pfn(allocated_page));
		*addr = page_address(allocated_page);
	} else {
		ret = hv_call_map_stats_page(type, identity, addr);
	}

	if (ret && allocated_page) {
		__free_page(allocated_page);
		*addr = NULL;
	}

	return ret;
}
977
hv_call_unmap_stats_page(enum hv_stats_object_type type,const union hv_stats_object_identity * identity)978 static int hv_call_unmap_stats_page(enum hv_stats_object_type type,
979 const union hv_stats_object_identity *identity)
980 {
981 unsigned long flags;
982 struct hv_input_unmap_stats_page *input;
983 u64 status;
984
985 local_irq_save(flags);
986 input = *this_cpu_ptr(hyperv_pcpu_input_arg);
987
988 memset(input, 0, sizeof(*input));
989 input->type = type;
990 input->identity = *identity;
991
992 status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
993 local_irq_restore(flags);
994
995 return hv_result_to_errno(status);
996 }
997
/*
 * Unmap a stats page and, in overlay-GPFN mode, release the backing
 * page that hv_map_stats_page() allocated.
 */
int hv_unmap_stats_page(enum hv_stats_object_type type,
			struct hv_stats_page *page_addr,
			const union hv_stats_object_identity *identity)
{
	int ret = hv_call_unmap_stats_page(type, identity);

	if (mshv_use_overlay_gpfn() && page_addr)
		__free_page(virt_to_page(page_addr));

	return ret;
}
1011
/*
 * Acquire or release host access to a set of sparse SPA pages.
 *
 * @partition_id:	partition the pages are exclusive to (only written
 *			into the input when MAKE_EXCLUSIVE is requested)
 * @pages:		page structs backing the SPA pages
 * @page_struct_count:	number of entries in @pages (4K pages)
 * @host_access:	requested host access bits
 * @flags:		HV_MODIFY_SPA_PAGE_HOST_ACCESS_* flags
 * @acquire:		non-zero to acquire access, zero to release
 *
 * Returns 0 on success, negative errno otherwise.
 *
 * Fix: the original returned -EINVAL from inside the PFN-fill loop while
 * local_irq_save() was still in effect, leaving interrupts disabled.
 * Bail out of the loop first and restore irqs before returning, matching
 * the pattern used in hv_do_map_gpa_hcall().
 */
int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
				   u64 page_struct_count, u32 host_access,
				   u32 flags, u8 acquire)
{
	struct hv_input_modify_sparse_spa_page_host_access *input_page;
	u64 status;
	u64 done = 0;
	unsigned long irq_flags, large_shift = 0;
	u64 page_count = page_struct_count;
	u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
			     HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;
		/* Convert the 4K page count into a 2M large-page count */
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain,
				    HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);
		int ret = 0;

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		/* Only set the partition id if you are making the pages
		 * exclusive
		 */
		if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
			input_page->partition_id = partition_id;
		input_page->flags = flags;
		input_page->host_access = host_access;

		for (i = 0; i < rep_count; i++) {
			u64 index = (done + i) << large_shift;

			if (index >= page_struct_count) {
				/* Don't return here: irqs are disabled */
				ret = -EINVAL;
				break;
			}

			input_page->spa_page_list[i] =
				page_to_pfn(pages[index]);
		}

		if (ret) {
			local_irq_restore(irq_flags);
			return ret;
		}

		status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
					     NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);

		if (!hv_result_success(status))
			return hv_result_to_errno(status);

		done += completed;
	}

	return 0;
}
1075