// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Microsoft Corporation.
 *
 * Hypercall helper functions used by the mshv_root module.
 *
 * Authors: Microsoft Linux virtualization team
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/mshyperv.h>

#include "mshv_root.h"

/* Determined empirically */
#define HV_INIT_PARTITION_DEPOSIT_PAGES 208
#define HV_MAP_GPA_DEPOSIT_PAGES	256
#define HV_UMAP_GPA_PAGES		512

#define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))

#define HV_WITHDRAW_BATCH_SIZE	(HV_HYP_PAGE_SIZE / sizeof(u64))
#define HV_MAP_GPA_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
		/ sizeof(u64))
#define HV_GET_VP_STATE_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
		/ sizeof(u64))
#define HV_SET_VP_STATE_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
		/ sizeof(u64))
#define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE	\
	((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
		/ sizeof(union hv_gpa_page_access_state))
#define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT		 \
	((HV_HYP_PAGE_SIZE -						 \
	  sizeof(struct hv_input_modify_sparse_spa_page_host_access)) / \
	 sizeof(u64))

int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
{
	struct hv_input_withdraw_memory *input_page;
	struct hv_output_withdraw_memory *output_page;
	struct page *page;
	u16 completed;
	unsigned long remaining = count;
	u64 status;
	int i;
	unsigned long flags;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	output_page = page_address(page);

	while (remaining) {
		local_irq_save(flags);

		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		input_page->partition_id = partition_id;
		status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
					     min(remaining, HV_WITHDRAW_BATCH_SIZE),
					     0, input_page, output_page);

		local_irq_restore(flags);

		completed = hv_repcomp(status);

		for (i = 0; i < completed; i++)
			__free_page(pfn_to_page(output_page->gpa_page_list[i]));

		if (!hv_result_success(status)) {
			if (hv_result(status) == HV_STATUS_NO_RESOURCES)
				status = HV_STATUS_SUCCESS;
			break;
		}

		remaining -= completed;
	}
	free_page((unsigned long)output_page);

	return hv_result_to_errno(status);
}
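
/*
 * Many of the helpers below share a retry pattern: if the hypervisor
 * fails a call with HV_STATUS_INSUFFICIENT_MEMORY, deposit more pages
 * into the relevant partition via hv_call_deposit_pages() and retry,
 * giving up only when the deposit itself fails or a different error
 * is returned.
 */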

int hv_call_create_partition(u64 flags,
			     struct hv_partition_creation_properties creation_properties,
			     union hv_partition_isolation_properties isolation_properties,
			     u64 *partition_id)
{
	struct hv_input_create_partition *input;
	struct hv_output_create_partition *output;
	u64 status;
	int ret;
	unsigned long irq_flags;

	do {
		local_irq_save(irq_flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->flags = flags;
		input->compatibility_version = HV_COMPATIBILITY_21_H2;

		memcpy(&input->partition_creation_properties, &creation_properties,
		       sizeof(creation_properties));

		memcpy(&input->isolation_properties, &isolation_properties,
		       sizeof(isolation_properties));

		status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
					 input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (hv_result_success(status))
				*partition_id = output->partition_id;
			local_irq_restore(irq_flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(irq_flags);
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    hv_current_partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_initialize_partition(u64 partition_id)
{
	struct hv_input_initialize_partition input;
	u64 status;
	int ret;

	input.partition_id = partition_id;

	ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
				    HV_INIT_PARTITION_DEPOSIT_PAGES);
	if (ret)
		return ret;

	do {
		status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
					       *(u64 *)&input);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			break;
		}
		ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_finalize_partition(u64 partition_id)
{
	struct hv_input_finalize_partition input;
	u64 status;

	input.partition_id = partition_id;
	status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
				       *(u64 *)&input);

	return hv_result_to_errno(status);
}

int hv_call_delete_partition(u64 partition_id)
{
	struct hv_input_delete_partition input;
	u64 status;

	input.partition_id = partition_id;
	status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION, *(u64 *)&input);

	return hv_result_to_errno(status);
}
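
/*
 * GPA (un)mapping is batched: each iteration fills the per-cpu input
 * page with as many PFNs as fit (HV_MAP_GPA_BATCH_SIZE). With
 * HV_MAP_GPA_LARGE_PAGE set, the 4k page count must be 2M aligned and
 * is converted to a count of 2M pages, with the PFN list stepped in
 * 2M-sized strides. If mapping fails partway through, whatever was
 * already mapped is unmapped again so the caller never sees a
 * partially mapped range.
 */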

/* Ask the hypervisor to map guest ram pages or the guest mmio space */
static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
			       u32 flags, struct page **pages, u64 mmio_spa)
{
	struct hv_input_map_gpa_pages *input_page;
	u64 status, *pfnlist;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;
	u64 page_count = page_struct_count;

	if (page_count == 0 || (pages && mmio_spa))
		return -EINVAL;

	if (flags & HV_MAP_GPA_LARGE_PAGE) {
		if (mmio_spa)
			return -EINVAL;

		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->map_flags = flags;
		pfnlist = input_page->source_gpa_page_list;

		for (i = 0; i < rep_count; i++)
			if (flags & HV_MAP_GPA_NO_ACCESS) {
				pfnlist[i] = 0;
			} else if (pages) {
				u64 index = (done + i) << large_shift;

				if (index >= page_struct_count) {
					ret = -EINVAL;
					break;
				}
				pfnlist[i] = page_to_pfn(pages[index]);
			} else {
				pfnlist[i] = mmio_spa + done + i;
			}
		if (ret) {
			/* Don't leave the loop with interrupts still disabled */
			local_irq_restore(irq_flags);
			break;
		}

		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
					     input_page, NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);

		if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
						    HV_MAP_GPA_DEPOSIT_PAGES);
			if (ret)
				break;

		} else if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	if (ret && done) {
		u32 unmap_flags = 0;

		if (flags & HV_MAP_GPA_LARGE_PAGE)
			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
	}

	return ret;
}

/* Ask the hypervisor to map guest ram pages */
int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
			  u32 flags, struct page **pages)
{
	return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
				   flags, pages, 0);
}

/* Ask the hypervisor to map guest mmio space */
int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
{
	int i;
	u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
		    HV_MAP_GPA_NOT_CACHED;

	for (i = 0; i < numpgs; i++)
		if (page_is_ram(mmio_spa + i))
			return -EINVAL;

	return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
				   mmio_spa);
}

int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
			    u32 flags)
{
	struct hv_input_unmap_gpa_pages *input_page;
	u64 status, page_count = page_count_4k;
	unsigned long irq_flags, large_shift = 0;
	int ret = 0, done = 0;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;

		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong completed, remain = page_count - done;
		int rep_count = min(remain, HV_UMAP_GPA_PAGES);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input_page->target_partition_id = partition_id;
		input_page->target_gpa_base = gfn + (done << large_shift);
		input_page->unmap_flags = flags;
		status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
					     0, input_page, NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);
		if (!hv_result_success(status)) {
			ret = hv_result_to_errno(status);
			break;
		}

		done += completed;
	}

	return ret;
}

int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
				  union hv_gpa_page_access_state_flags state_flags,
				  int *written_total,
				  union hv_gpa_page_access_state *states)
{
	struct hv_input_get_gpa_pages_access_state *input_page;
	union hv_gpa_page_access_state *output_page;
	int completed = 0;
	unsigned long remaining = count;
	int rep_count, i;
	u64 status = 0;
	unsigned long flags;

	*written_total = 0;
	while (remaining) {
		local_irq_save(flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input_page->partition_id = partition_id;
		input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
		input_page->flags = state_flags;
		rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);

		status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
					     0, input_page, output_page);
		if (!hv_result_success(status)) {
			local_irq_restore(flags);
			break;
		}
		completed = hv_repcomp(status);
		for (i = 0; i < completed; ++i)
			states[i].as_uint8 = output_page[i].as_uint8;

		local_irq_restore(flags);
		states += completed;
		*written_total += completed;
		remaining -= completed;
	}

	return hv_result_to_errno(status);
}
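
/*
 * Note on locking: the per-cpu hypercall argument pages
 * (hyperv_pcpu_input_arg/hyperv_pcpu_output_arg) are shared by every
 * hypercall issued on a given CPU, so all helpers here fill them and
 * issue the hypercall with interrupts disabled to keep the contents
 * from being clobbered.
 */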

int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
				     u64 dest_addr,
				     union hv_interrupt_control control)
{
	struct hv_input_assert_virtual_interrupt *input;
	unsigned long flags;
	u64 status;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));
	input->partition_id = partition_id;
	input->vector = vector;
	input->dest_addr = dest_addr;
	input->control = control;
	status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
	local_irq_restore(flags);

	return hv_result_to_errno(status);
}

int hv_call_delete_vp(u64 partition_id, u32 vp_index)
{
	union hv_input_delete_vp input = {};
	u64 status;

	input.partition_id = partition_id;
	input.vp_index = vp_index;

	status = hv_do_fast_hypercall16(HVCALL_DELETE_VP,
					input.as_uint64[0], input.as_uint64[1]);

	return hv_result_to_errno(status);
}
EXPORT_SYMBOL_GPL(hv_call_delete_vp);

int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
			 struct hv_vp_state_data state_data,
			 /* Choose between pages and ret_output */
			 u64 page_count, struct page **pages,
			 union hv_output_get_vp_state *ret_output)
{
	struct hv_input_get_vp_state *input;
	union hv_output_get_vp_state *output;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;

	if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
		return -EINVAL;

	if (!page_count && !ret_output)
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);
		memset(input, 0, sizeof(*input));
		memset(output, 0, sizeof(*output));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		for (i = 0; i < page_count; i++)
			input->output_data_pfns[i] = page_to_pfn(pages[i]);

		control = (HVCALL_GET_VP_STATE) |
			  (page_count << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (hv_result_success(status) && ret_output)
				memcpy(ret_output, output, sizeof(*output));

			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    partition_id, 1);
	} while (!ret);

	return ret;
}
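
/*
 * hv_call_set_vp_state() passes the state either as a list of PFNs or
 * as an inline byte buffer. The hypercall's variable header size is
 * counted in 8-byte chunks, which is why num_bytes is rounded up to a
 * multiple of 8 and divided by 8 below.
 */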

int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
			 /* Choose between pages and bytes */
			 struct hv_vp_state_data state_data, u64 page_count,
			 struct page **pages, u32 num_bytes, u8 *bytes)
{
	struct hv_input_set_vp_state *input;
	u64 status;
	int i;
	u64 control;
	unsigned long flags;
	int ret = 0;
	u16 varhead_sz;

	if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
		return -EINVAL;
	if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
		return -EINVAL;

	if (num_bytes)
		/* round up to 8 and divide by 8 */
		varhead_sz = (num_bytes + 7) >> 3;
	else if (page_count)
		varhead_sz = page_count;
	else
		return -EINVAL;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->state_data = state_data;
		if (num_bytes) {
			memcpy((u8 *)input->data, bytes, num_bytes);
		} else {
			for (i = 0; i < page_count; i++)
				input->data[i].pfns = page_to_pfn(pages[i]);
		}

		control = (HVCALL_SET_VP_STATE) |
			  (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);

		status = hv_do_hypercall(control, input, NULL);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}
		local_irq_restore(flags);

		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
			      union hv_input_vtl input_vtl,
			      struct page **state_page)
{
	struct hv_input_map_vp_state_page *input;
	struct hv_output_map_vp_state_page *output;
	u64 status;
	int ret;
	unsigned long flags;

	do {
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->type = type;
		input->input_vtl = input_vtl;

		status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input, output);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (hv_result_success(status))
				*state_page = pfn_to_page(output->map_location);
			local_irq_restore(flags);
			ret = hv_result_to_errno(status);
			break;
		}

		local_irq_restore(flags);

		ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
				union hv_input_vtl input_vtl)
{
	unsigned long flags;
	u64 status;
	struct hv_input_unmap_vp_state_page *input;

	local_irq_save(flags);

	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	memset(input, 0, sizeof(*input));

	input->partition_id = partition_id;
	input->vp_index = vp_index;
	input->type = type;
	input->input_vtl = input_vtl;

	status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);

	local_irq_restore(flags);

	return hv_result_to_errno(status);
}

int
hv_call_clear_virtual_interrupt(u64 partition_id)
{
	int status;

	status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
				       partition_id);

	return hv_result_to_errno(status);
}
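
/*
 * Ports and connections are the two halves of an inter-partition
 * message channel: a port is created in the owning partition and a
 * connection in the partition that talks to it. Creating either object
 * may require depositing pages into the respective owning partition,
 * using the usual HV_STATUS_INSUFFICIENT_MEMORY retry loop.
 */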

int
hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
		    u64 connection_partition_id,
		    struct hv_port_info *port_info,
		    u8 port_vtl, u8 min_connection_vtl, int node)
{
	struct hv_input_create_port *input;
	unsigned long flags;
	int ret = 0;
	int status;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));

		input->port_partition_id = port_partition_id;
		input->port_id = port_id;
		input->connection_partition_id = connection_partition_id;
		input->port_info = *port_info;
		input->port_vtl = port_vtl;
		input->min_connection_vtl = min_connection_vtl;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
		local_irq_restore(flags);
		if (hv_result_success(status))
			break;

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			break;
		}
		ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);

	} while (!ret);

	return ret;
}

int
hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
{
	union hv_input_delete_port input = { 0 };
	int status;

	input.port_partition_id = port_partition_id;
	input.port_id = port_id;
	status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
					input.as_uint64[0],
					input.as_uint64[1]);

	return hv_result_to_errno(status);
}

int
hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
		     u64 connection_partition_id,
		     union hv_connection_id connection_id,
		     struct hv_connection_info *connection_info,
		     u8 connection_vtl, int node)
{
	struct hv_input_connect_port *input;
	unsigned long flags;
	int ret = 0, status;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		memset(input, 0, sizeof(*input));
		input->port_partition_id = port_partition_id;
		input->port_id = port_id;
		input->connection_partition_id = connection_partition_id;
		input->connection_id = connection_id;
		input->connection_info = *connection_info;
		input->connection_vtl = connection_vtl;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);

		local_irq_restore(flags);
		if (hv_result_success(status))
			break;

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			break;
		}
		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    connection_partition_id, 1);
	} while (!ret);

	return ret;
}

int
hv_call_disconnect_port(u64 connection_partition_id,
			union hv_connection_id connection_id)
{
	union hv_input_disconnect_port input = { 0 };
	int status;

	input.connection_partition_id = connection_partition_id;
	input.connection_id = connection_id;
	input.is_doorbell = 1;
	status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
					input.as_uint64[0],
					input.as_uint64[1]);

	return hv_result_to_errno(status);
}

int
hv_call_notify_port_ring_empty(u32 sint_index)
{
	union hv_input_notify_port_ring_empty input = { 0 };
	int status;

	input.sint_index = sint_index;
	status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
				       input.as_uint64);

	return hv_result_to_errno(status);
}
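
/*
 * Mapping a stats page returns the PFN of a page provided by the
 * hypervisor; it is converted to a kernel virtual address once the
 * hypercall has succeeded. The deposit on HV_STATUS_INSUFFICIENT_MEMORY
 * goes to the root partition itself (hv_current_partition_id).
 */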

int hv_call_map_stat_page(enum hv_stats_object_type type,
			  const union hv_stats_object_identity *identity,
			  void **addr)
{
	unsigned long flags;
	struct hv_input_map_stats_page *input;
	struct hv_output_map_stats_page *output;
	u64 status, pfn;
	int ret = 0;

	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		memset(input, 0, sizeof(*input));
		input->type = type;
		input->identity = *identity;

		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
		pfn = output->map_location;

		local_irq_restore(flags);
		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			if (hv_result_success(status))
				break;
			return ret;
		}

		ret = hv_call_deposit_pages(NUMA_NO_NODE,
					    hv_current_partition_id, 1);
		if (ret)
			return ret;
	} while (!ret);

	*addr = page_address(pfn_to_page(pfn));

	return ret;
}

int hv_call_unmap_stat_page(enum hv_stats_object_type type,
			    const union hv_stats_object_identity *identity)
{
	unsigned long flags;
	struct hv_input_unmap_stats_page *input;
	u64 status;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	memset(input, 0, sizeof(*input));
	input->type = type;
	input->identity = *identity;

	status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
	local_irq_restore(flags);

	return hv_result_to_errno(status);
}

int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
				   u64 page_struct_count, u32 host_access,
				   u32 flags, u8 acquire)
{
	struct hv_input_modify_sparse_spa_page_host_access *input_page;
	u64 status;
	int done = 0;
	unsigned long irq_flags, large_shift = 0;
	u64 page_count = page_struct_count;
	u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
			     HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;

	if (page_count == 0)
		return -EINVAL;

	if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
			return -EINVAL;
		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
		page_count >>= large_shift;
	}

	while (done < page_count) {
		ulong i, completed, remain = page_count - done;
		int rep_count = min(remain,
				    HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);

		local_irq_save(irq_flags);
		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

		memset(input_page, 0, sizeof(*input_page));
		/*
		 * Only set the partition id if you are making the pages
		 * exclusive
		 */
		if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
			input_page->partition_id = partition_id;
		input_page->flags = flags;
		input_page->host_access = host_access;

		for (i = 0; i < rep_count; i++) {
			u64 index = (done + i) << large_shift;

			if (index >= page_struct_count) {
				/* Re-enable interrupts before bailing out */
				local_irq_restore(irq_flags);
				return -EINVAL;
			}

			input_page->spa_page_list[i] =
				page_to_pfn(pages[index]);
		}

		status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
					     NULL);
		local_irq_restore(irq_flags);

		completed = hv_repcomp(status);

		if (!hv_result_success(status))
			return hv_result_to_errno(status);

		done += completed;
	}

	return 0;
}