// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include <linux/set_memory.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;
EXPORT_SYMBOL_FOR_MODULES(hv_context, "mshv_vtl");

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 *
 * Returns the Hyper-V status code of the hypercall (HV_STATUS_SUCCESS == 0
 * on success), or -EMSGSIZE if the payload doesn't fit in a single message.
 */
int hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	unsigned long flags;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	/*
	 * Disable interrupts so the per-cpu hypercall input page picked
	 * below cannot be reused by an interrupt handler on this CPU
	 * while the message is being built and posted.
	 */
	local_irq_save(flags);

	/*
	 * A TDX VM with the paravisor must use the decrypted post_msg_page: see
	 * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
	 * can use the encrypted hyperv_pcpu_input_arg because it copies the
	 * input into the GHCB page, which has been decrypted by the paravisor.
	 */
	if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
		aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
	else
		aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	if (ms_hyperv.paravisor_present && !vmbus_is_confidential()) {
		/*
		 * If the VMBus isn't confidential, use the CoCo-specific
		 * mechanism to communicate with the hypervisor.
		 */
		if (hv_isolation_type_tdx())
			status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
						  virt_to_phys(aligned_msg), 0);
		else if (hv_isolation_type_snp())
			status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
						   aligned_msg, NULL,
						   sizeof(*aligned_msg));
		else
			status = HV_STATUS_INVALID_PARAMETER;
	} else {
		u64 control = HVCALL_POST_MESSAGE;

		control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
		/*
		 * If there is no paravisor, this will go to the hypervisor.
		 * In the Confidential VMBus case, there is the paravisor
		 * to which this will trap.
		 */
		status = hv_do_hypercall(control, aligned_msg, NULL);
	}

	local_irq_restore(flags);

	return hv_result(status);
}
EXPORT_SYMBOL_FOR_MODULES(hv_post_message, "mshv_vtl");

/*
 * hv_alloc_page - Allocate one zeroed page, optionally decrypting it.
 *
 * @page:    out parameter; receives the page address, or NULL on failure.
 * @decrypt: when true, mark the page decrypted (shared with the host).
 * @note:    human-readable tag used in the error message.
 *
 * Returns 0 on success or a negative errno. On a decryption failure the
 * page is deliberately leaked (see the comment at the 'failed' label).
 */
static int hv_alloc_page(void **page, bool decrypt, const char *note)
{
	int ret = 0;

	/*
	 * After the page changes its encryption status, its contents might
	 * appear scrambled on some hardware. Thus `get_zeroed_page` would
	 * zero the page out in vain, so do that explicitly exactly once.
	 *
	 * By default, the page is allocated encrypted in a CoCo VM.
	 */
	*page = (void *)__get_free_page(GFP_KERNEL);
	if (!*page)
		return -ENOMEM;

	if (decrypt)
		ret = set_memory_decrypted((unsigned long)*page, 1);
	if (ret)
		goto failed;

	memset(*page, 0, PAGE_SIZE);
	return 0;

failed:
	/*
	 * Report the failure but don't put the page back on the free list as
	 * its encryption status is unknown.
	 */
	pr_err("allocation failed for %s page, error %d, decrypted %d\n",
	       note, ret, decrypt);
	*page = NULL;
	return ret;
}

/*
 * hv_free_page - Free a page allocated with hv_alloc_page().
 *
 * @page:    in/out; the page to free. Reset to NULL in all cases.
 * @encrypt: when true, re-encrypt the page before freeing (the inverse
 *           of the 'decrypt' flag passed at allocation time).
 * @note:    human-readable tag used in the error message.
 *
 * Returns 0 on success or the error from set_memory_encrypted(), in
 * which case the page is intentionally leaked rather than returned to
 * the allocator with an unknown encryption status.
 */
static int hv_free_page(void **page, bool encrypt, const char *note)
{
	int ret = 0;

	if (!*page)
		return 0;

	if (encrypt)
		ret = set_memory_encrypted((unsigned long)*page, 1);

	/*
	 * In the case of the failure, the page is leaked. Something is wrong,
	 * prefer to lose the page with the unknown encryption status and stay afloat.
	 */
	if (ret)
		pr_err("deallocation failed for %s page, error %d, encrypt %d\n",
		       note, ret, encrypt);
	else
		free_page((unsigned long)*page);

	*page = NULL;

	return ret;
}

/*
 * hv_synic_alloc - Allocate the per-cpu SynIC pages and NUMA map.
 *
 * Returns 0 on success or a negative errno. On failure the caller is
 * expected to clean up by calling hv_synic_free().
 */
int hv_synic_alloc(void)
{
	int cpu, ret = -ENOMEM;
	struct hv_per_cpu_context *hv_cpu;
	/* With Confidential VMBus the pages stay encrypted (guest-private). */
	const bool decrypt = !vmbus_is_confidential();

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and cleanup properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids);
	if (!hv_context.hv_numa_map) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long)hv_cpu);

		/* See the comment in hv_post_message() re the TDX post_msg_page. */
		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
			ret = hv_alloc_page(&hv_cpu->post_msg_page,
					    decrypt, "post msg");
			if (ret)
				goto err;
		}

		/*
		 * If these SynIC pages are not allocated, SIEF and SIM pages
		 * are configured using what the root partition or the paravisor
		 * provides upon reading the SIEFP and SIMP registers.
		 */
		if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
			ret = hv_alloc_page(&hv_cpu->hyp_synic_message_page,
					    decrypt, "hypervisor SynIC msg");
			if (ret)
				goto err;
			ret = hv_alloc_page(&hv_cpu->hyp_synic_event_page,
					    decrypt, "hypervisor SynIC event");
			if (ret)
				goto err;
		}

		/* Confidential VMBus uses a second, paravisor-owned SynIC. */
		if (vmbus_is_confidential()) {
			ret = hv_alloc_page(&hv_cpu->para_synic_message_page,
					    false, "paravisor SynIC msg");
			if (ret)
				goto err;
			ret = hv_alloc_page(&hv_cpu->para_synic_event_page,
					    false, "paravisor SynIC event");
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free()
	 */
	return ret;
}

/*
 * hv_synic_free - Free everything allocated by hv_synic_alloc().
 *
 * Safe to call after a partial hv_synic_alloc() failure: hv_free_page()
 * skips NULL pages.
 */
void hv_synic_free(void)
{
	int cpu;
	/* Mirror of the 'decrypt' flag used in hv_synic_alloc(). */
	const bool encrypt = !vmbus_is_confidential();

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu =
			per_cpu_ptr(hv_context.cpu_context, cpu);

		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx())
			hv_free_page(&hv_cpu->post_msg_page,
				     encrypt, "post msg");
		if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
			hv_free_page(&hv_cpu->hyp_synic_event_page,
				     encrypt, "hypervisor SynIC event");
			hv_free_page(&hv_cpu->hyp_synic_message_page,
				     encrypt, "hypervisor SynIC msg");
		}
		if (vmbus_is_confidential()) {
			hv_free_page(&hv_cpu->para_synic_event_page,
				     false, "paravisor SynIC event");
			hv_free_page(&hv_cpu->para_synic_message_page,
				     false, "paravisor SynIC msg");
		}
	}

	kfree(hv_context.hv_numa_map);
}

/*
 * hv_hyp_synic_enable_regs - Initialize the Synthetic Interrupt Controller
 * with the hypervisor.
 */
void hv_hyp_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu =
		per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;

	/* Setup the Synic's message page with the hypervisor. */
	simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
	simp.simp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		/* Mask out vTOM bit and map as decrypted */
		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
			~ms_hyperv.shared_gpa_boundary;
		hv_cpu->hyp_synic_message_page =
			memremap(base, HV_HYP_PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
		if (!hv_cpu->hyp_synic_message_page)
			pr_err("Fail to map synic message page.\n");
	} else {
		simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_msr(HV_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page with the hypervisor. */
	siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		/* Mask out vTOM bit and map as decrypted */
		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
			~ms_hyperv.shared_gpa_boundary;
		hv_cpu->hyp_synic_event_page =
			memremap(base, HV_HYP_PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
		if (!hv_cpu->hyp_synic_event_page)
			pr_err("Fail to map synic event page.\n");
	} else {
		siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
	hv_enable_coco_interrupt(cpu, vmbus_interrupt, true);

	/* Setup the shared SINT. */
	if (vmbus_irq != -1)
		enable_percpu_irq(vmbus_irq, 0);
	shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);

	shared_sint.vector = vmbus_interrupt;
	shared_sint.masked = false;
	shared_sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
}

/* Enable the global synic bit in the hypervisor's SCONTROL register. */
static void hv_hyp_synic_enable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Enable the global synic bit */
	sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
	sctrl.enable = 1;

	hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}

/*
 * hv_para_synic_enable_regs - Point the paravisor's SynIC message and
 * event registers at the per-cpu pages (Confidential VMBus only).
 */
static void hv_para_synic_enable_regs(unsigned int cpu)
{
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);

	/* Setup the Synic's message page with the paravisor. */
	simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->para_synic_message_page)
		>> HV_HYP_PAGE_SHIFT;
	hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page with the paravisor. */
	siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->para_synic_event_page)
		>> HV_HYP_PAGE_SHIFT;
	hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
}

/* Enable the global synic bit in the paravisor's SCONTROL register. */
static void hv_para_synic_enable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Enable the global synic bit */
	sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
	sctrl.enable = 1;
	hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
}

/*
 * hv_synic_init - Per-cpu SynIC bring-up (CPU hotplug online callback).
 *
 * Always returns 0.
 */
int hv_synic_init(unsigned int cpu)
{
	if (vmbus_is_confidential())
		hv_para_synic_enable_regs(cpu);

	/*
	 * The SINT is set in hv_hyp_synic_enable_regs() by calling
	 * hv_set_msr(). hv_set_msr() in turn has special case code for the
	 * SINT MSRs that write to the hypervisor version of the MSR *and*
	 * the paravisor version of the MSR (but *without* the proxy bit when
	 * VMBus is confidential).
	 *
	 * Then enable interrupts via the paravisor if VMBus is confidential,
	 * and otherwise via the hypervisor.
	 */

	hv_hyp_synic_enable_regs(cpu);
	if (vmbus_is_confidential())
		hv_para_synic_enable_interrupts();
	else
		hv_hyp_synic_enable_interrupts();

	hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

	return 0;
}

/*
 * hv_hyp_synic_disable_regs - Tear down the hypervisor's per-cpu SynIC
 * state: mask the shared SINT and disable the message and event pages.
 */
void hv_hyp_synic_disable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu =
		per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;

	shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
	hv_enable_coco_interrupt(cpu, vmbus_interrupt, false);

	simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
	/*
	 * In Isolation VM, simp and sief pages are allocated by
	 * paravisor. These pages also will be used by kdump
	 * kernel. So just reset enable bit here and keep page
	 * addresses.
	 */
	simp.simp_enabled = 0;
	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		if (hv_cpu->hyp_synic_message_page) {
			memunmap(hv_cpu->hyp_synic_message_page);
			hv_cpu->hyp_synic_message_page = NULL;
		}
	} else {
		simp.base_simp_gpa = 0;
	}

	hv_set_msr(HV_MSR_SIMP, simp.as_uint64);

	siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = 0;

	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		if (hv_cpu->hyp_synic_event_page) {
			memunmap(hv_cpu->hyp_synic_event_page);
			hv_cpu->hyp_synic_event_page = NULL;
		}
	} else {
		siefp.base_siefp_gpa = 0;
	}

	hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
}

/* Disable the global synic bit in the hypervisor's SCONTROL register. */
static void hv_hyp_synic_disable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Disable the global synic bit */
	sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
	sctrl.enable = 0;
	hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}

/*
 * hv_para_synic_disable_regs - Disable the paravisor's SynIC message and
 * event pages (Confidential VMBus only). Page addresses are kept.
 */
static void hv_para_synic_disable_regs(unsigned int cpu)
{
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;

	/* Disable SynIC's message page in the paravisor. */
	simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
	simp.simp_enabled = 0;
	hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);

	/* Disable SynIC's event page in the paravisor. */
	siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
	siefp.siefp_enabled = 0;
	hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
}

/* Disable the global synic bit in the paravisor's SCONTROL register. */
static void hv_para_synic_disable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Disable the global synic bit */
	sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
	sctrl.enable = 0;
	hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
}

#define HV_MAX_TRIES 3
/*
 * Scan the event flags page of 'this' CPU looking for any bit that is set.  If we find one
 * bit set, then wait for a few milliseconds.  Repeat these steps for a maximum of 3 times.
 * Return 'true', if there is still any set bit after this operation; 'false', otherwise.
 *
 * If a bit is set, that means there is a pending channel interrupt.  The expectation is
 * that the normal interrupt handling mechanism will find and process the channel interrupt
 * "very soon", and in the process clear the bit.
 */
static bool __hv_synic_event_pending(union hv_synic_event_flags *event, int sint)
{
	unsigned long *recv_int_page;
	bool pending;
	u32 relid;
	int tries = 0;

	/* Tolerate a missing page: it means there is nothing pending. */
	if (!event)
		return false;

	event += sint;
	recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
retry:
	pending = false;
	for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
		/* Special case - VMBus channel protocol messages */
		if (relid == 0)
			continue;
		pending = true;
		break;
	}
	if (pending && tries++ < HV_MAX_TRIES) {
		usleep_range(10000, 20000);
		goto retry;
	}
	return pending;
}

/*
 * Check both the hypervisor's and (when present) the paravisor's event
 * pages of the current CPU for pending channel interrupts.
 */
static bool hv_synic_event_pending(void)
{
	struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
	union hv_synic_event_flags *hyp_synic_event_page = hv_cpu->hyp_synic_event_page;
	union hv_synic_event_flags *para_synic_event_page = hv_cpu->para_synic_event_page;

	return
		__hv_synic_event_pending(hyp_synic_event_page, VMBUS_MESSAGE_SINT) ||
		__hv_synic_event_pending(para_synic_event_page, VMBUS_MESSAGE_SINT);
}

/*
 * hv_pick_new_cpu - Rebind a channel away from the CPU being offlined.
 *
 * Tries online CPUs starting at a random point, skipping the channel's
 * current target and VMBUS_CONNECT_CPU; falls back to VMBUS_CONNECT_CPU
 * if none succeed. Returns 0 on success or a negative errno.
 */
static int hv_pick_new_cpu(struct vmbus_channel *channel)
{
	int ret = -EBUSY;
	int start;
	int cpu;

	lockdep_assert_cpus_held();
	lockdep_assert_held(&vmbus_connection.channel_mutex);

	/*
	 * We can't assume that the relevant interrupts will be sent before
	 * the cpu is offlined on older versions of hyperv.
	 */
	if (vmbus_proto_version < VERSION_WIN10_V5_3)
		return -EBUSY;

	/* Random start point spreads rebound channels across CPUs. */
	start = get_random_u32_below(nr_cpu_ids);

	for_each_cpu_wrap(cpu, cpu_online_mask, start) {
		if (channel->target_cpu == cpu ||
		    channel->target_cpu == VMBUS_CONNECT_CPU)
			continue;

		ret = vmbus_channel_set_cpu(channel, cpu);
		if (!ret)
			break;
	}

	if (ret)
		ret = vmbus_channel_set_cpu(channel, VMBUS_CONNECT_CPU);

	return ret;
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	int ret = 0;

	if (vmbus_connection.conn_state != CONNECTED)
		goto always_cleanup;

	/*
	 * Hyper-V does not provide a way to change the connect CPU once
	 * it is set; we must prevent the connect CPU from going offline
	 * while the VM is running normally. But in the panic or kexec()
	 * path where the vmbus is already disconnected, the CPU must be
	 * allowed to shut down.
	 */
	if (cpu == VMBUS_CONNECT_CPU)
		return -EBUSY;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * cleanup.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			ret = hv_pick_new_cpu(channel);
			if (ret) {
				mutex_unlock(&vmbus_connection.channel_mutex);
				return ret;
			}
		}
		/* Sub-channels must be moved off this CPU as well. */
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				ret = hv_pick_new_cpu(sc);
				if (ret) {
					mutex_unlock(&vmbus_connection.channel_mutex);
					return ret;
				}
			}
		}
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Scan the event flags page looking for bits that are set and waiting
	 * with a timeout for vmbus_chan_sched() to process such bits. If bits
	 * are still set after this operation and VMBus is connected, fail the
	 * CPU offlining operation.
	 */
	if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
		return -EBUSY;

always_cleanup:
	hv_stimer_legacy_cleanup(cpu);

	/*
	 * First, disable the event and message pages
	 * used for communicating with the host, and then
	 * disable the host interrupts if VMBus is not
	 * confidential.
	 */
	hv_hyp_synic_disable_regs(cpu);
	if (!vmbus_is_confidential())
		hv_hyp_synic_disable_interrupts();

	/*
	 * Perform the same steps for the Confidential VMBus.
	 * The sequencing provides the guarantee that no data
	 * may be posted for processing before disabling interrupts.
	 */
	if (vmbus_is_confidential()) {
		hv_para_synic_disable_regs(cpu);
		hv_para_synic_disable_interrupts();
	}
	if (vmbus_irq != -1)
		disable_percpu_irq(vmbus_irq);

	return ret;
}