// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct device *hv_dev;

static int hyperv_cpuhp_online;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * The panic notifier below is responsible solely for unloading the
 * vmbus connection, which is necessary in a panic event.
 *
 * Notice that an intricate relation of this notifier with the Hyper-V
 * framebuffer panic notifier exists - we need the vmbus connection alive
 * there in order to succeed, so we need to order both with each other
 * [see hvfb_on_panic()] - this is done using notifiers' priorities.
 */
static int hv_panic_vmbus_unload(struct notifier_block *nb, unsigned long val,
				 void *args)
{
	vmbus_initiate_unload(true);
	return NOTIFY_DONE;
}
static struct notifier_block hyperv_panic_vmbus_unload_block = {
	.notifier_call = hv_panic_vmbus_unload,
	.priority = INT_MIN + 1, /* almost the latest one to execute */
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_dev == NULL)
		return -ENODEV;

	return 0;
}
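/*
 * A channel's monitor ID selects one bit in the shared monitor pages:
 * bit (monitorid % 32) within trigger group (monitorid / 32). The helpers
 * below compute those coordinates for the sysfs attributes further down.
 */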
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
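/*
 * The modalias is the device-type GUID rendered as 32 hex characters; it
 * matches the MODALIAS string emitted by vmbus_uevent() below.
 */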
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
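/*
 * channel_vp_mapping prints one "<child_relid>:<target_cpu>" line for the
 * primary channel followed by one line per sub-channel, truncated to a page.
 */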
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
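/*
 * driver_override follows the usual sysfs convention: writing a driver name
 * forces the device to bind only to that driver, and writing an empty string
 * clears the override. For example (path illustrative):
 *   echo uio_hv_generic > /sys/bus/vmbus/devices/<device>/driver_override
 */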
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	int ret;

	ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(const struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);

static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. Udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
{
	const struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/*
 * remove_id_store - remove a device GUID from this driver
 *
 * Removes a dynamic vmbus device GUID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
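/*
 * Example usage from userspace (the GUID below is illustrative only):
 *   echo "deadbeef-0000-0000-0000-000000000000" > /sys/bus/vmbus/drivers/<driver>/new_id
 *   echo "deadbeef-0000-0000-0000-000000000000" > /sys/bus/vmbus/drivers/<driver>/remove_id
 */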
static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);


/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_dma_configure -- Configure DMA coherence for VMbus device
 */
static int vmbus_dma_configure(struct device *child_device)
{
	/*
	 * On ARM64, propagate the DMA coherence setting from the top level
	 * VMbus ACPI device to the child VMbus device being added here.
	 * On x86/x64 coherence is assumed and these calls have no effect.
	 */
	hv_setup_dma_ops(child_device,
			 device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
	return 0;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static void vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);


	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq = NULL,
	.resume_noirq = NULL,
	.freeze_noirq = vmbus_suspend,
	.thaw_noirq = vmbus_resume,
	.poweroff_noirq = vmbus_suspend,
	.restore_noirq = vmbus_resume,
};

/* The one and only one */
static struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dma_configure =	vmbus_dma_configure,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
	.bus_groups =		vmbus_bus_groups,
	.pm =			&vmbus_pm,
};
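/*
 * Wrapper used to hand a copy of a host message to a workqueue so that
 * blocking channel-message handlers run in process context rather than in
 * the message DPC (tasklet).
 */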
struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
					   VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	enum vmbus_channel_message_type msgtype;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	__u8 payload_size;
	u32 message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	/*
	 * Since the message is in memory shared with the host, an erroneous or
	 * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
	 * or individual message handlers are executing; to prevent this, copy
	 * the message into private memory.
	 */
	memcpy(&msg_copy, msg, sizeof(struct hv_message));

	message_type = msg_copy.header.message_type;
	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
	msgtype = hdr->msgtype;

	trace_vmbus_on_msg_dpc(hdr);

	if (msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
		goto msg_handled;
	}

	payload_size = msg_copy.header.payload_size;
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		ctx->msg.header = msg_copy.header;
		memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex. See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message,
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever, if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler can not start to
			 * run before the OFFER handler finishes.
			 */
			if (vmbus_connection.ignore_any_offer_msg)
				break;
			queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel. These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			if (vmbus_connection.ignore_any_offer_msg)
				break;
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	/*
	 * The event page can be directly checked to get the id of
	 * the channel that has the interrupt pending.
	 */
	void *page_addr = hv_cpu->synic_event_page;
	union hv_synic_event_flags *event
		= (union hv_synic_event_flags *)page_addr +
					VMBUS_MESSAGE_SINT;

	maxbits = HV_EVENT_FLAGS_COUNT;
	recv_int_page = event->flags;

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}
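/*
 * Top-level VMBus interrupt handler: first schedule any channels with pending
 * events, then hand host messages either to the stimer ISR or to the per-CPU
 * message DPC for deferred processing.
 */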
static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr;
	struct hv_message *msg;

	vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(vmbus_interrupt);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
	vmbus_isr();
	return IRQ_HANDLED;
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	/*
	 * VMbus interrupts are best modeled as per-cpu interrupts. If
	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
	 * If not on such an architecture (e.g., x86/x64), then rely on
	 * code in the arch-specific portion of the code tree to connect
	 * the VMbus interrupt handler.
	 */

	if (vmbus_irq == -1) {
		hv_setup_vmbus_handler(vmbus_isr);
	} else {
		vmbus_evt = alloc_percpu(long);
		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
					 "Hyper-V VMbus", vmbus_evt);
		if (ret) {
			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
			       vmbus_irq, ret);
			free_percpu(vmbus_evt);
			goto err_setup;
		}
	}

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_cpuhp;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Always register the vmbus unload panic notifier because we
	 * need to shut the VMbus channel connection on panic.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_vmbus_unload_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
	hv_synic_free();
err_alloc:
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
err_setup:
	bus_unregister(&hv_bus);
	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);


/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}
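/*
 * Per-channel sysfs attributes. These appear under the owning device as
 * /sys/bus/vmbus/devices/<device>/channels/<child_relid>/, set up by
 * vmbus_add_channel_kobj() below.
 */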
struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)

static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};

static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
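/*
 * The "cpu" attribute is writable: writing a housekeeping, online CPU number
 * asks the host (via MODIFYCHANNEL) to retarget the channel's interrupts to
 * that CPU, e.g. "echo 2 > .../channels/<relid>/cpu".
 */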
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}

static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%uu", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 *		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note.  The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * For version before VERSION_WIN10_V5_3, the following warning holds:
	 *
	 * Warning.  At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags.  It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_allocated_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};

/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret) {
		kobject_put(kobj);
		return ret;
	}

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		kobject_put(kobj);
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}

/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     &child_device_obj->channel->offermsg.offer.if_instance);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = hv_dev;
	child_device_obj->device.release = vmbus_device_release;

	child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
	child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
	dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		put_device(&child_device_obj->device);
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

#ifdef CONFIG_ACPI
/*
 * VMBUS is an ACPI-enumerated device. Get the information we
 * need from DSDT.
 */
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;
	struct resource r;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	/*
	 * The IRQ information is needed only on ARM64, which Hyper-V
	 * sets up in the extended format.  IRQ information is present
	 * on x86/x64 in the non-extended format but it is not used by
	 * Linux. So don't bother checking for the non-extended format.
	 */
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		if (!acpi_dev_resource_interrupt(res, 0, &r)) {
			pr_err("Unable to parse Hyper-V ACPI interrupt\n");
			return AE_ERROR;
		}
		/* ARM64 INTID for VMbus */
		vmbus_interrupt = res->data.extended_irq.interrupts[0];
		/* Linux IRQ number */
		vmbus_irq = r.start;
		return AE_OK;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
#endif
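/*
 * Release the framebuffer reservation (if any) and free the MMIO ranges that
 * vmbus_walk_resources() collected from the ACPI _CRS object.
 */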
1987 */ 1988 case ACPI_RESOURCE_TYPE_ADDRESS32: 1989 start = res->data.address32.address.minimum; 1990 end = res->data.address32.address.maximum; 1991 break; 1992 1993 case ACPI_RESOURCE_TYPE_ADDRESS64: 1994 start = res->data.address64.address.minimum; 1995 end = res->data.address64.address.maximum; 1996 break; 1997 1998 /* 1999 * The IRQ information is needed only on ARM64, which Hyper-V 2000 * sets up in the extended format. IRQ information is present 2001 * on x86/x64 in the non-extended format but it is not used by 2002 * Linux. So don't bother checking for the non-extended format. 2003 */ 2004 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 2005 if (!acpi_dev_resource_interrupt(res, 0, &r)) { 2006 pr_err("Unable to parse Hyper-V ACPI interrupt\n"); 2007 return AE_ERROR; 2008 } 2009 /* ARM64 INTID for VMbus */ 2010 vmbus_interrupt = res->data.extended_irq.interrupts[0]; 2011 /* Linux IRQ number */ 2012 vmbus_irq = r.start; 2013 return AE_OK; 2014 2015 default: 2016 /* Unused resource type */ 2017 return AE_OK; 2018 2019 } 2020 /* 2021 * Ignore ranges that are below 1MB, as they're not 2022 * necessary or useful here. 2023 */ 2024 if (end < 0x100000) 2025 return AE_OK; 2026 2027 new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC); 2028 if (!new_res) 2029 return AE_NO_MEMORY; 2030 2031 /* If this range overlaps the virtual TPM, truncate it. */ 2032 if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS) 2033 end = VTPM_BASE_ADDRESS; 2034 2035 new_res->name = "hyperv mmio"; 2036 new_res->flags = IORESOURCE_MEM; 2037 new_res->start = start; 2038 new_res->end = end; 2039 2040 /* 2041 * If two ranges are adjacent, merge them. 2042 */ 2043 do { 2044 if (!*old_res) { 2045 *old_res = new_res; 2046 break; 2047 } 2048 2049 if (((*old_res)->end + 1) == new_res->start) { 2050 (*old_res)->end = new_res->end; 2051 kfree(new_res); 2052 break; 2053 } 2054 2055 if ((*old_res)->start == new_res->end + 1) { 2056 (*old_res)->start = new_res->start; 2057 kfree(new_res); 2058 break; 2059 } 2060 2061 if ((*old_res)->start > new_res->end) { 2062 new_res->sibling = *old_res; 2063 if (prev_res) 2064 (*prev_res)->sibling = new_res; 2065 *old_res = new_res; 2066 break; 2067 } 2068 2069 prev_res = old_res; 2070 old_res = &(*old_res)->sibling; 2071 2072 } while (1); 2073 2074 return AE_OK; 2075 } 2076 #endif 2077 2078 static void vmbus_mmio_remove(void) 2079 { 2080 struct resource *cur_res; 2081 struct resource *next_res; 2082 2083 if (hyperv_mmio) { 2084 if (fb_mmio) { 2085 __release_region(hyperv_mmio, fb_mmio->start, 2086 resource_size(fb_mmio)); 2087 fb_mmio = NULL; 2088 } 2089 2090 for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) { 2091 next_res = cur_res->sibling; 2092 kfree(cur_res); 2093 } 2094 } 2095 } 2096 2097 static void __maybe_unused vmbus_reserve_fb(void) 2098 { 2099 resource_size_t start = 0, size; 2100 struct pci_dev *pdev; 2101 2102 if (efi_enabled(EFI_BOOT)) { 2103 /* Gen2 VM: get FB base from EFI framebuffer */ 2104 start = screen_info.lfb_base; 2105 size = max_t(__u32, screen_info.lfb_size, 0x800000); 2106 } else { 2107 /* Gen1 VM: get FB base from PCI */ 2108 pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, 2109 PCI_DEVICE_ID_HYPERV_VIDEO, NULL); 2110 if (!pdev) 2111 return; 2112 2113 if (pdev->resource[0].flags & IORESOURCE_MEM) { 2114 start = pci_resource_start(pdev, 0); 2115 size = pci_resource_len(pdev, 0); 2116 } 2117 2118 /* 2119 * Release the PCI device so hyperv_drm or hyperv_fb driver can 2120 * grab it later. 
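		 * (pci_get_device() returned pdev with an elevated reference
		 * count, so the reference must be dropped here in any case.)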
2121 */ 2122 pci_dev_put(pdev); 2123 } 2124 2125 if (!start) 2126 return; 2127 2128 /* 2129 * Make a claim for the frame buffer in the resource tree under the 2130 * first node, which will be the one below 4GB. The length seems to 2131 * be underreported, particularly in a Generation 1 VM. So start out 2132 * reserving a larger area and make it smaller until it succeeds. 2133 */ 2134 for (; !fb_mmio && (size >= 0x100000); size >>= 1) 2135 fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0); 2136 } 2137 2138 /** 2139 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range. 2140 * @new: If successful, supplied a pointer to the 2141 * allocated MMIO space. 2142 * @device_obj: Identifies the caller 2143 * @min: Minimum guest physical address of the 2144 * allocation 2145 * @max: Maximum guest physical address 2146 * @size: Size of the range to be allocated 2147 * @align: Alignment of the range to be allocated 2148 * @fb_overlap_ok: Whether this allocation can be allowed 2149 * to overlap the video frame buffer. 2150 * 2151 * This function walks the resources granted to VMBus by the 2152 * _CRS object in the ACPI namespace underneath the parent 2153 * "bridge" whether that's a root PCI bus in the Generation 1 2154 * case or a Module Device in the Generation 2 case. It then 2155 * attempts to allocate from the global MMIO pool in a way that 2156 * matches the constraints supplied in these parameters and by 2157 * that _CRS. 2158 * 2159 * Return: 0 on success, -errno on failure 2160 */ 2161 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, 2162 resource_size_t min, resource_size_t max, 2163 resource_size_t size, resource_size_t align, 2164 bool fb_overlap_ok) 2165 { 2166 struct resource *iter, *shadow; 2167 resource_size_t range_min, range_max, start, end; 2168 const char *dev_n = dev_name(&device_obj->device); 2169 int retval; 2170 2171 retval = -ENXIO; 2172 mutex_lock(&hyperv_mmio_lock); 2173 2174 /* 2175 * If overlaps with frame buffers are allowed, then first attempt to 2176 * make the allocation from within the reserved region. Because it 2177 * is already reserved, no shadow allocation is necessary. 
2178 */ 2179 if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) && 2180 !(max < fb_mmio->start)) { 2181 2182 range_min = fb_mmio->start; 2183 range_max = fb_mmio->end; 2184 start = (range_min + align - 1) & ~(align - 1); 2185 for (; start + size - 1 <= range_max; start += align) { 2186 *new = request_mem_region_exclusive(start, size, dev_n); 2187 if (*new) { 2188 retval = 0; 2189 goto exit; 2190 } 2191 } 2192 } 2193 2194 for (iter = hyperv_mmio; iter; iter = iter->sibling) { 2195 if ((iter->start >= max) || (iter->end <= min)) 2196 continue; 2197 2198 range_min = iter->start; 2199 range_max = iter->end; 2200 start = (range_min + align - 1) & ~(align - 1); 2201 for (; start + size - 1 <= range_max; start += align) { 2202 end = start + size - 1; 2203 2204 /* Skip the whole fb_mmio region if not fb_overlap_ok */ 2205 if (!fb_overlap_ok && fb_mmio && 2206 (((start >= fb_mmio->start) && (start <= fb_mmio->end)) || 2207 ((end >= fb_mmio->start) && (end <= fb_mmio->end)))) 2208 continue; 2209 2210 shadow = __request_region(iter, start, size, NULL, 2211 IORESOURCE_BUSY); 2212 if (!shadow) 2213 continue; 2214 2215 *new = request_mem_region_exclusive(start, size, dev_n); 2216 if (*new) { 2217 shadow->name = (char *)*new; 2218 retval = 0; 2219 goto exit; 2220 } 2221 2222 __release_region(iter, start, size); 2223 } 2224 } 2225 2226 exit: 2227 mutex_unlock(&hyperv_mmio_lock); 2228 return retval; 2229 } 2230 EXPORT_SYMBOL_GPL(vmbus_allocate_mmio); 2231 2232 /** 2233 * vmbus_free_mmio() - Free a memory-mapped I/O range. 2234 * @start: Base address of region to release. 2235 * @size: Size of the range to be allocated 2236 * 2237 * This function releases anything requested by 2238 * vmbus_mmio_allocate(). 2239 */ 2240 void vmbus_free_mmio(resource_size_t start, resource_size_t size) 2241 { 2242 struct resource *iter; 2243 2244 mutex_lock(&hyperv_mmio_lock); 2245 for (iter = hyperv_mmio; iter; iter = iter->sibling) { 2246 if ((iter->start >= start + size) || (iter->end <= start)) 2247 continue; 2248 2249 __release_region(iter, start, size); 2250 } 2251 release_mem_region(start, size); 2252 mutex_unlock(&hyperv_mmio_lock); 2253 2254 } 2255 EXPORT_SYMBOL_GPL(vmbus_free_mmio); 2256 2257 #ifdef CONFIG_ACPI 2258 static int vmbus_acpi_add(struct platform_device *pdev) 2259 { 2260 acpi_status result; 2261 int ret_val = -ENODEV; 2262 struct acpi_device *ancestor; 2263 struct acpi_device *device = ACPI_COMPANION(&pdev->dev); 2264 2265 hv_dev = &device->dev; 2266 2267 /* 2268 * Older versions of Hyper-V for ARM64 fail to include the _CCA 2269 * method on the top level VMbus device in the DSDT. But devices 2270 * are hardware coherent in all current Hyper-V use cases, so fix 2271 * up the ACPI device to behave as if _CCA is present and indicates 2272 * hardware coherence. 2273 */ 2274 ACPI_COMPANION_SET(&device->dev, device); 2275 if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) && 2276 device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) { 2277 pr_info("No ACPI _CCA found; assuming coherent device I/O\n"); 2278 device->flags.cca_seen = true; 2279 device->flags.coherent_dma = true; 2280 } 2281 2282 result = acpi_walk_resources(device->handle, METHOD_NAME__CRS, 2283 vmbus_walk_resources, NULL); 2284 2285 if (ACPI_FAILURE(result)) 2286 goto acpi_walk_err; 2287 /* 2288 * Some ancestor of the vmbus acpi device (Gen1 or Gen2 2289 * firmware) is the VMOD that has the mmio ranges. Get that. 
2290 */ 2291 for (ancestor = acpi_dev_parent(device); ancestor; 2292 ancestor = acpi_dev_parent(ancestor)) { 2293 result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS, 2294 vmbus_walk_resources, NULL); 2295 2296 if (ACPI_FAILURE(result)) 2297 continue; 2298 if (hyperv_mmio) { 2299 vmbus_reserve_fb(); 2300 break; 2301 } 2302 } 2303 ret_val = 0; 2304 2305 acpi_walk_err: 2306 if (ret_val) 2307 vmbus_mmio_remove(); 2308 return ret_val; 2309 } 2310 #else 2311 static int vmbus_acpi_add(struct platform_device *pdev) 2312 { 2313 return 0; 2314 } 2315 #endif 2316 2317 static int vmbus_device_add(struct platform_device *pdev) 2318 { 2319 struct resource **cur_res = &hyperv_mmio; 2320 struct of_range range; 2321 struct of_range_parser parser; 2322 struct device_node *np = pdev->dev.of_node; 2323 int ret; 2324 2325 hv_dev = &pdev->dev; 2326 2327 ret = of_range_parser_init(&parser, np); 2328 if (ret) 2329 return ret; 2330 2331 for_each_of_range(&parser, &range) { 2332 struct resource *res; 2333 2334 res = kzalloc(sizeof(*res), GFP_KERNEL); 2335 if (!res) { 2336 vmbus_mmio_remove(); 2337 return -ENOMEM; 2338 } 2339 2340 res->name = "hyperv mmio"; 2341 res->flags = range.flags; 2342 res->start = range.cpu_addr; 2343 res->end = range.cpu_addr + range.size; 2344 2345 *cur_res = res; 2346 cur_res = &res->sibling; 2347 } 2348 2349 return ret; 2350 } 2351 2352 static int vmbus_platform_driver_probe(struct platform_device *pdev) 2353 { 2354 if (acpi_disabled) 2355 return vmbus_device_add(pdev); 2356 else 2357 return vmbus_acpi_add(pdev); 2358 } 2359 2360 static int vmbus_platform_driver_remove(struct platform_device *pdev) 2361 { 2362 vmbus_mmio_remove(); 2363 return 0; 2364 } 2365 2366 #ifdef CONFIG_PM_SLEEP 2367 static int vmbus_bus_suspend(struct device *dev) 2368 { 2369 struct hv_per_cpu_context *hv_cpu = per_cpu_ptr( 2370 hv_context.cpu_context, VMBUS_CONNECT_CPU); 2371 struct vmbus_channel *channel, *sc; 2372 2373 tasklet_disable(&hv_cpu->msg_dpc); 2374 vmbus_connection.ignore_any_offer_msg = true; 2375 /* The tasklet_enable() takes care of providing a memory barrier */ 2376 tasklet_enable(&hv_cpu->msg_dpc); 2377 2378 /* Drain all the workqueues as we are in suspend */ 2379 drain_workqueue(vmbus_connection.rescind_work_queue); 2380 drain_workqueue(vmbus_connection.work_queue); 2381 drain_workqueue(vmbus_connection.handle_primary_chan_wq); 2382 drain_workqueue(vmbus_connection.handle_sub_chan_wq); 2383 2384 mutex_lock(&vmbus_connection.channel_mutex); 2385 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 2386 if (!is_hvsock_channel(channel)) 2387 continue; 2388 2389 vmbus_force_channel_rescinded(channel); 2390 } 2391 mutex_unlock(&vmbus_connection.channel_mutex); 2392 2393 /* 2394 * Wait until all the sub-channels and hv_sock channels have been 2395 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise 2396 * they would conflict with the new sub-channels that will be created 2397 * in the resume path. hv_sock channels should also be destroyed, but 2398 * a hv_sock channel of an established hv_sock connection can not be 2399 * really destroyed since it may still be referenced by the userspace 2400 * application, so we just force the hv_sock channel to be rescinded 2401 * by vmbus_force_channel_rescinded(), and the userspace application 2402 * will thoroughly destroy the channel after hibernation. 2403 * 2404 * Note: the counter nr_chan_close_on_suspend may never go above 0 if 2405 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM. 
2406 */ 2407 if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0) 2408 wait_for_completion(&vmbus_connection.ready_for_suspend_event); 2409 2410 if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) { 2411 pr_err("Can not suspend due to a previous failed resuming\n"); 2412 return -EBUSY; 2413 } 2414 2415 mutex_lock(&vmbus_connection.channel_mutex); 2416 2417 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 2418 /* 2419 * Remove the channel from the array of channels and invalidate 2420 * the channel's relid. Upon resume, vmbus_onoffer() will fix 2421 * up the relid (and other fields, if necessary) and add the 2422 * channel back to the array. 2423 */ 2424 vmbus_channel_unmap_relid(channel); 2425 channel->offermsg.child_relid = INVALID_RELID; 2426 2427 if (is_hvsock_channel(channel)) { 2428 if (!channel->rescind) { 2429 pr_err("hv_sock channel not rescinded!\n"); 2430 WARN_ON_ONCE(1); 2431 } 2432 continue; 2433 } 2434 2435 list_for_each_entry(sc, &channel->sc_list, sc_list) { 2436 pr_err("Sub-channel not deleted!\n"); 2437 WARN_ON_ONCE(1); 2438 } 2439 2440 atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume); 2441 } 2442 2443 mutex_unlock(&vmbus_connection.channel_mutex); 2444 2445 vmbus_initiate_unload(false); 2446 2447 /* Reset the event for the next resume. */ 2448 reinit_completion(&vmbus_connection.ready_for_resume_event); 2449 2450 return 0; 2451 } 2452 2453 static int vmbus_bus_resume(struct device *dev) 2454 { 2455 struct vmbus_channel_msginfo *msginfo; 2456 size_t msgsize; 2457 int ret; 2458 2459 vmbus_connection.ignore_any_offer_msg = false; 2460 2461 /* 2462 * We only use the 'vmbus_proto_version', which was in use before 2463 * hibernation, to re-negotiate with the host. 2464 */ 2465 if (!vmbus_proto_version) { 2466 pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version); 2467 return -EINVAL; 2468 } 2469 2470 msgsize = sizeof(*msginfo) + 2471 sizeof(struct vmbus_channel_initiate_contact); 2472 2473 msginfo = kzalloc(msgsize, GFP_KERNEL); 2474 2475 if (msginfo == NULL) 2476 return -ENOMEM; 2477 2478 ret = vmbus_negotiate_version(msginfo, vmbus_proto_version); 2479 2480 kfree(msginfo); 2481 2482 if (ret != 0) 2483 return ret; 2484 2485 WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0); 2486 2487 vmbus_request_offers(); 2488 2489 if (wait_for_completion_timeout( 2490 &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0) 2491 pr_err("Some vmbus device is missing after suspending?\n"); 2492 2493 /* Reset the event for the next suspend. 
*/ 2494 reinit_completion(&vmbus_connection.ready_for_suspend_event); 2495 2496 return 0; 2497 } 2498 #else 2499 #define vmbus_bus_suspend NULL 2500 #define vmbus_bus_resume NULL 2501 #endif /* CONFIG_PM_SLEEP */ 2502 2503 static const __maybe_unused struct of_device_id vmbus_of_match[] = { 2504 { 2505 .compatible = "microsoft,vmbus", 2506 }, 2507 { 2508 /* sentinel */ 2509 }, 2510 }; 2511 MODULE_DEVICE_TABLE(of, vmbus_of_match); 2512 2513 static const __maybe_unused struct acpi_device_id vmbus_acpi_device_ids[] = { 2514 {"VMBUS", 0}, 2515 {"VMBus", 0}, 2516 {"", 0}, 2517 }; 2518 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids); 2519 2520 /* 2521 * Note: we must use the "no_irq" ops, otherwise hibernation can not work with 2522 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in 2523 * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see 2524 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() -> 2525 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's 2526 * resume callback must also run via the "noirq" ops. 2527 * 2528 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment 2529 * earlier in this file before vmbus_pm. 2530 */ 2531 2532 static const struct dev_pm_ops vmbus_bus_pm = { 2533 .suspend_noirq = NULL, 2534 .resume_noirq = NULL, 2535 .freeze_noirq = vmbus_bus_suspend, 2536 .thaw_noirq = vmbus_bus_resume, 2537 .poweroff_noirq = vmbus_bus_suspend, 2538 .restore_noirq = vmbus_bus_resume 2539 }; 2540 2541 static struct platform_driver vmbus_platform_driver = { 2542 .probe = vmbus_platform_driver_probe, 2543 .remove = vmbus_platform_driver_remove, 2544 .driver = { 2545 .name = "vmbus", 2546 .acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids), 2547 .of_match_table = of_match_ptr(vmbus_of_match), 2548 .pm = &vmbus_bus_pm, 2549 .probe_type = PROBE_FORCE_SYNCHRONOUS, 2550 } 2551 }; 2552 2553 static void hv_kexec_handler(void) 2554 { 2555 hv_stimer_global_cleanup(); 2556 vmbus_initiate_unload(false); 2557 /* Make sure conn_state is set as hv_synic_cleanup checks for it */ 2558 mb(); 2559 cpuhp_remove_state(hyperv_cpuhp_online); 2560 }; 2561 2562 static void hv_crash_handler(struct pt_regs *regs) 2563 { 2564 int cpu; 2565 2566 vmbus_initiate_unload(true); 2567 /* 2568 * In crash handler we can't schedule synic cleanup for all CPUs, 2569 * doing the cleanup for current CPU only. This should be sufficient 2570 * for kdump. 2571 */ 2572 cpu = smp_processor_id(); 2573 hv_stimer_cleanup(cpu); 2574 hv_synic_disable_regs(cpu); 2575 }; 2576 2577 static int hv_synic_suspend(void) 2578 { 2579 /* 2580 * When we reach here, all the non-boot CPUs have been offlined. 2581 * If we're in a legacy configuration where stimer Direct Mode is 2582 * not enabled, the stimers on the non-boot CPUs have been unbound 2583 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() -> 2584 * hv_stimer_cleanup() -> clockevents_unbind_device(). 2585 * 2586 * hv_synic_suspend() only runs on CPU0 with interrupts disabled. 2587 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because: 2588 * 1) it's unnecessary as interrupts remain disabled between 2589 * syscore_suspend() and syscore_resume(): see create_image() and 2590 * resume_target_kernel() 2591 * 2) the stimer on CPU0 is automatically disabled later by 2592 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ... 2593 * -> clockevents_shutdown() -> ... 
-> hv_ce_shutdown() 2594 * 3) a warning would be triggered if we call 2595 * clockevents_unbind_device(), which may sleep, in an 2596 * interrupts-disabled context. 2597 */ 2598 2599 hv_synic_disable_regs(0); 2600 2601 return 0; 2602 } 2603 2604 static void hv_synic_resume(void) 2605 { 2606 hv_synic_enable_regs(0); 2607 2608 /* 2609 * Note: we don't need to call hv_stimer_init(0), because the timer 2610 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is 2611 * automatically re-enabled in timekeeping_resume(). 2612 */ 2613 } 2614 2615 /* The callbacks run only on CPU0, with irqs_disabled. */ 2616 static struct syscore_ops hv_synic_syscore_ops = { 2617 .suspend = hv_synic_suspend, 2618 .resume = hv_synic_resume, 2619 }; 2620 2621 static int __init hv_acpi_init(void) 2622 { 2623 int ret; 2624 2625 if (!hv_is_hyperv_initialized()) 2626 return -ENODEV; 2627 2628 if (hv_root_partition && !hv_nested) 2629 return 0; 2630 2631 /* 2632 * Get ACPI resources first. 2633 */ 2634 ret = platform_driver_register(&vmbus_platform_driver); 2635 if (ret) 2636 return ret; 2637 2638 if (!hv_dev) { 2639 ret = -ENODEV; 2640 goto cleanup; 2641 } 2642 2643 /* 2644 * If we're on an architecture with a hardcoded hypervisor 2645 * vector (i.e. x86/x64), override the VMbus interrupt found 2646 * in the ACPI tables. Ensure vmbus_irq is not set since the 2647 * normal Linux IRQ mechanism is not used in this case. 2648 */ 2649 #ifdef HYPERVISOR_CALLBACK_VECTOR 2650 vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR; 2651 vmbus_irq = -1; 2652 #endif 2653 2654 hv_debug_init(); 2655 2656 ret = vmbus_bus_init(); 2657 if (ret) 2658 goto cleanup; 2659 2660 hv_setup_kexec_handler(hv_kexec_handler); 2661 hv_setup_crash_handler(hv_crash_handler); 2662 2663 register_syscore_ops(&hv_synic_syscore_ops); 2664 2665 return 0; 2666 2667 cleanup: 2668 platform_driver_unregister(&vmbus_platform_driver); 2669 hv_dev = NULL; 2670 return ret; 2671 } 2672 2673 static void __exit vmbus_exit(void) 2674 { 2675 int cpu; 2676 2677 unregister_syscore_ops(&hv_synic_syscore_ops); 2678 2679 hv_remove_kexec_handler(); 2680 hv_remove_crash_handler(); 2681 vmbus_connection.conn_state = DISCONNECTED; 2682 hv_stimer_global_cleanup(); 2683 vmbus_disconnect(); 2684 if (vmbus_irq == -1) { 2685 hv_remove_vmbus_handler(); 2686 } else { 2687 free_percpu_irq(vmbus_irq, vmbus_evt); 2688 free_percpu(vmbus_evt); 2689 } 2690 for_each_online_cpu(cpu) { 2691 struct hv_per_cpu_context *hv_cpu 2692 = per_cpu_ptr(hv_context.cpu_context, cpu); 2693 2694 tasklet_kill(&hv_cpu->msg_dpc); 2695 } 2696 hv_debug_rm_all_dir(); 2697 2698 vmbus_free_channels(); 2699 kfree(vmbus_connection.channels); 2700 2701 /* 2702 * The vmbus panic notifier is always registered, hence we should 2703 * also unconditionally unregister it here as well. 2704 */ 2705 atomic_notifier_chain_unregister(&panic_notifier_list, 2706 &hyperv_panic_vmbus_unload_block); 2707 2708 bus_unregister(&hv_bus); 2709 2710 cpuhp_remove_state(hyperv_cpuhp_online); 2711 hv_synic_free(); 2712 platform_driver_unregister(&vmbus_platform_driver); 2713 } 2714 2715 2716 MODULE_LICENSE("GPL"); 2717 MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver"); 2718 2719 subsys_initcall(hv_acpi_init); 2720 module_exit(vmbus_exit); 2721