/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;


static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
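	 * Only the first report is delivered: Hyper-V exposes a single set
	 * of crash MSRs, so later reports would only overwrite it.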
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;
	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}

static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return
		sprintf(buf, "%d\n",
			channel_conn_id(hv_dev->channel,
					vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev =
		device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_dev);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}

static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							const uuid_le *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (!uuid_le_cmp(dynid->id.guid, *guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (id)
		return id;

	id = drv->id_table;
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/* Parse string of form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3f */
static int get_uuid_le(const char *str, uuid_le *uu)
{
	unsigned int b[16];
	int i;

	if (strlen(str) < 37)
		return -1;

	for (i = 0; i < 36; i++) {
		switch (i) {
		case 8: case 13: case 18: case 23:
			if (str[i] != '-')
				return -1;
			break;
		default:
			if (!isxdigit(str[i]))
				return -1;
		}
	}

	/* unparse little endian output byte order */
	if (sscanf(str,
"%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x", 657 &b[3], &b[2], &b[1], &b[0], 658 &b[5], &b[4], &b[7], &b[6], &b[8], &b[9], 659 &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16) 660 return -1; 661 662 for (i = 0; i < 16; i++) 663 uu->b[i] = b[i]; 664 return 0; 665 } 666 667 /* 668 * store_new_id - sysfs frontend to vmbus_add_dynid() 669 * 670 * Allow GUIDs to be added to an existing driver via sysfs. 671 */ 672 static ssize_t new_id_store(struct device_driver *driver, const char *buf, 673 size_t count) 674 { 675 struct hv_driver *drv = drv_to_hv_drv(driver); 676 uuid_le guid = NULL_UUID_LE; 677 ssize_t retval; 678 679 if (get_uuid_le(buf, &guid) != 0) 680 return -EINVAL; 681 682 if (hv_vmbus_get_id(drv, &guid)) 683 return -EEXIST; 684 685 retval = vmbus_add_dynid(drv, &guid); 686 if (retval) 687 return retval; 688 return count; 689 } 690 static DRIVER_ATTR_WO(new_id); 691 692 /* 693 * store_remove_id - remove a PCI device ID from this driver 694 * 695 * Removes a dynamic pci device ID to this driver. 696 */ 697 static ssize_t remove_id_store(struct device_driver *driver, const char *buf, 698 size_t count) 699 { 700 struct hv_driver *drv = drv_to_hv_drv(driver); 701 struct vmbus_dynid *dynid, *n; 702 uuid_le guid = NULL_UUID_LE; 703 size_t retval = -ENODEV; 704 705 if (get_uuid_le(buf, &guid)) 706 return -EINVAL; 707 708 spin_lock(&drv->dynids.lock); 709 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { 710 struct hv_vmbus_device_id *id = &dynid->id; 711 712 if (!uuid_le_cmp(id->guid, guid)) { 713 list_del(&dynid->node); 714 kfree(dynid); 715 retval = count; 716 break; 717 } 718 } 719 spin_unlock(&drv->dynids.lock); 720 721 return retval; 722 } 723 static DRIVER_ATTR_WO(remove_id); 724 725 static struct attribute *vmbus_drv_attrs[] = { 726 &driver_attr_new_id.attr, 727 &driver_attr_remove_id.attr, 728 NULL, 729 }; 730 ATTRIBUTE_GROUPS(vmbus_drv); 731 732 733 /* 734 * vmbus_match - Attempt to match the specified device to the specified driver 735 */ 736 static int vmbus_match(struct device *device, struct device_driver *driver) 737 { 738 struct hv_driver *drv = drv_to_hv_drv(driver); 739 struct hv_device *hv_dev = device_to_hv_device(device); 740 741 /* The hv_sock driver handles all hv_sock offers. 
	 */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}


/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);


	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);

	return;
}


/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);

}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_dev_groups,
	.drv_groups =	vmbus_drv_groups,
};

struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	if (message_type ==
	    HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		queue_work(vmbus_connection.work_queue, &ctx->work);
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}

static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);


	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(hv_context.msg_dpc[cpu]);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}


/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
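	 * hv_synic_init() must run on each CPU before vmbus_connect()
	 * sends the initial contact message.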
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup(false);

	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates a new child device
 * on the vmbus.
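 * The device is added to the bus later by vmbus_device_register().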
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */


	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "%pUl",
		     child_device_obj->channel->offermsg.offer.if_instance.b);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}


/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it.
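	 * VTPM_BASE_ADDRESS (defined above) marks where the virtual TPM's
	 * register region begins.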
	 */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new: If successful, supplies a pointer to the
 *       allocated MMIO space.
 * @device_obj: Identifies the caller
 * @min: Minimum guest physical address of the
 *       allocation
 * @max: Maximum guest physical address
 * @size: Size of the range to be allocated
 * @align: Alignment of the range to be allocated
 * @fb_overlap_ok: Whether this allocation can be allowed
 *       to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case. It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
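 * Successful allocations are claimed exclusively under the requesting
 * device's name.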
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start: Base address of region to release.
 * @size: Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);

}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);

/**
 * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
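 * The value is cached per CPU when the SynIC is initialized.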
 *
 * Return: Virtual processor number in Hyper-V terms
 */
int vmbus_cpu_number_to_vp_number(int cpu_number)
{
	return hv_context.vp_index[cpu_number];
}
EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup(false);
};

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In the crash handler we can't schedule synic cleanup for all CPUs,
	 * so do the cleanup for the current CPU only. This should be
	 * sufficient for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup(true);
};

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
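	 * vmbus_acpi_add() signals probe_event once the resource walk
	 * completes.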
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu)
		tasklet_kill(hv_context.msg_dpc[cpu]);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);
	hv_cleanup(false);
	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	}
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_enable();
}


MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);