// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
#include <linux/of_irq.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};

#define CPU_AFFINITY_ALL	-1ULL
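/*
 * Example (editor's note): PCI_MAKE_VERSION(1, 4) encodes to 0x00010004,
 * so PCI_MAJOR_VERSION(0x00010004) == 1 and PCI_MINOR_VERSION(0x00010004)
 * == 4. Note that PCI_MINOR_VERSION() masks only the low byte, which is
 * sufficient for the small minor numbers defined here.
 */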
/*
 * Supported protocol versions in the order of probing - highest goes
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for 32bit serial number as string */
#define SLOT_NAME_SIZE 11

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2         = PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2              = PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;
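/*
 * Example (editor's note): device 2, function 1 encodes as
 * .bits.dev = 2, .bits.func = 1, giving .slot = (1 << 5) | 2 = 0x22.
 */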
/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};

struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;

/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 *	Everything is the same as in 'hv_msi_desc2' except that the size of the
 *	'vector' field is larger to support bigger vector values. For ex: LPI
 *	vectors on ARM.
 */
struct hv_msi_desc3 {
	u32	vector;
	u8	delivery_mode;
	u8	reserved;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;
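/*
 * Example (editor's note): on x86, the address/data pair the host returns
 * here typically looks like a conventional MSI doorbell, e.g. an address
 * in the 0xFEExxxxx range with the vector in the low byte of the data.
 * The guest never composes these values itself; it programs whatever the
 * host supplies into the function's MSI/MSI-X registers.
 */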
/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;
};

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * pci_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;
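/*
 * Editor's sketch of the negotiation flow (the actual implementation
 * lives later in this file): the guest walks pci_protocol_versions[]
 * from highest to lowest, sending one pci_version_request per entry,
 * and settles on the first version the host does not reject with
 * STATUS_REVISION_MISMATCH.
 */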
/*
 * Bus D0 Entry. This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;

	struct mutex state_lock;
	enum hv_pcibus_state state;

	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;	/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct irq_domain *irq_domain;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;
	bool use_calls; /* Use hypercalls to access mmio cfg space */
};
/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[] __counted_by(device_count);
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};
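/*
 * Example (editor's note): probed_bar[] follows the standard PCI BAR
 * sizing convention. If probed_bar[0] reads back as 0xFFFFF000, the low
 * bits mark a 32-bit, non-prefetchable memory BAR, and the zeroed span
 * gives the size: ~0xFFFFF000 + 1 = 0x1000, i.e. 4 KiB.
 */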
struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE		APIC_DELIVERY_MODE_FIXED
#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_ACK

static int hv_pci_irqchip_init(void)
{
	return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

#define hv_msi_prepare		pci_msi_prepare

/**
 * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_arch_irq_unmask(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct hv_retarget_device_interrupt *params;
	struct tran_int_desc *int_desc;
	struct hv_pcibus_device *hbus;
	const struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	int_desc = data->chip_data;
	if (!int_desc) {
		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
			 __func__, data->irq);
		return;
	}

	local_irq_save(flags);

	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			   (hbus->hdev->dev_instance.b[4] << 16) |
			   (hbus->hdev->dev_instance.b[7] << 8) |
			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
			   PCI_FUNC(pdev->devfn);
	params->int_target.vector = hv_msi_get_int_vector(data);

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto out;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto out;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

out:
	local_irq_restore(flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the below hv_do_hypercall()
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity(), so later when the VM resumes,
	 * hv_pci_restore_msi_state() is able to correctly restore the
	 * interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);
}
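/*
 * Editor's note on the control word above: per the Hyper-V TLFS, the
 * variable header size of a hypercall is carried in bits 26:17 of the
 * hypercall input value, hence the (var_size << 17). For a VP_SET with
 * one bank, var_size is 1 (valid_bank_mask) + 1 (the bank itself) = 2.
 */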
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
 * of room at the start to allow for SPIs to be specified through ACPI and
 * starting with a power of two to satisfy power of 2 multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE		0
#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_EOI
#define hv_msi_prepare		NULL

struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex	map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 *			the bitmap.
 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 *			the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}
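/*
 * Example (editor's note): the bitmap allocator below works in naturally
 * aligned power-of-2 regions, so a request for 8 MSIs
 * (get_count_order(8) == 3) claims an aligned block of 8 SPIs. This is
 * what satisfies the multi-MSI power-of-2 requirement mentioned at
 * HV_PCI_MSI_SPI_START.
 */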
static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	fwspec.fwnode = domain->parent->fwnode;
	if (is_of_node(fwspec.fwnode)) {
		/* SPI lines for OF translations start at offset 32 */
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	}

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}
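/*
 * Example (editor's note): for hwirq 64 (the first vPCI SPI) against a
 * DT-described GIC, the fwspec above is the triple
 * <0 32 IRQ_TYPE_EDGE_RISING> (GIC_SPI, SPI number relative to 32,
 * trigger); against ACPI it is the pair <64 IRQ_TYPE_EDGE_RISING> with
 * the absolute GSI number.
 */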
static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Pick the first cpu as the irq affinity that can be temporarily used for
 * composing MSI from the hypervisor. GIC will eventually set the right
 * affinity for the irq and the 'unmask' will retarget the interrupt to that
 * cpu.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc	= hv_pci_vec_irq_domain_alloc,
	.free	= hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};

#ifdef CONFIG_OF

static struct irq_domain *hv_pci_of_irq_domain_parent(void)
{
	struct device_node *parent;
	struct irq_domain *domain;

	parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
	if (!parent)
		return NULL;
	domain = irq_find_host(parent);
	of_node_put(parent);

	return domain;
}

#endif

#ifdef CONFIG_ACPI

static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
{
	acpi_gsi_domain_disp_fn gsi_domain_disp_fn;

	gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
	if (!gsi_domain_disp_fn)
		return NULL;
	return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
					DOMAIN_BUS_ANY);
}

#endif

static int hv_pci_irqchip_init(void)
{
	static struct hv_pci_chip_data *chip_data;
	struct fwnode_handle *fn = NULL;
	struct irq_domain *irq_domain_parent = NULL;
	int ret = -ENOMEM;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return ret;

	mutex_init(&chip_data->map_lock);
	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
	if (!fn)
		goto free_chip;

	/*
	 * IRQ domain once enabled, should not be removed since there is no
	 * way to ensure that all the corresponding devices are also gone and
	 * no interrupts will be generated.
	 */
#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		irq_domain_parent = hv_pci_acpi_irq_domain_parent();
#endif
#ifdef CONFIG_OF
	if (!irq_domain_parent)
		irq_domain_parent = hv_pci_of_irq_domain_parent();
#endif
	if (!irq_domain_parent) {
		WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
		ret = -EINVAL;
		goto free_chip;
	}

	hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
							    HV_PCI_MSI_SPI_NR,
							    fn, &hv_pci_domain_ops,
							    chip_data);

	if (!hv_msi_gic_irq_domain) {
		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
		goto free_chip;
	}

	return 0;

free_chip:
	kfree(chip_data);
	if (fn)
		irq_domain_free_fwnode(fn);

	return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for interrupts of PCI devices and SPIs are managed via GICD
 * registers which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn: The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}
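/*
 * Example (editor's note): Linux packs devfn as (dev << 3) | func, while
 * Windows packs dev | (func << 5). So devfn 0x11 (device 2, function 1)
 * maps to wslot 0x22, and wslot_to_devfn() below inverts the mapping:
 * wslot_to_devfn(devfn_to_wslot(devfn)) == devfn.
 */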
/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot: The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
	struct hv_mmio_read_input *in;
	struct hv_mmio_read_output *out;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument page. Use it for
	 * both input and output.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
	in->gpa = gpa;
	in->size = size;

	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
	if (hv_result_success(ret)) {
		switch (size) {
		case 1:
			*val = *(u8 *)(out->data);
			break;
		case 2:
			*val = *(u16 *)(out->data);
			break;
		default:
			*val = *(u32 *)(out->data);
			break;
		}
	} else
		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
	struct hv_mmio_write_input *in;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument memory.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	in->gpa = gpa;
	in->size = size;
	switch (size) {
	case 1:
		*(u8 *)(in->data) = val;
		break;
	case 2:
		*(u16 *)(in->data) = val;
		break;
	default:
		*(u32 *)(in->data) = val;
		break;
	}

	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
	if (!hv_result_success(ret))
		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space. Writing to the first page chooses
 * the PCI function being written or read. Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */
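/*
 * Example (editor's note): to read the 32-bit register at config offset
 * 0x10 (BAR0) of a given function, the driver writes that function's
 * win_slot value to page 0 (cfg_addr) and then reads page 1 at
 * CFG_PAGE_OFFSET + 0x10, i.e. cfg_addr + 0x1010. The config_lock
 * spinlock keeps the select-then-access sequence atomic.
 */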
/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
		/*
		 * Interrupt Line and Interrupt PIN are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);
		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_read_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to be read. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before reading. */
			mb();
			/* Read from that function's config space. */
			switch (size) {
			case 1:
				*val = readb(addr);
				break;
			case 2:
				*val = readw(addr);
				break;
			default:
				*val = readl(addr);
				break;
			}
			/*
			 * Make sure the read was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	u32 val;
	u16 ret;
	unsigned long flags;

	spin_lock_irqsave(&hbus->config_lock, flags);

	if (hbus->use_calls) {
		phys_addr_t addr = hbus->mem_config->start +
				   CFG_PAGE_OFFSET + PCI_VENDOR_ID;

		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
				  hpdev->desc.win_slot.slot);
		hv_pci_read_mmio(dev, addr, 2, &val);
		ret = val;  /* Truncates to 16 bits */
	} else {
		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
				     PCI_VENDOR_ID;
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		ret = readw(addr);
		/*
		 * mb() is not required here, because the
		 * spin_unlock_irqrestore() is a barrier.
		 */
	}

	spin_unlock_irqrestore(&hbus->config_lock, flags);

	return ret;
}
/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);

		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_write_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to write. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before writing. */
			wmb();
			/* Write to that function's config space. */
			switch (size) {
			case 1:
				writeb(val, addr);
				break;
			case 2:
				writew(val, addr);
				break;
			default:
				writel(val, addr);
				break;
			}
			/*
			 * Make sure the write was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};
/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver. These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional. Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver. The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one or
 * more of the first 64 blocks. This invalidation is delivered via a callback
 * supplied to the VF driver by this driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */
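/*
 * Editor's sketch of the VF-side usage (illustrative only; consumers
 * reach these helpers indirectly rather than by calling the static
 * functions below, and 'consume' and 'on_invalidate' are hypothetical):
 *
 *	u8 cfg[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int got;
 *
 *	if (!hv_read_config_block(pdev, cfg, sizeof(cfg), 0, &got))
 *		consume(cfg, got);
 *	hv_write_config_block(pdev, cfg, got, 1);
 *	hv_register_block_invalidate(pdev, ctx, on_invalidate);
 *
 * on_invalidate(ctx, block_mask) is then called with one bit set for
 * each of the first 64 blocks the PF driver has invalidated.
 */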
struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)pkt.buf;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}
/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				 unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)pkt.buf;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}
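/*
 * Example (editor's note): struct pci_write_block has four u32 fields
 * before 'bytes', so offsetof(struct pci_write_block, bytes) == 16. For
 * a 128-byte block, pkt_size is therefore 16 + 128 + 4 (the quirk
 * padding above) = 148 bytes on the wire.
 */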
/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
					void (*block_invalidate)(void *context,
								 u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}

/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
			     struct tran_int_desc *int_desc)
{
	struct pci_delete_interrupt *int_pkt;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_delete_interrupt)];
	} ctxt;

	if (!int_desc->vector_count) {
		kfree(int_desc);
		return;
	}
	memset(&ctxt, 0, sizeof(ctxt));
	int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
	int_pkt->message_type.type =
		PCI_DELETE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	int_pkt->int_desc = *int_desc;
	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
			 0, VM_PKT_DATA_INBAND, 0);
	kfree(int_desc);
}
/**
 * hv_msi_free() - Free the MSI.
 * @domain:	The interrupt domain pointer
 * @info:	Extra MSI-related context
 * @irq:	Identifies the IRQ.
 *
 * The Hyper-V parent partition and hypervisor are tracking the
 * messages that are in use, keeping the interrupt redirection
 * table up to date. This callback sends a message that frees
 * the IRT entry and related tracking nonsense.
 */
static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int irq)
{
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	struct tran_int_desc *int_desc;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);

	pdev = msi_desc_to_pci_dev(msi);
	hbus = domain->host_data;
	int_desc = irq_data_get_irq_chip_data(irq_data);
	if (!int_desc)
		return;

	irq_data->chip_data = NULL;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev) {
		kfree(int_desc);
		return;
	}

	hv_int_desc_free(hpdev, int_desc);
	put_pcichild(hpdev);
}

static void hv_irq_mask(struct irq_data *data)
{
	if (data->parent_data->chip->irq_mask)
		irq_chip_mask_parent(data);
}

static void hv_irq_unmask(struct irq_data *data)
{
	hv_arch_irq_unmask(data);

	if (data->parent_data->chip->irq_unmask)
		irq_chip_unmask_parent(data);
}

struct compose_comp_ctxt {
	struct hv_pci_compl comp_pkt;
	struct tran_int_desc int_desc;
};

static void hv_pci_compose_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct compose_comp_ctxt *comp_pkt = context;
	struct pci_create_int_response *int_resp =
		(struct pci_create_int_response *)resp;

	if (resp_packet_size < sizeof(*int_resp)) {
		comp_pkt->comp_pkt.completion_status = -1;
		goto out;
	}
	comp_pkt->comp_pkt.completion_status = resp->status;
	comp_pkt->int_desc = int_resp->int_desc;
out:
	complete(&comp_pkt->comp_pkt.host_event);
}

static u32 hv_compose_msi_req_v1(
	struct pci_create_interrupt *int_pkt,
	u32 slot, u8 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;

	/*
	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
	 * hv_irq_unmask().
	 */
	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;

	return sizeof(*int_pkt);
}
/*
 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
 * not irrelevant because Hyper-V chooses the physical CPU to handle the
 * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
 * but assigning too many vPCI device interrupts to the same pCPU can cause a
 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
 * to spread out the pCPUs that it selects.
 *
 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
 * to always return the same dummy vCPU, because a second call to
 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
 * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
 * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
 * the same pCPU, even though the vCPUs will be spread out by later calls
 * to hv_irq_unmask(), but that is the best we can do now.
 *
 * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
 * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
 * enhancement is planned for a future version. With that enhancement, the
 * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
 * device will be spread across multiple pCPUs.
 */

/*
 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
 * by subsequent retarget in hv_irq_unmask().
 */
static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
{
	return cpumask_first_and(affinity, cpu_online_mask);
}

/*
 * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
 */
static int hv_compose_multi_msi_req_get_cpu(void)
{
	static DEFINE_SPINLOCK(multi_msi_cpu_lock);

	/* -1 means starting with CPU 0 */
	static int cpu_next = -1;

	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&multi_msi_cpu_lock, flags);

	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
	cpu = cpu_next;

	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);

	return cpu;
}
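/*
 * Example (editor's note): with four online CPUs, successive multi-MSI
 * allocations receive dummy vCPUs 0, 1, 2, 3, 0, ... so that the pCPUs
 * Hyper-V picks for different devices are spread out, per the discussion
 * above.
 */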
1863 */ 1864 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 1865 { 1866 struct hv_pcibus_device *hbus; 1867 struct vmbus_channel *channel; 1868 struct hv_pci_dev *hpdev; 1869 struct pci_bus *pbus; 1870 struct pci_dev *pdev; 1871 const struct cpumask *dest; 1872 struct compose_comp_ctxt comp; 1873 struct tran_int_desc *int_desc; 1874 struct msi_desc *msi_desc; 1875 /* 1876 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2 1877 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3. 1878 */ 1879 u16 vector_count; 1880 u32 vector; 1881 struct { 1882 struct pci_packet pci_pkt; 1883 union { 1884 struct pci_create_interrupt v1; 1885 struct pci_create_interrupt2 v2; 1886 struct pci_create_interrupt3 v3; 1887 } int_pkts; 1888 } __packed ctxt; 1889 bool multi_msi; 1890 u64 trans_id; 1891 u32 size; 1892 int ret; 1893 int cpu; 1894 1895 msi_desc = irq_data_get_msi_desc(data); 1896 multi_msi = !msi_desc->pci.msi_attrib.is_msix && 1897 msi_desc->nvec_used > 1; 1898 1899 /* Reuse the previous allocation */ 1900 if (data->chip_data && multi_msi) { 1901 int_desc = data->chip_data; 1902 msg->address_hi = int_desc->address >> 32; 1903 msg->address_lo = int_desc->address & 0xffffffff; 1904 msg->data = int_desc->data; 1905 return; 1906 } 1907 1908 pdev = msi_desc_to_pci_dev(msi_desc); 1909 dest = irq_data_get_effective_affinity_mask(data); 1910 pbus = pdev->bus; 1911 hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); 1912 channel = hbus->hdev->channel; 1913 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); 1914 if (!hpdev) 1915 goto return_null_message; 1916 1917 /* Free any previous message that might have already been composed. */ 1918 if (data->chip_data && !multi_msi) { 1919 int_desc = data->chip_data; 1920 data->chip_data = NULL; 1921 hv_int_desc_free(hpdev, int_desc); 1922 } 1923 1924 int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); 1925 if (!int_desc) 1926 goto drop_reference; 1927 1928 if (multi_msi) { 1929 /* 1930 * If this is not the first MSI of Multi MSI, we already have 1931 * a mapping. Can exit early. 1932 */ 1933 if (msi_desc->irq != data->irq) { 1934 data->chip_data = int_desc; 1935 int_desc->address = msi_desc->msg.address_lo | 1936 (u64)msi_desc->msg.address_hi << 32; 1937 int_desc->data = msi_desc->msg.data + 1938 (data->irq - msi_desc->irq); 1939 msg->address_hi = msi_desc->msg.address_hi; 1940 msg->address_lo = msi_desc->msg.address_lo; 1941 msg->data = int_desc->data; 1942 put_pcichild(hpdev); 1943 return; 1944 } 1945 /* 1946 * The vector we select here is a dummy value. The correct 1947 * value gets sent to the hypervisor in unmask(). This needs 1948 * to be aligned with the count, and also not zero. Multi-msi 1949 * is powers of 2 up to 32, so 32 will always work here. 1950 */ 1951 vector = 32; 1952 vector_count = msi_desc->nvec_used; 1953 cpu = hv_compose_multi_msi_req_get_cpu(); 1954 } else { 1955 vector = hv_msi_get_int_vector(data); 1956 vector_count = 1; 1957 cpu = hv_compose_msi_req_get_cpu(dest); 1958 } 1959 1960 /* 1961 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector' 1962 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly 1963 * for better readability. 
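	 * For example, vector 0x30 survives the u8 cast unchanged, while a
	 * value above 255 (possible on ARM64, which is presumably why
	 * hv_msi_desc3 widened the field) can only be carried by the v3
	 * message.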
1964  */
1965 	memset(&ctxt, 0, sizeof(ctxt));
1966 	init_completion(&comp.comp_pkt.host_event);
1967 	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1968 	ctxt.pci_pkt.compl_ctxt = &comp;
1969 
1970 	switch (hbus->protocol_version) {
1971 	case PCI_PROTOCOL_VERSION_1_1:
1972 		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1973 					     hpdev->desc.win_slot.slot,
1974 					     (u8)vector,
1975 					     vector_count);
1976 		break;
1977 
1978 	case PCI_PROTOCOL_VERSION_1_2:
1979 	case PCI_PROTOCOL_VERSION_1_3:
1980 		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1981 					     cpu,
1982 					     hpdev->desc.win_slot.slot,
1983 					     (u8)vector,
1984 					     vector_count);
1985 		break;
1986 
1987 	case PCI_PROTOCOL_VERSION_1_4:
1988 		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
1989 					     cpu,
1990 					     hpdev->desc.win_slot.slot,
1991 					     vector,
1992 					     vector_count);
1993 		break;
1994 
1995 	default:
1996 		/* As we only negotiate protocol versions known to this driver,
1997 		 * this path should never be hit. However, this is not a hot
1998 		 * path, so we print a message to aid future updates.
1999 		 */
2000 		dev_err(&hbus->hdev->device,
2001 			"Unexpected vPCI protocol, update driver.\n");
2002 		goto free_int_desc;
2003 	}
2004 
2005 	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
2006 				     size, (unsigned long)&ctxt.pci_pkt,
2007 				     &trans_id, VM_PKT_DATA_INBAND,
2008 				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2009 	if (ret) {
2010 		dev_err(&hbus->hdev->device,
2011 			"Sending request for interrupt failed: 0x%x\n",
2012 			ret);
2013 		goto free_int_desc;
2014 	}
2015 
2016 	/*
2017 	 * Prevents hv_pci_onchannelcallback() from running concurrently
2018 	 * in the tasklet.
2019 	 */
2020 	tasklet_disable_in_atomic(&channel->callback_event);
2021 
2022 	/*
2023 	 * Since this function is called with IRQ locks held, can't
2024 	 * do normal wait for completion; instead poll.
2025 	 */
2026 	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
2027 		unsigned long flags;
2028 
2029 		/* 0xFFFF means an invalid PCI VENDOR ID. */
2030 		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
2031 			dev_err_once(&hbus->hdev->device,
2032 				     "the device has gone\n");
2033 			goto enable_tasklet;
2034 		}
2035 
2036 		/*
2037 		 * Make sure that the ring buffer data structure doesn't get
2038 		 * freed while we dereference the ring buffer pointer.  Test
2039 		 * for the channel's onchannel_callback being NULL within a
2040 		 * sched_lock critical section.  See also the inline comments
2041 		 * in vmbus_reset_channel_cb().
2042 		 */
2043 		spin_lock_irqsave(&channel->sched_lock, flags);
2044 		if (unlikely(channel->onchannel_callback == NULL)) {
2045 			spin_unlock_irqrestore(&channel->sched_lock, flags);
2046 			goto enable_tasklet;
2047 		}
2048 		hv_pci_onchannelcallback(hbus);
2049 		spin_unlock_irqrestore(&channel->sched_lock, flags);
2050 
2051 		udelay(100);
2052 	}
2053 
2054 	tasklet_enable(&channel->callback_event);
2055 
2056 	if (comp.comp_pkt.completion_status < 0) {
2057 		dev_err(&hbus->hdev->device,
2058 			"Request for interrupt failed: 0x%x\n",
2059 			comp.comp_pkt.completion_status);
2060 		goto free_int_desc;
2061 	}
2062 
2063 	/*
2064 	 * Record the assignment so that this can be unwound later. Using
2065 	 * irq_set_chip_data() here would be appropriate, but the lock it takes
2066 	 * is already held.
2067 	 */
2068 	*int_desc = comp.int_desc;
2069 	data->chip_data = int_desc;
2070 
2071 	/* Pass up the result.
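	 * As a worked example with made-up values: a host reply of
	 * int_desc.address = 0xfee01004 and int_desc.data = 0x21 is split
	 * below into address_hi = 0x0, address_lo = 0xfee01004 and
	 * data = 0x21 for the function's MSI capability.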
*/ 2072 msg->address_hi = comp.int_desc.address >> 32; 2073 msg->address_lo = comp.int_desc.address & 0xffffffff; 2074 msg->data = comp.int_desc.data; 2075 2076 put_pcichild(hpdev); 2077 return; 2078 2079 enable_tasklet: 2080 tasklet_enable(&channel->callback_event); 2081 /* 2082 * The completion packet on the stack becomes invalid after 'return'; 2083 * remove the ID from the VMbus requestor if the identifier is still 2084 * mapped to/associated with the packet. (The identifier could have 2085 * been 're-used', i.e., already removed and (re-)mapped.) 2086 * 2087 * Cf. hv_pci_onchannelcallback(). 2088 */ 2089 vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt); 2090 free_int_desc: 2091 kfree(int_desc); 2092 drop_reference: 2093 put_pcichild(hpdev); 2094 return_null_message: 2095 msg->address_hi = 0; 2096 msg->address_lo = 0; 2097 msg->data = 0; 2098 } 2099 2100 static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain, 2101 struct irq_domain *real_parent, struct msi_domain_info *info) 2102 { 2103 struct irq_chip *chip = info->chip; 2104 2105 if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) 2106 return false; 2107 2108 info->ops->msi_prepare = hv_msi_prepare; 2109 2110 chip->irq_set_affinity = irq_chip_set_affinity_parent; 2111 2112 if (IS_ENABLED(CONFIG_X86)) 2113 chip->flags |= IRQCHIP_MOVE_DEFERRED; 2114 2115 return true; 2116 } 2117 2118 #define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ 2119 MSI_FLAG_USE_DEF_CHIP_OPS | \ 2120 MSI_FLAG_PCI_MSI_MASK_PARENT) 2121 #define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \ 2122 MSI_FLAG_PCI_MSIX | \ 2123 MSI_FLAG_PCI_MSIX_ALLOC_DYN | \ 2124 MSI_GENERIC_FLAGS_MASK) 2125 2126 static const struct msi_parent_ops hv_pcie_msi_parent_ops = { 2127 .required_flags = HV_PCIE_MSI_FLAGS_REQUIRED, 2128 .supported_flags = HV_PCIE_MSI_FLAGS_SUPPORTED, 2129 .bus_select_token = DOMAIN_BUS_PCI_MSI, 2130 .chip_flags = HV_MSI_CHIP_FLAGS, 2131 .prefix = "HV-", 2132 .init_dev_msi_info = hv_pcie_init_dev_msi_info, 2133 }; 2134 2135 /* HW Interrupt Chip Descriptor */ 2136 static struct irq_chip hv_msi_irq_chip = { 2137 .name = "Hyper-V PCIe MSI", 2138 .irq_compose_msi_msg = hv_compose_msi_msg, 2139 .irq_set_affinity = irq_chip_set_affinity_parent, 2140 .irq_ack = irq_chip_ack_parent, 2141 .irq_eoi = irq_chip_eoi_parent, 2142 .irq_mask = hv_irq_mask, 2143 .irq_unmask = hv_irq_unmask, 2144 }; 2145 2146 static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, 2147 void *arg) 2148 { 2149 /* 2150 * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg() 2151 * should be moved here. 
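	 * A possible shape for that change (sketch only, not what the code
	 * does today) would be to kzalloc() one tran_int_desc per IRQ in
	 * the loop below and attach it as chip_data, leaving
	 * hv_compose_msi_msg() to merely fill it in.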
2152 */ 2153 int ret; 2154 2155 ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg); 2156 if (ret < 0) 2157 return ret; 2158 2159 for (int i = 0; i < nr_irqs; i++) { 2160 irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL); 2161 if (IS_ENABLED(CONFIG_X86)) 2162 __irq_set_handler(virq + i, handle_edge_irq, 0, "edge"); 2163 } 2164 2165 return 0; 2166 } 2167 2168 static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs) 2169 { 2170 struct msi_domain_info *info = d->host_data; 2171 2172 for (int i = 0; i < nr_irqs; i++) 2173 hv_msi_free(d, info, virq + i); 2174 2175 irq_domain_free_irqs_top(d, virq, nr_irqs); 2176 } 2177 2178 static const struct irq_domain_ops hv_pcie_domain_ops = { 2179 .alloc = hv_pcie_domain_alloc, 2180 .free = hv_pcie_domain_free, 2181 }; 2182 2183 /** 2184 * hv_pcie_init_irq_domain() - Initialize IRQ domain 2185 * @hbus: The root PCI bus 2186 * 2187 * This function creates an IRQ domain which will be used for 2188 * interrupts from devices that have been passed through. These 2189 * devices only support MSI and MSI-X, not line-based interrupts 2190 * or simulations of line-based interrupts through PCIe's 2191 * fabric-layer messages. Because interrupts are remapped, we 2192 * can support multi-message MSI here. 2193 * 2194 * Return: '0' on success and error value on failure 2195 */ 2196 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) 2197 { 2198 struct irq_domain_info info = { 2199 .fwnode = hbus->fwnode, 2200 .ops = &hv_pcie_domain_ops, 2201 .host_data = hbus, 2202 .parent = hv_pci_get_root_domain(), 2203 }; 2204 2205 hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops); 2206 if (!hbus->irq_domain) { 2207 dev_err(&hbus->hdev->device, 2208 "Failed to build an MSI IRQ domain\n"); 2209 return -ENODEV; 2210 } 2211 2212 dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain); 2213 2214 return 0; 2215 } 2216 2217 /** 2218 * get_bar_size() - Get the address space consumed by a BAR 2219 * @bar_val: Value that a BAR returned after -1 was written 2220 * to it. 2221 * 2222 * This function returns the size of the BAR, rounded up to 1 2223 * page. It has to be rounded up because the hypervisor's page 2224 * table entry that maps the BAR into the VM can't specify an 2225 * offset within a page. The invariant is that the hypervisor 2226 * must place any BARs of smaller than page length at the 2227 * beginning of a page. 2228 * 2229 * Return: Size in bytes of the consumed MMIO space. 2230 */ 2231 static u64 get_bar_size(u64 bar_val) 2232 { 2233 return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)), 2234 PAGE_SIZE); 2235 } 2236 2237 /** 2238 * survey_child_resources() - Total all MMIO requirements 2239 * @hbus: Root PCI bus, as understood by this driver 2240 */ 2241 static void survey_child_resources(struct hv_pcibus_device *hbus) 2242 { 2243 struct hv_pci_dev *hpdev; 2244 resource_size_t bar_size = 0; 2245 unsigned long flags; 2246 struct completion *event; 2247 u64 bar_val; 2248 int i; 2249 2250 /* If nobody is waiting on the answer, don't compute it. */ 2251 event = xchg(&hbus->survey_event, NULL); 2252 if (!event) 2253 return; 2254 2255 /* If the answer has already been computed, go with it. 
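	 * (The summation below is purely additive per child: e.g. a child
	 * with a 16 MiB 64-bit BAR and a 4 KiB 32-bit BAR contributes
	 * 16 MiB to high_mmio_space and 4 KiB to low_mmio_space.)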
*/ 2256 if (hbus->low_mmio_space || hbus->high_mmio_space) { 2257 complete(event); 2258 return; 2259 } 2260 2261 spin_lock_irqsave(&hbus->device_list_lock, flags); 2262 2263 /* 2264 * Due to an interesting quirk of the PCI spec, all memory regions 2265 * for a child device are a power of 2 in size and aligned in memory, 2266 * so it's sufficient to just add them up without tracking alignment. 2267 */ 2268 list_for_each_entry(hpdev, &hbus->children, list_entry) { 2269 for (i = 0; i < PCI_STD_NUM_BARS; i++) { 2270 if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO) 2271 dev_err(&hbus->hdev->device, 2272 "There's an I/O BAR in this list!\n"); 2273 2274 if (hpdev->probed_bar[i] != 0) { 2275 /* 2276 * A probed BAR has all the upper bits set that 2277 * can be changed. 2278 */ 2279 2280 bar_val = hpdev->probed_bar[i]; 2281 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) 2282 bar_val |= 2283 ((u64)hpdev->probed_bar[++i] << 32); 2284 else 2285 bar_val |= 0xffffffff00000000ULL; 2286 2287 bar_size = get_bar_size(bar_val); 2288 2289 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) 2290 hbus->high_mmio_space += bar_size; 2291 else 2292 hbus->low_mmio_space += bar_size; 2293 } 2294 } 2295 } 2296 2297 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2298 complete(event); 2299 } 2300 2301 /** 2302 * prepopulate_bars() - Fill in BARs with defaults 2303 * @hbus: Root PCI bus, as understood by this driver 2304 * 2305 * The core PCI driver code seems much, much happier if the BARs 2306 * for a device have values upon first scan. So fill them in. 2307 * The algorithm below works down from large sizes to small, 2308 * attempting to pack the assignments optimally. The assumption, 2309 * enforced in other parts of the code, is that the beginning of 2310 * the memory-mapped I/O space will be aligned on the largest 2311 * BAR size. 2312 */ 2313 static void prepopulate_bars(struct hv_pcibus_device *hbus) 2314 { 2315 resource_size_t high_size = 0; 2316 resource_size_t low_size = 0; 2317 resource_size_t high_base = 0; 2318 resource_size_t low_base = 0; 2319 resource_size_t bar_size; 2320 struct hv_pci_dev *hpdev; 2321 unsigned long flags; 2322 u64 bar_val; 2323 u32 command; 2324 bool high; 2325 int i; 2326 2327 if (hbus->low_mmio_space) { 2328 low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); 2329 low_base = hbus->low_mmio_res->start; 2330 } 2331 2332 if (hbus->high_mmio_space) { 2333 high_size = 1ULL << 2334 (63 - __builtin_clzll(hbus->high_mmio_space)); 2335 high_base = hbus->high_mmio_res->start; 2336 } 2337 2338 spin_lock_irqsave(&hbus->device_list_lock, flags); 2339 2340 /* 2341 * Clear the memory enable bit, in case it's already set. This occurs 2342 * in the suspend path of hibernation, where the device is suspended, 2343 * resumed and suspended again: see hibernation_snapshot() and 2344 * hibernation_platform_enter(). 2345 * 2346 * If the memory enable bit is already set, Hyper-V silently ignores 2347 * the below BAR updates, and the related PCI device driver can not 2348 * work, because reading from the device register(s) always returns 2349 * 0xFFFFFFFF (PCI_ERROR_RESPONSE). 2350 */ 2351 list_for_each_entry(hpdev, &hbus->children, list_entry) { 2352 _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command); 2353 command &= ~PCI_COMMAND_MEMORY; 2354 _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command); 2355 } 2356 2357 /* Pick addresses for the BARs. 
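	 * Worked example (illustrative): with one 8 MiB and one 4 KiB low
	 * BAR, low_size starts at 8 MiB, the largest power of two not
	 * exceeding the 8 MiB + 4 KiB total. The first pass places the
	 * 8 MiB BAR at low_base; later passes halve low_size until it
	 * reaches 4 KiB, at which point the small BAR is placed directly
	 * behind it, so every BAR stays naturally aligned.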
 */
2358 	do {
2359 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2360 			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2361 				bar_val = hpdev->probed_bar[i];
2362 				if (bar_val == 0)
2363 					continue;
2364 				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2365 				if (high) {
2366 					bar_val |=
2367 						((u64)hpdev->probed_bar[i + 1]
2368 						 << 32);
2369 				} else {
2370 					bar_val |= 0xffffffffULL << 32;
2371 				}
2372 				bar_size = get_bar_size(bar_val);
2373 				if (high) {
2374 					if (high_size != bar_size) {
2375 						i++;
2376 						continue;
2377 					}
2378 					_hv_pcifront_write_config(hpdev,
2379 						PCI_BASE_ADDRESS_0 + (4 * i),
2380 						4,
2381 						(u32)(high_base & 0xffffff00));
2382 					i++;
2383 					_hv_pcifront_write_config(hpdev,
2384 						PCI_BASE_ADDRESS_0 + (4 * i),
2385 						4, (u32)(high_base >> 32));
2386 					high_base += bar_size;
2387 				} else {
2388 					if (low_size != bar_size)
2389 						continue;
2390 					_hv_pcifront_write_config(hpdev,
2391 						PCI_BASE_ADDRESS_0 + (4 * i),
2392 						4,
2393 						(u32)(low_base & 0xffffff00));
2394 					low_base += bar_size;
2395 				}
2396 			}
2397 			if (high_size <= 1 && low_size <= 1) {
2398 				/*
2399 				 * No need to set the PCI_COMMAND_MEMORY bit as
2400 				 * the core PCI driver doesn't require the bit
2401 				 * to be pre-set. Actually here we intentionally
2402 				 * keep the bit off so that the PCI BAR probing
2403 				 * in the core PCI driver doesn't cause Hyper-V
2404 				 * to unnecessarily unmap/map the virtual BARs
2405 				 * from/to the physical BARs multiple times.
2406 				 * This reduces the VM boot time significantly
2407 				 * if the BAR sizes are huge.
2408 				 */
2409 				break;
2410 			}
2411 		}
2412 
2413 		high_size >>= 1;
2414 		low_size >>= 1;
2415 	} while (high_size || low_size);
2416 
2417 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2418 }
2419 
2420 /*
2421  * Assign entries in sysfs pci slot directory.
2422  *
2423  * Note that this function does not need to lock the children list
2424  * because it is called from pci_devices_present_work(), which
2425  * is serialized with hv_eject_device_work() because they are on the
2426  * same ordered workqueue. Therefore the hbus->children list will not
2427  * change even when pci_create_slot() sleeps.
2428  */
2429 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2430 {
2431 	struct hv_pci_dev *hpdev;
2432 	char name[SLOT_NAME_SIZE];
2433 	int slot_nr;
2434 
2435 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2436 		if (hpdev->pci_slot)
2437 			continue;
2438 
2439 		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2440 		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2441 		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2442 						  name, NULL);
2443 		if (IS_ERR(hpdev->pci_slot)) {
2444 			pr_warn("pci_create_slot %s failed\n", name);
2445 			hpdev->pci_slot = NULL;
2446 		}
2447 	}
2448 }
2449 
2450 /*
2451  * Remove entries in sysfs pci slot directory.
2452 */ 2453 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus) 2454 { 2455 struct hv_pci_dev *hpdev; 2456 2457 list_for_each_entry(hpdev, &hbus->children, list_entry) { 2458 if (!hpdev->pci_slot) 2459 continue; 2460 pci_destroy_slot(hpdev->pci_slot); 2461 hpdev->pci_slot = NULL; 2462 } 2463 } 2464 2465 /* 2466 * Set NUMA node for the devices on the bus 2467 */ 2468 static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) 2469 { 2470 struct pci_dev *dev; 2471 struct pci_bus *bus = hbus->bridge->bus; 2472 struct hv_pci_dev *hv_dev; 2473 2474 list_for_each_entry(dev, &bus->devices, bus_list) { 2475 hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn)); 2476 if (!hv_dev) 2477 continue; 2478 2479 if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && 2480 hv_dev->desc.virtual_numa_node < num_possible_nodes()) 2481 /* 2482 * The kernel may boot with some NUMA nodes offline 2483 * (e.g. in a KDUMP kernel) or with NUMA disabled via 2484 * "numa=off". In those cases, adjust the host provided 2485 * NUMA node to a valid NUMA node used by the kernel. 2486 */ 2487 set_dev_node(&dev->dev, 2488 numa_map_to_online_node( 2489 hv_dev->desc.virtual_numa_node)); 2490 2491 put_pcichild(hv_dev); 2492 } 2493 } 2494 2495 /** 2496 * create_root_hv_pci_bus() - Expose a new root PCI bus 2497 * @hbus: Root PCI bus, as understood by this driver 2498 * 2499 * Return: 0 on success, -errno on failure 2500 */ 2501 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) 2502 { 2503 int error; 2504 struct pci_host_bridge *bridge = hbus->bridge; 2505 2506 bridge->dev.parent = &hbus->hdev->device; 2507 bridge->sysdata = &hbus->sysdata; 2508 bridge->ops = &hv_pcifront_ops; 2509 2510 error = pci_scan_root_bus_bridge(bridge); 2511 if (error) 2512 return error; 2513 2514 pci_lock_rescan_remove(); 2515 hv_pci_assign_numa_node(hbus); 2516 pci_bus_assign_resources(bridge->bus); 2517 hv_pci_assign_slots(hbus); 2518 pci_bus_add_devices(bridge->bus); 2519 pci_unlock_rescan_remove(); 2520 hbus->state = hv_pcibus_installed; 2521 return 0; 2522 } 2523 2524 struct q_res_req_compl { 2525 struct completion host_event; 2526 struct hv_pci_dev *hpdev; 2527 }; 2528 2529 /** 2530 * q_resource_requirements() - Query Resource Requirements 2531 * @context: The completion context. 2532 * @resp: The response that came from the host. 2533 * @resp_packet_size: The size in bytes of resp. 2534 * 2535 * This function is invoked on completion of a Query Resource 2536 * Requirements packet. 2537 */ 2538 static void q_resource_requirements(void *context, struct pci_response *resp, 2539 int resp_packet_size) 2540 { 2541 struct q_res_req_compl *completion = context; 2542 struct pci_q_res_req_response *q_res_req = 2543 (struct pci_q_res_req_response *)resp; 2544 s32 status; 2545 int i; 2546 2547 status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status; 2548 if (status < 0) { 2549 dev_err(&completion->hpdev->hbus->hdev->device, 2550 "query resource requirements failed: %x\n", 2551 status); 2552 } else { 2553 for (i = 0; i < PCI_STD_NUM_BARS; i++) { 2554 completion->hpdev->probed_bar[i] = 2555 q_res_req->probed_bar[i]; 2556 } 2557 } 2558 2559 complete(&completion->host_event); 2560 } 2561 2562 /** 2563 * new_pcichild_device() - Create a new child device 2564 * @hbus: The internal struct tracking this root PCI bus. 2565 * @desc: The information supplied so far from the host 2566 * about the device. 
2567 * 2568 * This function creates the tracking structure for a new child 2569 * device and kicks off the process of figuring out what it is. 2570 * 2571 * Return: Pointer to the new tracking struct 2572 */ 2573 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, 2574 struct hv_pcidev_description *desc) 2575 { 2576 struct hv_pci_dev *hpdev; 2577 struct pci_child_message *res_req; 2578 struct q_res_req_compl comp_pkt; 2579 struct { 2580 struct pci_packet init_packet; 2581 u8 buffer[sizeof(struct pci_child_message)]; 2582 } pkt; 2583 unsigned long flags; 2584 int ret; 2585 2586 hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL); 2587 if (!hpdev) 2588 return NULL; 2589 2590 hpdev->hbus = hbus; 2591 2592 memset(&pkt, 0, sizeof(pkt)); 2593 init_completion(&comp_pkt.host_event); 2594 comp_pkt.hpdev = hpdev; 2595 pkt.init_packet.compl_ctxt = &comp_pkt; 2596 pkt.init_packet.completion_func = q_resource_requirements; 2597 res_req = (struct pci_child_message *)pkt.buffer; 2598 res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; 2599 res_req->wslot.slot = desc->win_slot.slot; 2600 2601 ret = vmbus_sendpacket(hbus->hdev->channel, res_req, 2602 sizeof(struct pci_child_message), 2603 (unsigned long)&pkt.init_packet, 2604 VM_PKT_DATA_INBAND, 2605 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 2606 if (ret) 2607 goto error; 2608 2609 if (wait_for_response(hbus->hdev, &comp_pkt.host_event)) 2610 goto error; 2611 2612 hpdev->desc = *desc; 2613 refcount_set(&hpdev->refs, 1); 2614 get_pcichild(hpdev); 2615 spin_lock_irqsave(&hbus->device_list_lock, flags); 2616 2617 list_add_tail(&hpdev->list_entry, &hbus->children); 2618 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2619 return hpdev; 2620 2621 error: 2622 kfree(hpdev); 2623 return NULL; 2624 } 2625 2626 /** 2627 * get_pcichild_wslot() - Find device from slot 2628 * @hbus: Root PCI bus, as understood by this driver 2629 * @wslot: Location on the bus 2630 * 2631 * This function looks up a PCI device and returns the internal 2632 * representation of it. It acquires a reference on it, so that 2633 * the device won't be deleted while somebody is using it. The 2634 * caller is responsible for calling put_pcichild() to release 2635 * this reference. 2636 * 2637 * Return: Internal representation of a PCI device 2638 */ 2639 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, 2640 u32 wslot) 2641 { 2642 unsigned long flags; 2643 struct hv_pci_dev *iter, *hpdev = NULL; 2644 2645 spin_lock_irqsave(&hbus->device_list_lock, flags); 2646 list_for_each_entry(iter, &hbus->children, list_entry) { 2647 if (iter->desc.win_slot.slot == wslot) { 2648 hpdev = iter; 2649 get_pcichild(hpdev); 2650 break; 2651 } 2652 } 2653 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2654 2655 return hpdev; 2656 } 2657 2658 /** 2659 * pci_devices_present_work() - Handle new list of child devices 2660 * @work: Work struct embedded in struct hv_dr_work 2661 * 2662 * "Bus Relations" is the Windows term for "children of this 2663 * bus." The terminology is preserved here for people trying to 2664 * debug the interaction between Hyper-V and Linux. This 2665 * function is called when the parent partition reports a list 2666 * of functions that should be observed under this PCI Express 2667 * port (bus). 2668 * 2669 * This function updates the list, and must tolerate being 2670 * called multiple times with the same information. 
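 * For example, two back-to-back PCI_BUS_RELATIONS messages naming the
 * same single function must leave exactly one tracked child behind.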
The typical 2671 * number of child devices is one, with very atypical cases 2672 * involving three or four, so the algorithms used here can be 2673 * simple and inefficient. 2674 * 2675 * It must also treat the omission of a previously observed device as 2676 * notification that the device no longer exists. 2677 * 2678 * Note that this function is serialized with hv_eject_device_work(), 2679 * because both are pushed to the ordered workqueue hbus->wq. 2680 */ 2681 static void pci_devices_present_work(struct work_struct *work) 2682 { 2683 u32 child_no; 2684 bool found; 2685 struct hv_pcidev_description *new_desc; 2686 struct hv_pci_dev *hpdev; 2687 struct hv_pcibus_device *hbus; 2688 struct list_head removed; 2689 struct hv_dr_work *dr_wrk; 2690 struct hv_dr_state *dr = NULL; 2691 unsigned long flags; 2692 2693 dr_wrk = container_of(work, struct hv_dr_work, wrk); 2694 hbus = dr_wrk->bus; 2695 kfree(dr_wrk); 2696 2697 INIT_LIST_HEAD(&removed); 2698 2699 /* Pull this off the queue and process it if it was the last one. */ 2700 spin_lock_irqsave(&hbus->device_list_lock, flags); 2701 while (!list_empty(&hbus->dr_list)) { 2702 dr = list_first_entry(&hbus->dr_list, struct hv_dr_state, 2703 list_entry); 2704 list_del(&dr->list_entry); 2705 2706 /* Throw this away if the list still has stuff in it. */ 2707 if (!list_empty(&hbus->dr_list)) { 2708 kfree(dr); 2709 continue; 2710 } 2711 } 2712 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2713 2714 if (!dr) 2715 return; 2716 2717 mutex_lock(&hbus->state_lock); 2718 2719 /* First, mark all existing children as reported missing. */ 2720 spin_lock_irqsave(&hbus->device_list_lock, flags); 2721 list_for_each_entry(hpdev, &hbus->children, list_entry) { 2722 hpdev->reported_missing = true; 2723 } 2724 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2725 2726 /* Next, add back any reported devices. */ 2727 for (child_no = 0; child_no < dr->device_count; child_no++) { 2728 found = false; 2729 new_desc = &dr->func[child_no]; 2730 2731 spin_lock_irqsave(&hbus->device_list_lock, flags); 2732 list_for_each_entry(hpdev, &hbus->children, list_entry) { 2733 if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) && 2734 (hpdev->desc.v_id == new_desc->v_id) && 2735 (hpdev->desc.d_id == new_desc->d_id) && 2736 (hpdev->desc.ser == new_desc->ser)) { 2737 hpdev->reported_missing = false; 2738 found = true; 2739 } 2740 } 2741 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2742 2743 if (!found) { 2744 hpdev = new_pcichild_device(hbus, new_desc); 2745 if (!hpdev) 2746 dev_err(&hbus->hdev->device, 2747 "couldn't record a child device.\n"); 2748 } 2749 } 2750 2751 /* Move missing children to a list on the stack. */ 2752 spin_lock_irqsave(&hbus->device_list_lock, flags); 2753 do { 2754 found = false; 2755 list_for_each_entry(hpdev, &hbus->children, list_entry) { 2756 if (hpdev->reported_missing) { 2757 found = true; 2758 put_pcichild(hpdev); 2759 list_move_tail(&hpdev->list_entry, &removed); 2760 break; 2761 } 2762 } 2763 } while (found); 2764 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2765 2766 /* Delete everything that should no longer exist. 
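	 * Each device on 'removed' was created with two references in
	 * new_pcichild_device(); one was dropped when it was moved onto
	 * the list above, so the put_pcichild() below drops the last one
	 * and frees it.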
*/ 2767 while (!list_empty(&removed)) { 2768 hpdev = list_first_entry(&removed, struct hv_pci_dev, 2769 list_entry); 2770 list_del(&hpdev->list_entry); 2771 2772 if (hpdev->pci_slot) 2773 pci_destroy_slot(hpdev->pci_slot); 2774 2775 put_pcichild(hpdev); 2776 } 2777 2778 switch (hbus->state) { 2779 case hv_pcibus_installed: 2780 /* 2781 * Tell the core to rescan bus 2782 * because there may have been changes. 2783 */ 2784 pci_lock_rescan_remove(); 2785 pci_scan_child_bus(hbus->bridge->bus); 2786 hv_pci_assign_numa_node(hbus); 2787 hv_pci_assign_slots(hbus); 2788 pci_unlock_rescan_remove(); 2789 break; 2790 2791 case hv_pcibus_init: 2792 case hv_pcibus_probed: 2793 survey_child_resources(hbus); 2794 break; 2795 2796 default: 2797 break; 2798 } 2799 2800 mutex_unlock(&hbus->state_lock); 2801 2802 kfree(dr); 2803 } 2804 2805 /** 2806 * hv_pci_start_relations_work() - Queue work to start device discovery 2807 * @hbus: Root PCI bus, as understood by this driver 2808 * @dr: The list of children returned from host 2809 * 2810 * Return: 0 on success, -errno on failure 2811 */ 2812 static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus, 2813 struct hv_dr_state *dr) 2814 { 2815 struct hv_dr_work *dr_wrk; 2816 unsigned long flags; 2817 bool pending_dr; 2818 2819 if (hbus->state == hv_pcibus_removing) { 2820 dev_info(&hbus->hdev->device, 2821 "PCI VMBus BUS_RELATIONS: ignored\n"); 2822 return -ENOENT; 2823 } 2824 2825 dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT); 2826 if (!dr_wrk) 2827 return -ENOMEM; 2828 2829 INIT_WORK(&dr_wrk->wrk, pci_devices_present_work); 2830 dr_wrk->bus = hbus; 2831 2832 spin_lock_irqsave(&hbus->device_list_lock, flags); 2833 /* 2834 * If pending_dr is true, we have already queued a work, 2835 * which will see the new dr. Otherwise, we need to 2836 * queue a new work. 2837 */ 2838 pending_dr = !list_empty(&hbus->dr_list); 2839 list_add_tail(&dr->list_entry, &hbus->dr_list); 2840 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2841 2842 if (pending_dr) 2843 kfree(dr_wrk); 2844 else 2845 queue_work(hbus->wq, &dr_wrk->wrk); 2846 2847 return 0; 2848 } 2849 2850 /** 2851 * hv_pci_devices_present() - Handle list of new children 2852 * @hbus: Root PCI bus, as understood by this driver 2853 * @relations: Packet from host listing children 2854 * 2855 * Process a new list of devices on the bus. The list of devices is 2856 * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS, 2857 * whenever a new list of devices for this bus appears. 
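 *
 * Note that this runs from the VMbus channel callback, so the
 * relations are snapshotted into a freshly allocated hv_dr_state
 * (GFP_NOWAIT) before being queued to hbus->wq.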
2858 */ 2859 static void hv_pci_devices_present(struct hv_pcibus_device *hbus, 2860 struct pci_bus_relations *relations) 2861 { 2862 struct hv_dr_state *dr; 2863 int i; 2864 2865 dr = kzalloc(struct_size(dr, func, relations->device_count), 2866 GFP_NOWAIT); 2867 if (!dr) 2868 return; 2869 2870 dr->device_count = relations->device_count; 2871 for (i = 0; i < dr->device_count; i++) { 2872 dr->func[i].v_id = relations->func[i].v_id; 2873 dr->func[i].d_id = relations->func[i].d_id; 2874 dr->func[i].rev = relations->func[i].rev; 2875 dr->func[i].prog_intf = relations->func[i].prog_intf; 2876 dr->func[i].subclass = relations->func[i].subclass; 2877 dr->func[i].base_class = relations->func[i].base_class; 2878 dr->func[i].subsystem_id = relations->func[i].subsystem_id; 2879 dr->func[i].win_slot = relations->func[i].win_slot; 2880 dr->func[i].ser = relations->func[i].ser; 2881 } 2882 2883 if (hv_pci_start_relations_work(hbus, dr)) 2884 kfree(dr); 2885 } 2886 2887 /** 2888 * hv_pci_devices_present2() - Handle list of new children 2889 * @hbus: Root PCI bus, as understood by this driver 2890 * @relations: Packet from host listing children 2891 * 2892 * This function is the v2 version of hv_pci_devices_present() 2893 */ 2894 static void hv_pci_devices_present2(struct hv_pcibus_device *hbus, 2895 struct pci_bus_relations2 *relations) 2896 { 2897 struct hv_dr_state *dr; 2898 int i; 2899 2900 dr = kzalloc(struct_size(dr, func, relations->device_count), 2901 GFP_NOWAIT); 2902 if (!dr) 2903 return; 2904 2905 dr->device_count = relations->device_count; 2906 for (i = 0; i < dr->device_count; i++) { 2907 dr->func[i].v_id = relations->func[i].v_id; 2908 dr->func[i].d_id = relations->func[i].d_id; 2909 dr->func[i].rev = relations->func[i].rev; 2910 dr->func[i].prog_intf = relations->func[i].prog_intf; 2911 dr->func[i].subclass = relations->func[i].subclass; 2912 dr->func[i].base_class = relations->func[i].base_class; 2913 dr->func[i].subsystem_id = relations->func[i].subsystem_id; 2914 dr->func[i].win_slot = relations->func[i].win_slot; 2915 dr->func[i].ser = relations->func[i].ser; 2916 dr->func[i].flags = relations->func[i].flags; 2917 dr->func[i].virtual_numa_node = 2918 relations->func[i].virtual_numa_node; 2919 } 2920 2921 if (hv_pci_start_relations_work(hbus, dr)) 2922 kfree(dr); 2923 } 2924 2925 /** 2926 * hv_eject_device_work() - Asynchronously handles ejection 2927 * @work: Work struct embedded in internal device struct 2928 * 2929 * This function handles ejecting a device. Windows will 2930 * attempt to gracefully eject a device, waiting 60 seconds to 2931 * hear back from the guest OS that this completed successfully. 2932 * If this timer expires, the device will be forcibly removed. 2933 */ 2934 static void hv_eject_device_work(struct work_struct *work) 2935 { 2936 struct pci_eject_response *ejct_pkt; 2937 struct hv_pcibus_device *hbus; 2938 struct hv_pci_dev *hpdev; 2939 struct pci_dev *pdev; 2940 unsigned long flags; 2941 int wslot; 2942 struct { 2943 struct pci_packet pkt; 2944 u8 buffer[sizeof(struct pci_eject_response)]; 2945 } ctxt; 2946 2947 hpdev = container_of(work, struct hv_pci_dev, wrk); 2948 hbus = hpdev->hbus; 2949 2950 mutex_lock(&hbus->state_lock); 2951 2952 /* 2953 * Ejection can come before or after the PCI bus has been set up, so 2954 * attempt to find it and tear down the bus state, if it exists. This 2955 * must be done without constructs like pci_domain_nr(hbus->bridge->bus) 2956 * because hbus->bridge->bus may not exist yet. 
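	 * That is why pci_get_domain_bus_and_slot() below is keyed by
	 * hbus->bridge->domain_nr, which is valid from probe time onward,
	 * rather than by a bus pointer.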
2957 */ 2958 wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); 2959 pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot); 2960 if (pdev) { 2961 pci_lock_rescan_remove(); 2962 pci_stop_and_remove_bus_device(pdev); 2963 pci_dev_put(pdev); 2964 pci_unlock_rescan_remove(); 2965 } 2966 2967 spin_lock_irqsave(&hbus->device_list_lock, flags); 2968 list_del(&hpdev->list_entry); 2969 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 2970 2971 if (hpdev->pci_slot) 2972 pci_destroy_slot(hpdev->pci_slot); 2973 2974 memset(&ctxt, 0, sizeof(ctxt)); 2975 ejct_pkt = (struct pci_eject_response *)ctxt.buffer; 2976 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; 2977 ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; 2978 vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, 2979 sizeof(*ejct_pkt), 0, 2980 VM_PKT_DATA_INBAND, 0); 2981 2982 /* For the get_pcichild() in hv_pci_eject_device() */ 2983 put_pcichild(hpdev); 2984 /* For the two refs got in new_pcichild_device() */ 2985 put_pcichild(hpdev); 2986 put_pcichild(hpdev); 2987 /* hpdev has been freed. Do not use it any more. */ 2988 2989 mutex_unlock(&hbus->state_lock); 2990 } 2991 2992 /** 2993 * hv_pci_eject_device() - Handles device ejection 2994 * @hpdev: Internal device tracking struct 2995 * 2996 * This function is invoked when an ejection packet arrives. It 2997 * just schedules work so that we don't re-enter the packet 2998 * delivery code handling the ejection. 2999 */ 3000 static void hv_pci_eject_device(struct hv_pci_dev *hpdev) 3001 { 3002 struct hv_pcibus_device *hbus = hpdev->hbus; 3003 struct hv_device *hdev = hbus->hdev; 3004 3005 if (hbus->state == hv_pcibus_removing) { 3006 dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n"); 3007 return; 3008 } 3009 3010 get_pcichild(hpdev); 3011 INIT_WORK(&hpdev->wrk, hv_eject_device_work); 3012 queue_work(hbus->wq, &hpdev->wrk); 3013 } 3014 3015 /** 3016 * hv_pci_onchannelcallback() - Handles incoming packets 3017 * @context: Internal bus tracking struct 3018 * 3019 * This function is invoked whenever the host sends a packet to 3020 * this channel (which is private to this root PCI bus). 3021 */ 3022 static void hv_pci_onchannelcallback(void *context) 3023 { 3024 const int packet_size = 0x100; 3025 int ret; 3026 struct hv_pcibus_device *hbus = context; 3027 struct vmbus_channel *chan = hbus->hdev->channel; 3028 u32 bytes_recvd; 3029 u64 req_id, req_addr; 3030 struct vmpacket_descriptor *desc; 3031 unsigned char *buffer; 3032 int bufferlen = packet_size; 3033 struct pci_packet *comp_packet; 3034 struct pci_response *response; 3035 struct pci_incoming_message *new_message; 3036 struct pci_bus_relations *bus_rel; 3037 struct pci_bus_relations2 *bus_rel2; 3038 struct pci_dev_inval_block *inval; 3039 struct pci_dev_incoming *dev_message; 3040 struct hv_pci_dev *hpdev; 3041 unsigned long flags; 3042 3043 buffer = kmalloc(bufferlen, GFP_ATOMIC); 3044 if (!buffer) 3045 return; 3046 3047 while (1) { 3048 ret = vmbus_recvpacket_raw(chan, buffer, bufferlen, 3049 &bytes_recvd, &req_id); 3050 3051 if (ret == -ENOBUFS) { 3052 kfree(buffer); 3053 /* Handle large packet */ 3054 bufferlen = bytes_recvd; 3055 buffer = kmalloc(bytes_recvd, GFP_ATOMIC); 3056 if (!buffer) 3057 return; 3058 continue; 3059 } 3060 3061 /* Zero length indicates there are no more packets. */ 3062 if (ret || !bytes_recvd) 3063 break; 3064 3065 /* 3066 * All incoming packets must be at least as large as a 3067 * response. 
3068 */ 3069 if (bytes_recvd <= sizeof(struct pci_response)) 3070 continue; 3071 desc = (struct vmpacket_descriptor *)buffer; 3072 3073 switch (desc->type) { 3074 case VM_PKT_COMP: 3075 3076 lock_requestor(chan, flags); 3077 req_addr = __vmbus_request_addr_match(chan, req_id, 3078 VMBUS_RQST_ADDR_ANY); 3079 if (req_addr == VMBUS_RQST_ERROR) { 3080 unlock_requestor(chan, flags); 3081 dev_err(&hbus->hdev->device, 3082 "Invalid transaction ID %llx\n", 3083 req_id); 3084 break; 3085 } 3086 comp_packet = (struct pci_packet *)req_addr; 3087 response = (struct pci_response *)buffer; 3088 /* 3089 * Call ->completion_func() within the critical section to make 3090 * sure that the packet pointer is still valid during the call: 3091 * here 'valid' means that there's a task still waiting for the 3092 * completion, and that the packet data is still on the waiting 3093 * task's stack. Cf. hv_compose_msi_msg(). 3094 */ 3095 comp_packet->completion_func(comp_packet->compl_ctxt, 3096 response, 3097 bytes_recvd); 3098 unlock_requestor(chan, flags); 3099 break; 3100 3101 case VM_PKT_DATA_INBAND: 3102 3103 new_message = (struct pci_incoming_message *)buffer; 3104 switch (new_message->message_type.type) { 3105 case PCI_BUS_RELATIONS: 3106 3107 bus_rel = (struct pci_bus_relations *)buffer; 3108 if (bytes_recvd < sizeof(*bus_rel) || 3109 bytes_recvd < 3110 struct_size(bus_rel, func, 3111 bus_rel->device_count)) { 3112 dev_err(&hbus->hdev->device, 3113 "bus relations too small\n"); 3114 break; 3115 } 3116 3117 hv_pci_devices_present(hbus, bus_rel); 3118 break; 3119 3120 case PCI_BUS_RELATIONS2: 3121 3122 bus_rel2 = (struct pci_bus_relations2 *)buffer; 3123 if (bytes_recvd < sizeof(*bus_rel2) || 3124 bytes_recvd < 3125 struct_size(bus_rel2, func, 3126 bus_rel2->device_count)) { 3127 dev_err(&hbus->hdev->device, 3128 "bus relations v2 too small\n"); 3129 break; 3130 } 3131 3132 hv_pci_devices_present2(hbus, bus_rel2); 3133 break; 3134 3135 case PCI_EJECT: 3136 3137 dev_message = (struct pci_dev_incoming *)buffer; 3138 if (bytes_recvd < sizeof(*dev_message)) { 3139 dev_err(&hbus->hdev->device, 3140 "eject message too small\n"); 3141 break; 3142 } 3143 hpdev = get_pcichild_wslot(hbus, 3144 dev_message->wslot.slot); 3145 if (hpdev) { 3146 hv_pci_eject_device(hpdev); 3147 put_pcichild(hpdev); 3148 } 3149 break; 3150 3151 case PCI_INVALIDATE_BLOCK: 3152 3153 inval = (struct pci_dev_inval_block *)buffer; 3154 if (bytes_recvd < sizeof(*inval)) { 3155 dev_err(&hbus->hdev->device, 3156 "invalidate message too small\n"); 3157 break; 3158 } 3159 hpdev = get_pcichild_wslot(hbus, 3160 inval->wslot.slot); 3161 if (hpdev) { 3162 if (hpdev->block_invalidate) { 3163 hpdev->block_invalidate( 3164 hpdev->invalidate_context, 3165 inval->block_mask); 3166 } 3167 put_pcichild(hpdev); 3168 } 3169 break; 3170 3171 default: 3172 dev_warn(&hbus->hdev->device, 3173 "Unimplemented protocol message %x\n", 3174 new_message->message_type.type); 3175 break; 3176 } 3177 break; 3178 3179 default: 3180 dev_err(&hbus->hdev->device, 3181 "unhandled packet type %d, tid %llx len %d\n", 3182 desc->type, req_id, bytes_recvd); 3183 break; 3184 } 3185 } 3186 3187 kfree(buffer); 3188 } 3189 3190 /** 3191 * hv_pci_protocol_negotiation() - Set up protocol 3192 * @hdev: VMBus's tracking struct for this root PCI bus. 3193 * @version: Array of supported channel protocol versions in 3194 * the order of probing - highest go first. 3195 * @num_version: Number of elements in the version array. 
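 *
 * For example, hv_pci_probe() passes pci_protocol_versions[], so a
 * WS2022 host would be expected to accept 1.4 on the first try, while
 * an original Win10 host keeps answering STATUS_REVISION_MISMATCH
 * until 1.1 is offered.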
3196 * 3197 * This driver is intended to support running on Windows 10 3198 * (server) and later versions. It will not run on earlier 3199 * versions, as they assume that many of the operations which 3200 * Linux needs accomplished with a spinlock held were done via 3201 * asynchronous messaging via VMBus. Windows 10 increases the 3202 * surface area of PCI emulation so that these actions can take 3203 * place by suspending a virtual processor for their duration. 3204 * 3205 * This function negotiates the channel protocol version, 3206 * failing if the host doesn't support the necessary protocol 3207 * level. 3208 */ 3209 static int hv_pci_protocol_negotiation(struct hv_device *hdev, 3210 enum pci_protocol_version_t version[], 3211 int num_version) 3212 { 3213 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); 3214 struct pci_version_request *version_req; 3215 struct hv_pci_compl comp_pkt; 3216 struct pci_packet *pkt; 3217 int ret; 3218 int i; 3219 3220 /* 3221 * Initiate the handshake with the host and negotiate 3222 * a version that the host can support. We start with the 3223 * highest version number and go down if the host cannot 3224 * support it. 3225 */ 3226 pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL); 3227 if (!pkt) 3228 return -ENOMEM; 3229 3230 init_completion(&comp_pkt.host_event); 3231 pkt->completion_func = hv_pci_generic_compl; 3232 pkt->compl_ctxt = &comp_pkt; 3233 version_req = (struct pci_version_request *)(pkt + 1); 3234 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; 3235 3236 for (i = 0; i < num_version; i++) { 3237 version_req->protocol_version = version[i]; 3238 ret = vmbus_sendpacket(hdev->channel, version_req, 3239 sizeof(struct pci_version_request), 3240 (unsigned long)pkt, VM_PKT_DATA_INBAND, 3241 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 3242 if (!ret) 3243 ret = wait_for_response(hdev, &comp_pkt.host_event); 3244 3245 if (ret) { 3246 dev_err(&hdev->device, 3247 "PCI Pass-through VSP failed to request version: %d", 3248 ret); 3249 goto exit; 3250 } 3251 3252 if (comp_pkt.completion_status >= 0) { 3253 hbus->protocol_version = version[i]; 3254 dev_info(&hdev->device, 3255 "PCI VMBus probing: Using version %#x\n", 3256 hbus->protocol_version); 3257 goto exit; 3258 } 3259 3260 if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { 3261 dev_err(&hdev->device, 3262 "PCI Pass-through VSP failed version request: %#x", 3263 comp_pkt.completion_status); 3264 ret = -EPROTO; 3265 goto exit; 3266 } 3267 3268 reinit_completion(&comp_pkt.host_event); 3269 } 3270 3271 dev_err(&hdev->device, 3272 "PCI pass-through VSP failed to find supported version"); 3273 ret = -EPROTO; 3274 3275 exit: 3276 kfree(pkt); 3277 return ret; 3278 } 3279 3280 /** 3281 * hv_pci_free_bridge_windows() - Release memory regions for the 3282 * bus 3283 * @hbus: Root PCI bus, as understood by this driver 3284 */ 3285 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus) 3286 { 3287 /* 3288 * Set the resources back to the way they looked when they 3289 * were allocated by setting IORESOURCE_BUSY again. 
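	 * (hv_pci_allocate_bridge_windows() cleared IORESOURCE_BUSY and
	 * set IORESOURCE_WINDOW on these claims; this undoes that before
	 * they are handed back via vmbus_free_mmio().)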
3290 */ 3291 3292 if (hbus->low_mmio_space && hbus->low_mmio_res) { 3293 hbus->low_mmio_res->flags |= IORESOURCE_BUSY; 3294 vmbus_free_mmio(hbus->low_mmio_res->start, 3295 resource_size(hbus->low_mmio_res)); 3296 } 3297 3298 if (hbus->high_mmio_space && hbus->high_mmio_res) { 3299 hbus->high_mmio_res->flags |= IORESOURCE_BUSY; 3300 vmbus_free_mmio(hbus->high_mmio_res->start, 3301 resource_size(hbus->high_mmio_res)); 3302 } 3303 } 3304 3305 /** 3306 * hv_pci_allocate_bridge_windows() - Allocate memory regions 3307 * for the bus 3308 * @hbus: Root PCI bus, as understood by this driver 3309 * 3310 * This function calls vmbus_allocate_mmio(), which is itself a 3311 * bit of a compromise. Ideally, we might change the pnp layer 3312 * in the kernel such that it comprehends either PCI devices 3313 * which are "grandchildren of ACPI," with some intermediate bus 3314 * node (in this case, VMBus) or change it such that it 3315 * understands VMBus. The pnp layer, however, has been declared 3316 * deprecated, and not subject to change. 3317 * 3318 * The workaround, implemented here, is to ask VMBus to allocate 3319 * MMIO space for this bus. VMBus itself knows which ranges are 3320 * appropriate by looking at its own ACPI objects. Then, after 3321 * these ranges are claimed, they're modified to look like they 3322 * would have looked if the ACPI and pnp code had allocated 3323 * bridge windows. These descriptors have to exist in this form 3324 * in order to satisfy the code which will get invoked when the 3325 * endpoint PCI function driver calls request_mem_region() or 3326 * request_mem_region_exclusive(). 3327 * 3328 * Return: 0 on success, -errno on failure 3329 */ 3330 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus) 3331 { 3332 resource_size_t align; 3333 int ret; 3334 3335 if (hbus->low_mmio_space) { 3336 align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); 3337 ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0, 3338 (u64)(u32)0xffffffff, 3339 hbus->low_mmio_space, 3340 align, false); 3341 if (ret) { 3342 dev_err(&hbus->hdev->device, 3343 "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n", 3344 hbus->low_mmio_space); 3345 return ret; 3346 } 3347 3348 /* Modify this resource to become a bridge window. */ 3349 hbus->low_mmio_res->flags |= IORESOURCE_WINDOW; 3350 hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY; 3351 pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res); 3352 } 3353 3354 if (hbus->high_mmio_space) { 3355 align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space)); 3356 ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev, 3357 0x100000000, -1, 3358 hbus->high_mmio_space, align, 3359 false); 3360 if (ret) { 3361 dev_err(&hbus->hdev->device, 3362 "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n", 3363 hbus->high_mmio_space); 3364 goto release_low_mmio; 3365 } 3366 3367 /* Modify this resource to become a bridge window. 
 */
3368 		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3369 		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3370 		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3371 	}
3372 
3373 	return 0;
3374 
3375 release_low_mmio:
3376 	if (hbus->low_mmio_res) {
3377 		vmbus_free_mmio(hbus->low_mmio_res->start,
3378 				resource_size(hbus->low_mmio_res));
3379 	}
3380 
3381 	return ret;
3382 }
3383 
3384 /**
3385  * hv_allocate_config_window() - Find MMIO space for PCI Config
3386  * @hbus:	Root PCI bus, as understood by this driver
3387  *
3388  * This function claims memory-mapped I/O space for accessing
3389  * configuration space for the functions on this bus.
3390  *
3391  * Return: 0 on success, -errno on failure
3392  */
3393 static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3394 {
3395 	int ret;
3396 
3397 	/*
3398 	 * Set up a region of MMIO space to use for accessing configuration
3399 	 * space.
3400 	 */
3401 	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3402 				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3403 	if (ret)
3404 		return ret;
3405 
3406 	/*
3407 	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3408 	 * resource claims (those which cannot be overlapped) and the ranges
3409 	 * which are valid for the children of this bus, which are intended
3410 	 * to be overlapped by those children. Set the flag on this claim
3411 	 * meaning that this region can't be overlapped.
3412 	 */
3413 
3414 	hbus->mem_config->flags |= IORESOURCE_BUSY;
3415 
3416 	return 0;
3417 }
3418 
3419 static void hv_free_config_window(struct hv_pcibus_device *hbus)
3420 {
3421 	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3422 }
3423 
3424 static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3425 
3426 /**
3427  * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3428  * @hdev:	VMBus's tracking struct for this root PCI bus
3429  *
3430  * Return: 0 on success, -errno on failure
3431  */
3432 static int hv_pci_enter_d0(struct hv_device *hdev)
3433 {
3434 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3435 	struct pci_bus_d0_entry *d0_entry;
3436 	struct hv_pci_compl comp_pkt;
3437 	struct pci_packet *pkt;
3438 	bool retry = true;
3439 	int ret;
3440 
3441 enter_d0_retry:
3442 	/*
3443 	 * Tell the host that the bus is ready to use and has moved into the
3444 	 * powered-on state. This includes telling the host which region
3445 	 * of memory-mapped I/O space has been chosen for configuration space
3446 	 * access.
3447 	 */
3448 	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3449 	if (!pkt)
3450 		return -ENOMEM;
3451 
3452 	init_completion(&comp_pkt.host_event);
3453 	pkt->completion_func = hv_pci_generic_compl;
3454 	pkt->compl_ctxt = &comp_pkt;
3455 	d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
3456 	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3457 	d0_entry->mmio_base = hbus->mem_config->start;
3458 
3459 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3460 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
3461 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3462 	if (!ret)
3463 		ret = wait_for_response(hdev, &comp_pkt.host_event);
3464 
3465 	if (ret)
3466 		goto exit;
3467 
3468 	/*
3469 	 * In certain cases (e.g. kdump) the PCI device of interest was
3470 	 * not cleanly shut down and its resources are still held on the
3471 	 * host side, so the host could return an invalid device status.
3472 	 * We need to explicitly request that the host release the resources
3473 	 * and try to enter D0 again.
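	 * The retry path below therefore sets wslot_res_allocated to 255,
	 * so that hv_pci_bus_exit() scans every possible slot and sends
	 * PCI_RESOURCES_RELEASED for each child found, before the second
	 * PCI_BUS_D0ENTRY is attempted.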
3474 	 */
3475 	if (comp_pkt.completion_status < 0 && retry) {
3476 		retry = false;
3477 
3478 		dev_err(&hdev->device, "Retrying D0 Entry\n");
3479 
3480 		/*
3481 		 * hv_pci_bus_exit() calls hv_send_resources_released()
3482 		 * to free up resources of its child devices.
3483 		 * In the kdump kernel we need to set the
3484 		 * wslot_res_allocated to 255 so it scans all child
3485 		 * devices to release resources allocated in the
3486 		 * normal kernel before the panic happened.
3487 		 */
3488 		hbus->wslot_res_allocated = 255;
3489 
3490 		ret = hv_pci_bus_exit(hdev, true);
3491 
3492 		if (ret == 0) {
3493 			kfree(pkt);
3494 			goto enter_d0_retry;
3495 		}
3496 		dev_err(&hdev->device,
3497 			"Retrying D0 failed with ret %d\n", ret);
3498 	}
3499 
3500 	if (comp_pkt.completion_status < 0) {
3501 		dev_err(&hdev->device,
3502 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
3503 			comp_pkt.completion_status);
3504 		ret = -EPROTO;
3505 		goto exit;
3506 	}
3507 
3508 	ret = 0;
3509 
3510 exit:
3511 	kfree(pkt);
3512 	return ret;
3513 }
3514 
3515 /**
3516  * hv_pci_query_relations() - Ask host to send list of child
3517  * devices
3518  * @hdev:	VMBus's tracking struct for this root PCI bus
3519  *
3520  * Return: 0 on success, -errno on failure
3521  */
3522 static int hv_pci_query_relations(struct hv_device *hdev)
3523 {
3524 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3525 	struct pci_message message;
3526 	struct completion comp;
3527 	int ret;
3528 
3529 	/* Ask the host to send along the list of child devices */
3530 	init_completion(&comp);
3531 	if (cmpxchg(&hbus->survey_event, NULL, &comp))
3532 		return -ENOTEMPTY;
3533 
3534 	memset(&message, 0, sizeof(message));
3535 	message.type = PCI_QUERY_BUS_RELATIONS;
3536 
3537 	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
3538 			       0, VM_PKT_DATA_INBAND, 0);
3539 	if (!ret)
3540 		ret = wait_for_response(hdev, &comp);
3541 
3542 	/*
3543 	 * In the case of fast device addition/removal, it's possible that
3544 	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
3545 	 * already got a PCI_BUS_RELATIONS* message from the host and the
3546 	 * channel callback already scheduled a work to hbus->wq, which can be
3547 	 * running pci_devices_present_work() -> survey_child_resources() ->
3548 	 * complete(&hbus->survey_event), even after hv_pci_query_relations()
3549 	 * exits and the stack variable 'comp' is no longer valid; as a result,
3550 	 * a hang or a page fault may happen when the complete() calls
3551 	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
3552 	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
3553 	 * -ENODEV, there can't be any more work items scheduled to hbus->wq
3554 	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
3555 	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
3556 	 * channel->rescind = true.
3557 	 */
3558 	flush_workqueue(hbus->wq);
3559 
3560 	return ret;
3561 }
3562 
3563 /**
3564  * hv_send_resources_allocated() - Report local resource choices
3565  * @hdev:	VMBus's tracking struct for this root PCI bus
3566  *
3567  * The host OS is expecting to be sent a request as a message
3568  * which contains all the resources that the device will use.
3569  * The response contains those same resources, "translated",
3570  * which is to say, the values which should be used by the
3571  * hardware when it delivers an interrupt. (MMIO resources are
3572  * used in local terms.) This is nice for Windows, and lines up
3573  * with the FDO/PDO split, which doesn't exist in Linux.
Linux 3574 * is deeply expecting to scan an emulated PCI configuration 3575 * space. So this message is sent here only to drive the state 3576 * machine on the host forward. 3577 * 3578 * Return: 0 on success, -errno on failure 3579 */ 3580 static int hv_send_resources_allocated(struct hv_device *hdev) 3581 { 3582 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); 3583 struct pci_resources_assigned *res_assigned; 3584 struct pci_resources_assigned2 *res_assigned2; 3585 struct hv_pci_compl comp_pkt; 3586 struct hv_pci_dev *hpdev; 3587 struct pci_packet *pkt; 3588 size_t size_res; 3589 int wslot; 3590 int ret; 3591 3592 size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) 3593 ? sizeof(*res_assigned) : sizeof(*res_assigned2); 3594 3595 pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); 3596 if (!pkt) 3597 return -ENOMEM; 3598 3599 ret = 0; 3600 3601 for (wslot = 0; wslot < 256; wslot++) { 3602 hpdev = get_pcichild_wslot(hbus, wslot); 3603 if (!hpdev) 3604 continue; 3605 3606 memset(pkt, 0, sizeof(*pkt) + size_res); 3607 init_completion(&comp_pkt.host_event); 3608 pkt->completion_func = hv_pci_generic_compl; 3609 pkt->compl_ctxt = &comp_pkt; 3610 3611 if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) { 3612 res_assigned = 3613 (struct pci_resources_assigned *)(pkt + 1); 3614 res_assigned->message_type.type = 3615 PCI_RESOURCES_ASSIGNED; 3616 res_assigned->wslot.slot = hpdev->desc.win_slot.slot; 3617 } else { 3618 res_assigned2 = 3619 (struct pci_resources_assigned2 *)(pkt + 1); 3620 res_assigned2->message_type.type = 3621 PCI_RESOURCES_ASSIGNED2; 3622 res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; 3623 } 3624 put_pcichild(hpdev); 3625 3626 ret = vmbus_sendpacket(hdev->channel, pkt + 1, 3627 size_res, (unsigned long)pkt, 3628 VM_PKT_DATA_INBAND, 3629 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 3630 if (!ret) 3631 ret = wait_for_response(hdev, &comp_pkt.host_event); 3632 if (ret) 3633 break; 3634 3635 if (comp_pkt.completion_status < 0) { 3636 ret = -EPROTO; 3637 dev_err(&hdev->device, 3638 "resource allocated returned 0x%x", 3639 comp_pkt.completion_status); 3640 break; 3641 } 3642 3643 hbus->wslot_res_allocated = wslot; 3644 } 3645 3646 kfree(pkt); 3647 return ret; 3648 } 3649 3650 /** 3651 * hv_send_resources_released() - Report local resources 3652 * released 3653 * @hdev: VMBus's tracking struct for this root PCI bus 3654 * 3655 * Return: 0 on success, -errno on failure 3656 */ 3657 static int hv_send_resources_released(struct hv_device *hdev) 3658 { 3659 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); 3660 struct pci_child_message pkt; 3661 struct hv_pci_dev *hpdev; 3662 int wslot; 3663 int ret; 3664 3665 for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) { 3666 hpdev = get_pcichild_wslot(hbus, wslot); 3667 if (!hpdev) 3668 continue; 3669 3670 memset(&pkt, 0, sizeof(pkt)); 3671 pkt.message_type.type = PCI_RESOURCES_RELEASED; 3672 pkt.wslot.slot = hpdev->desc.win_slot.slot; 3673 3674 put_pcichild(hpdev); 3675 3676 ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0, 3677 VM_PKT_DATA_INBAND, 0); 3678 if (ret) 3679 return ret; 3680 3681 hbus->wslot_res_allocated = wslot - 1; 3682 } 3683 3684 hbus->wslot_res_allocated = -1; 3685 3686 return 0; 3687 } 3688 3689 #define HVPCI_DOM_MAP_SIZE (64 * 1024) 3690 static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE); 3691 3692 /* 3693 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0 3694 * as invalid for passthrough PCI devices of this driver. 
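 *
 * For example (illustrative GUID bytes): an instance GUID with
 * b[5] = 0x06 and b[4] = 0xf5 makes hv_pci_probe() request domain
 * 0x06f5; if that bit is already set in hvpci_dom_map, hv_get_dom_num()
 * falls back to the first free bit instead.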
#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for passthrough PCI devices of this driver.
 */
#define HVPCI_DOM_INVALID 0

/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * @dom: Requested domain number
 *
 * Check if the requested PCI domain number is in use, and return the first
 * free number if it is.
 *
 * Return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}

/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}
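
/*
 * Usage sketch for the two helpers above (illustrative only): together
 * they behave like a minimal ID allocator over hvpci_dom_map:
 *
 *	u16 dom = hv_get_dom_num(dom_req);
 *
 *	if (dom == HVPCI_DOM_INVALID)
 *		return -EINVAL;
 *	...
 *	hv_put_dom_num(dom);
 *
 * hv_pci_probe() below follows exactly this pattern, falling back to the
 * first free bit when the requested domain number collides.
 */
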
/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev: VMBus's tracking struct for this root PCI bus
 * @dev_id: Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct pci_host_bridge *bridge;
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
	if (!bridge)
		return -ENOMEM;

	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;

	hbus->bridge = bridge;
	mutex_init(&hbus->state_lock);
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and other
	 * specs. Pull it from the instance ID, to get something usually
	 * unique. In the rare case of a collision, hv_get_dom_num() finds
	 * another number that is not in use.
	 *
	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
	 * together with this guest driver can guarantee that (1) the only
	 * domain used by Gen1 VMs for something that looks like a physical
	 * PCI bus (which is actually emulated by the hypervisor) is domain 0,
	 * and (2) there will be no overlap between domains (after fixing
	 * possible collisions) in the same VM.
	 */
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%x or other numbers", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%x has collision, using 0x%x",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
	hbus->use_calls = false;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	mutex_unlock(&hbus->state_lock);
	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
release_state_lock:
	mutex_unlock(&hbus->state_lock);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}
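
/*
 * A note on the unwind ladder above (descriptive, mirroring the code):
 * each error label releases exactly what was acquired before the failing
 * step, in reverse order of acquisition, for example:
 *
 *	vmbus_open()                 ->  close:       vmbus_close()
 *	hv_allocate_config_window()  ->  free_config: hv_free_config_window()
 *	ioremap()                    ->  unmap:       iounmap()
 *
 * hv_pci_remove() below performs the same teardown for buses that probed
 * successfully.
 */
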
static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct vmbus_channel *chan = hdev->channel;
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct pci_message *msg;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	u64 trans_id;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (chan->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on the stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs taken in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	msg = (struct pci_message *)pkt.buffer;
	msg->type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
				     (unsigned long)&pkt.teardown_packet,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
		/*
		 * The completion packet on the stack becomes invalid after
		 * 'return'; remove the ID from the VMbus requestor if the
		 * identifier is still mapped to/associated with the packet.
		 *
		 * Cf. hv_pci_onchannelcallback().
		 */
		vmbus_request_addr_match(chan, trans_id,
					 (unsigned long)&pkt.teardown_packet);
		return -ETIMEDOUT;
	}

	return 0;
}
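
/*
 * Illustrative timeline for the timeout branch above (a sketch of the
 * intended behavior): if the host were to answer after the 10 second
 * window, the completion packet would point at stack memory that no
 * longer exists, so the transaction ID is unmapped first:
 *
 *	guest: vmbus_sendpacket_getid(D0EXIT)          returns trans_id T
 *	guest: waits 10 s, times out
 *	guest: vmbus_request_addr_match(chan, T, ...)  unmaps T
 *	host:  late completion for T arrives           safely ignored
 */
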
/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev: VMBus's tracking struct for this root PCI bus
 */
static void hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), so it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
}

static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend(). When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can still be running concurrently and scheduling new
	 * work items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which hv_pci_suspend() may be closing at the same
	 * time, e.g. the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it
	 * flushes hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}

static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct irq_data *irq_data;
	struct msi_desc *entry;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		return 0;

	guard(msi_descs_lock)(&pdev->dev);
	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data))
			return -EINVAL;
		hv_compose_msi_msg(irq_data, &entry->msg);
	}
	return 0;
}
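
/*
 * A sketch of how the callback above is driven (matches the code below):
 * hv_pci_resume() reaches hv_pci_restore_msi_msg() through
 * hv_pci_restore_msi_state(), which hands it to pci_walk_bus() so that
 * every child device re-composes its MSI/MSI-X messages via the
 * hypervisor:
 *
 *	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
 *
 * The guard(msi_descs_lock) above drops the descriptor lock automatically
 * on every return path, including the WARN_ON_ONCE() error exit.
 */
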
/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
 * Table entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}

static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto release_state_lock;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	mutex_unlock(&hbus->state_lock);
	return 0;

release_state_lock:
	mutex_unlock(&hbus->state_lock);
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name = "hv_pci",
	.id_table = hv_pci_id_table,
	.probe = hv_pci_probe,
	.remove = hv_pci_remove,
	.suspend = hv_pci_suspend,
	.resume = hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	int ret;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	ret = hv_pci_irqchip_init();
	if (ret)
		return ret;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");