// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
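
/*
 * For example, PCI_MAKE_VERSION(1, 4) yields 0x00010004, from which
 * PCI_MAJOR_VERSION() recovers 1 and PCI_MINOR_VERSION() recovers 4.
 */
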
enum pci_protocol_version_t {
        PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),     /* Win10 */
        PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),     /* RS1 */
        PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),     /* Vibranium */
        PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),     /* WS2022 */
};

#define CPU_AFFINITY_ALL -1ULL

/*
 * Supported protocol versions in the order of probing - highest go
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
        PCI_PROTOCOL_VERSION_1_4,
        PCI_PROTOCOL_VERSION_1_3,
        PCI_PROTOCOL_VERSION_1_2,
        PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for a 32-bit serial number rendered as a string */
#define SLOT_NAME_SIZE 11

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
        /*
         * Version 1.1
         */
        PCI_MESSAGE_BASE                = 0x42490000,
        PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
        PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
        PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
        PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
        PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
        PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
        PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
        PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
        PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
        PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
        PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
        PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
        PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
        PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
        PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
        PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
        PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
        PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
        PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
        PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
        PCI_RESOURCES_ASSIGNED2         = PCI_MESSAGE_BASE + 0x16,
        PCI_CREATE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x17,
        PCI_DELETE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x18, /* unused */
        PCI_BUS_RELATIONS2              = PCI_MESSAGE_BASE + 0x19,
        PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
        PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
        PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
        struct {
                u16 minor_version;
                u16 major_version;
        } parts;
        u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
        struct {
                u32 dev:5;
                u32 func:3;
                u32 reserved:24;
        } bits;
        u32 slot;
} __packed;
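
/*
 * For example, Linux devfn 0x0a (device 1, function 2) encodes here as
 * bits.dev = 1 and bits.func = 2, i.e. wslot.slot = 0x41; see
 * devfn_to_wslot() and wslot_to_devfn() below for the conversions.
 */
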
/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
        u16 v_id;       /* vendor ID */
        u16 d_id;       /* device ID */
        u8 rev;
        u8 prog_intf;
        u8 subclass;
        u8 base_class;
        u32 subsystem_id;
        union win_slot_encoding win_slot;
        u32 ser;        /* serial number */
} __packed;

enum pci_device_description_flags {
        HV_PCI_DEVICE_FLAG_NONE          = 0x0,
        HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
};

struct pci_function_description2 {
        u16 v_id;       /* vendor ID */
        u16 d_id;       /* device ID */
        u8 rev;
        u8 prog_intf;
        u8 subclass;
        u8 base_class;
        u32 subsystem_id;
        union win_slot_encoding win_slot;
        u32 ser;        /* serial number */
        u32 flags;
        u16 virtual_numa_node;
        u16 reserved;
} __packed;

/**
 * struct hv_msi_desc
 * @vector:             IDT entry
 * @delivery_mode:      As defined in Intel's Programmer's
 *                      Reference Manual, Volume 3, Chapter 8.
 * @vector_count:       Number of contiguous entries in the
 *                      Interrupt Descriptor Table that are
 *                      occupied by this Message-Signaled
 *                      Interrupt. For "MSI", as first defined
 *                      in PCI 2.2, this can be between 1 and
 *                      32. For "MSI-X," as first defined in PCI
 *                      3.0, this must be 1, as each MSI-X table
 *                      entry would have its own descriptor.
 * @reserved:           Empty space
 * @cpu_mask:           All the target virtual processors.
 */
struct hv_msi_desc {
        u8 vector;
        u8 delivery_mode;
        u16 vector_count;
        u32 reserved;
        u64 cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:             IDT entry
 * @delivery_mode:      As defined in Intel's Programmer's
 *                      Reference Manual, Volume 3, Chapter 8.
 * @vector_count:       Number of contiguous entries in the
 *                      Interrupt Descriptor Table that are
 *                      occupied by this Message-Signaled
 *                      Interrupt. For "MSI", as first defined
 *                      in PCI 2.2, this can be between 1 and
 *                      32. For "MSI-X," as first defined in PCI
 *                      3.0, this must be 1, as each MSI-X table
 *                      entry would have its own descriptor.
 * @processor_count:    number of bits enabled in array.
 * @processor_array:    All the target virtual processors.
 */
struct hv_msi_desc2 {
        u8 vector;
        u8 delivery_mode;
        u16 vector_count;
        u16 processor_count;
        u16 processor_array[32];
} __packed;

/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 * Everything is the same as in 'hv_msi_desc2' except that the size of the
 * 'vector' field is larger to support bigger vector values, e.g. LPI
 * vectors on ARM.
 */
struct hv_msi_desc3 {
        u32 vector;
        u8 delivery_mode;
        u8 reserved;
        u16 vector_count;
        u16 processor_count;
        u16 processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:           unused, padding
 * @vector_count:       same as in hv_msi_desc
 * @data:               This is the "data payload" value that is
 *                      written by the device when it generates
 *                      a message-signaled interrupt, either MSI
 *                      or MSI-X.
 * @address:            This is the address to which the data
 *                      payload is written on interrupt
 *                      generation.
 */
struct tran_int_desc {
        u16 reserved;
        u16 vector_count;
        u32 data;
        u64 address;
} __packed;

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
        u32 type;
} __packed;

struct pci_child_message {
        struct pci_message message_type;
        union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
        struct vmpacket_descriptor hdr;
        struct pci_message message_type;
} __packed;

struct pci_response {
        struct vmpacket_descriptor hdr;
        s32 status;     /* negative values are failures */
} __packed;

struct pci_packet {
        void (*completion_func)(void *context, struct pci_response *resp,
                                int resp_packet_size);
        void *compl_ctxt;
};

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * protocol_version: The protocol version requested.
 */

struct pci_version_request {
        struct pci_message message_type;
        u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry. This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
        struct pci_message message_type;
        u32 reserved;
        u64 mmio_base;
} __packed;

struct pci_bus_relations {
        struct pci_incoming_message incoming;
        u32 device_count;
        struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
        struct pci_incoming_message incoming;
        u32 device_count;
        struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
        struct vmpacket_descriptor hdr;
        s32 status;     /* negative values are failures */
        u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        u32 power_state;        /* In Windows terms */
        u32 reserved;
} __packed;

struct pci_set_power_response {
        struct vmpacket_descriptor hdr;
        s32 status;     /* negative values are failures */
        union win_slot_encoding wslot;
        u32 resultant_state;    /* In Windows terms */
        u32 reserved;
} __packed;

struct pci_resources_assigned {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        u8 memory_range[0x14][6];       /* not used here */
        u32 msi_descriptors;
        u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        u8 memory_range[0x14][6];       /* not used here */
        u32 msi_descriptor_count;
        u8 reserved[70];
} __packed;

struct pci_create_interrupt {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
        struct pci_response response;
        u32 reserved;
        struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
        struct pci_message message_type;
        u32 block_id;
        union win_slot_encoding wslot;
        u32 bytes_requested;
} __packed;

struct pci_read_block_response {
        struct vmpacket_descriptor hdr;
        u32 status;
        u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
        struct pci_message message_type;
        u32 block_id;
        union win_slot_encoding wslot;
        u32 byte_count;
        u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
        struct pci_incoming_message incoming;
        union win_slot_encoding wslot;
        u64 block_mask;
} __packed;

struct pci_dev_incoming {
        struct pci_incoming_message incoming;
        union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
        struct pci_message message_type;
        union win_slot_encoding wslot;
        u32 status;
} __packed;

static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
        hv_pcibus_init = 0,
        hv_pcibus_probed,
        hv_pcibus_installed,
        hv_pcibus_removing,
        hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
        struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
        struct pci_config_window sysdata;
#endif
        struct pci_host_bridge *bridge;
        struct fwnode_handle *fwnode;
        /* Protocol version negotiated with the host */
        enum pci_protocol_version_t protocol_version;

        struct mutex state_lock;
        enum hv_pcibus_state state;

        struct hv_device *hdev;
        resource_size_t low_mmio_space;
        resource_size_t high_mmio_space;
        struct resource *mem_config;
        struct resource *low_mmio_res;
        struct resource *high_mmio_res;
        struct completion *survey_event;
        struct pci_bus *pci_bus;
        spinlock_t config_lock;         /* Avoid two threads writing index page */
        spinlock_t device_list_lock;    /* Protect lists below */
        void __iomem *cfg_addr;

        struct list_head children;
        struct list_head dr_list;

        struct msi_domain_info msi_info;
        struct irq_domain *irq_domain;

        struct workqueue_struct *wq;

        /* Highest slot of child device with resources allocated */
        int wslot_res_allocated;
        bool use_calls;                 /* Use hypercalls to access mmio cfg space */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
        struct work_struct wrk;
        struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
        u16 v_id;       /* vendor ID */
        u16 d_id;       /* device ID */
        u8 rev;
        u8 prog_intf;
        u8 subclass;
        u8 base_class;
        u32 subsystem_id;
        union win_slot_encoding win_slot;
        u32 ser;        /* serial number */
        u32 flags;
        u16 virtual_numa_node;
};

struct hv_dr_state {
        struct list_head list_entry;
        u32 device_count;
        struct hv_pcidev_description func[] __counted_by(device_count);
};

struct hv_pci_dev {
        /* List protected by pci_rescan_remove_lock */
        struct list_head list_entry;
        refcount_t refs;
        struct pci_slot *pci_slot;
        struct hv_pcidev_description desc;
        bool reported_missing;
        struct hv_pcibus_device *hbus;
        struct work_struct wrk;

        void (*block_invalidate)(void *context, u64 block_mask);
        void *invalidate_context;

        /*
         * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
         * read it back, for each of the BAR offsets within config space.
         */
        u32 probed_bar[PCI_STD_NUM_BARS];
};
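
/*
 * For example, a 64 KiB non-prefetchable memory BAR would probe as
 * 0xffff0000 plus the low flag bits; the BAR size is recovered by masking
 * off the flags and negating: ~0xffff0000 + 1 == 0x10000.
 */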

struct hv_pci_compl {
        struct completion host_event;
        s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE   APIC_DELIVERY_MODE_FIXED
#define FLOW_HANDLER    handle_edge_irq
#define FLOW_NAME       "edge"

static int hv_pci_irqchip_init(void)
{
        return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
        return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
        struct irq_cfg *cfg = irqd_cfg(data);

        return cfg->vector;
}

#define hv_msi_prepare pci_msi_prepare

/**
 * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data: Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_irq_retarget_interrupt(struct irq_data *data)
{
        struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
        struct hv_retarget_device_interrupt *params;
        struct tran_int_desc *int_desc;
        struct hv_pcibus_device *hbus;
        const struct cpumask *dest;
        cpumask_var_t tmp;
        struct pci_bus *pbus;
        struct pci_dev *pdev;
        unsigned long flags;
        u32 var_size = 0;
        int cpu, nr_bank;
        u64 res;

        dest = irq_data_get_effective_affinity_mask(data);
        pdev = msi_desc_to_pci_dev(msi_desc);
        pbus = pdev->bus;
        hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
        int_desc = data->chip_data;
        if (!int_desc) {
                dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
                         __func__, data->irq);
                return;
        }

        local_irq_save(flags);

        params = *this_cpu_ptr(hyperv_pcpu_input_arg);
        memset(params, 0, sizeof(*params));
        params->partition_id = HV_PARTITION_ID_SELF;
        params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
        params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
        params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
        params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
                            (hbus->hdev->dev_instance.b[4] << 16) |
                            (hbus->hdev->dev_instance.b[7] << 8) |
                            (hbus->hdev->dev_instance.b[6] & 0xf8) |
                            PCI_FUNC(pdev->devfn);
        params->int_target.vector = hv_msi_get_int_vector(data);

        if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
                /*
                 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
                 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
                 * with >64 VP support.
                 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
                 * is not sufficient for this hypercall.
                 */
                params->int_target.flags |=
                        HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

                if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
                        res = 1;
                        goto out;
                }

                cpumask_and(tmp, dest, cpu_online_mask);
                nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
                free_cpumask_var(tmp);

                if (nr_bank <= 0) {
                        res = 1;
                        goto out;
                }

                /*
                 * var-sized hypercall, var-size starts after vp_mask (thus
                 * vp_set.format does not count, but vp_set.valid_bank_mask
                 * does).
                 */
                var_size = 1 + nr_bank;
        } else {
                for_each_cpu_and(cpu, dest, cpu_online_mask) {
                        params->int_target.vp_mask |=
                                (1ULL << hv_cpu_number_to_vp_number(cpu));
                }
        }

        res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
                              params, NULL);

out:
        local_irq_restore(flags);

        /*
         * During hibernation, when a CPU is offlined, the kernel tries
         * to move the interrupt to the remaining CPUs that haven't
         * been offlined yet. In this case, the hv_do_hypercall() above
         * always fails since the vmbus channel has been closed:
         * refer to cpu_disable_common() -> fixup_irqs() ->
         * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
         *
         * Suppress the error message for hibernation because the failure
         * during hibernation does not matter (at this time all the devices
         * have been frozen). Note: the correct affinity info is still updated
         * into the irqdata data structure in migrate_one_irq() ->
         * irq_do_set_affinity(), so later when the VM resumes,
         * hv_pci_restore_msi_state() is able to correctly restore the
         * interrupt with the correct affinity.
         */
        if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
                dev_err(&hbus->hdev->device,
                        "%s() failed: %#llx", __func__, res);
}

static void hv_arch_irq_unmask(struct irq_data *data)
{
        if (hv_root_partition())
                /*
                 * In case of the nested root partition, the nested hypervisor
                 * is taking care of interrupt remapping and thus the
                 * MAP_DEVICE_INTERRUPT hypercall is required instead of
                 * RETARGET_INTERRUPT.
                 */
                (void)hv_map_msi_interrupt(data, NULL);
        else
                hv_irq_retarget_interrupt(data);
}
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; the arch SPI range is [32, 1019]. Leave a bit
 * of room at the start to allow SPIs to be specified through ACPI, and start
 * at a power of two to satisfy the power-of-2 multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START    64
#define HV_PCI_MSI_SPI_NR       (1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE           0
#define FLOW_HANDLER            NULL
#define FLOW_NAME               NULL
#define hv_msi_prepare          NULL

struct hv_pci_chip_data {
        DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
        struct mutex map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
        .name = "MSI",
        .irq_set_affinity = irq_chip_set_affinity_parent,
        .irq_eoi = irq_chip_eoi_parent,
        .irq_mask = irq_chip_mask_parent,
        .irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
        return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:         Indicates the number of IRQs that were allocated from
 *                      the bitmap.
 * @nr_dom_irqs:        Indicates the number of IRQs that were allocated from
 *                      the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
                                unsigned int virq,
                                unsigned int nr_bm_irqs,
                                unsigned int nr_dom_irqs)
{
        struct hv_pci_chip_data *chip_data = domain->host_data;
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        int first = d->hwirq - HV_PCI_MSI_SPI_START;
        int i;

        mutex_lock(&chip_data->map_lock);
        bitmap_release_region(chip_data->spi_map,
                              first,
                              get_count_order(nr_bm_irqs));
        mutex_unlock(&chip_data->map_lock);
        for (i = 0; i < nr_dom_irqs; i++) {
                if (i)
                        d = irq_domain_get_irq_data(domain, virq + i);
                irq_domain_reset_irq_data(d);
        }

        irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
                                       unsigned int virq,
                                       unsigned int nr_irqs)
{
        hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}

static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
                                       unsigned int nr_irqs,
                                       irq_hw_number_t *hwirq)
{
        struct hv_pci_chip_data *chip_data = domain->host_data;
        int index;

        /* Find and allocate region from the SPI bitmap */
        mutex_lock(&chip_data->map_lock);
        index = bitmap_find_free_region(chip_data->spi_map,
                                        HV_PCI_MSI_SPI_NR,
                                        get_count_order(nr_irqs));
        mutex_unlock(&chip_data->map_lock);
        if (index < 0)
                return -ENOSPC;

        *hwirq = index + HV_PCI_MSI_SPI_START;

        return 0;
}

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
                                           unsigned int virq,
                                           irq_hw_number_t hwirq)
{
        struct irq_fwspec fwspec;
        struct irq_data *d;
        int ret;

        fwspec.fwnode = domain->parent->fwnode;
        if (is_of_node(fwspec.fwnode)) {
                /* SPI lines for OF translations start at offset 32 */
                fwspec.param_count = 3;
                fwspec.param[0] = 0;
                fwspec.param[1] = hwirq - 32;
                fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
        } else {
                fwspec.param_count = 2;
                fwspec.param[0] = hwirq;
                fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
        }
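
        /*
         * For example, SPI hwirq 64 becomes the OF specifier
         * { 0, 32, IRQ_TYPE_EDGE_RISING } (GIC_SPI cells are offset by 32),
         * or the two-cell ACPI specifier { 64, IRQ_TYPE_EDGE_RISING }.
         */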

        ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (ret)
                return ret;

        /*
         * Since the interrupt specifier is not coming from ACPI or DT, the
         * trigger type will need to be set explicitly. Otherwise, it will be
         * set to whatever is in the GIC configuration.
         */
        d = irq_domain_get_irq_data(domain->parent, virq);

        return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
                                       unsigned int virq, unsigned int nr_irqs,
                                       void *args)
{
        irq_hw_number_t hwirq;
        unsigned int i;
        int ret;

        ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
                                                      hwirq + i);
                if (ret) {
                        hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
                        return ret;
                }

                irq_domain_set_hwirq_and_chip(domain, virq + i,
                                              hwirq + i,
                                              &hv_arm64_msi_irq_chip,
                                              domain->host_data);
                pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
        }

        return 0;
}

/*
 * Pick the first cpu as the irq affinity that can be temporarily used for
 * composing MSI from the hypervisor. GIC will eventually set the right
 * affinity for the irq and the 'unmask' will retarget the interrupt to that
 * cpu.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
                                          struct irq_data *irqd, bool reserve)
{
        int cpu = cpumask_first(cpu_present_mask);

        irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

        return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
        .alloc = hv_pci_vec_irq_domain_alloc,
        .free = hv_pci_vec_irq_domain_free,
        .activate = hv_pci_vec_irq_domain_activate,
};

#ifdef CONFIG_OF

static struct irq_domain *hv_pci_of_irq_domain_parent(void)
{
        struct device_node *parent;
        struct irq_domain *domain;

        parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
        if (!parent)
                return NULL;
        domain = irq_find_host(parent);
        of_node_put(parent);

        return domain;
}

#endif

#ifdef CONFIG_ACPI

static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
{
        acpi_gsi_domain_disp_fn gsi_domain_disp_fn;

        gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
        if (!gsi_domain_disp_fn)
                return NULL;
        return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
                                        DOMAIN_BUS_ANY);
}

#endif

static int hv_pci_irqchip_init(void)
{
        static struct hv_pci_chip_data *chip_data;
        struct fwnode_handle *fn = NULL;
        struct irq_domain *irq_domain_parent = NULL;
        int ret = -ENOMEM;

        chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
        if (!chip_data)
                return ret;

        mutex_init(&chip_data->map_lock);
        fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
        if (!fn)
                goto free_chip;

        /*
         * Once enabled, an IRQ domain should not be removed, since there is
         * no way to ensure that all the corresponding devices are also gone
         * and that no interrupts will be generated.
         */
#ifdef CONFIG_ACPI
        if (!acpi_disabled)
                irq_domain_parent = hv_pci_acpi_irq_domain_parent();
#endif
#ifdef CONFIG_OF
        if (!irq_domain_parent)
                irq_domain_parent = hv_pci_of_irq_domain_parent();
#endif
        if (!irq_domain_parent) {
                WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
                ret = -EINVAL;
                goto free_chip;
        }

        hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
                                                            HV_PCI_MSI_SPI_NR,
                                                            fn, &hv_pci_domain_ops,
                                                            chip_data);

        if (!hv_msi_gic_irq_domain) {
                pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
                goto free_chip;
        }

        return 0;

free_chip:
        kfree(chip_data);
        if (fn)
                irq_domain_free_fwnode(fn);

        return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
        return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for the interrupts of PCI devices, and SPIs are managed via
 * GICD registers, which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context: Set up by the sender of the packet.
 * @resp: The response packet
 * @resp_packet_size: Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
                                 int resp_packet_size)
{
        struct hv_pci_compl *comp_pkt = context;

        comp_pkt->completion_status = resp->status;
        complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
                                             u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
        refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
        if (refcount_dec_and_test(&hpdev->refs))
                kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
                             struct completion *comp)
{
        while (true) {
                if (hdev->channel->rescind) {
                        dev_warn_once(&hdev->device, "The device is gone.\n");
                        return -ENODEV;
                }

                if (wait_for_completion_timeout(comp, HZ / 10))
                        break;
        }

        return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn: The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
        union win_slot_encoding wslot;

        wslot.slot = 0;
        wslot.bits.dev = PCI_SLOT(devfn);
        wslot.bits.func = PCI_FUNC(devfn);

        return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot: The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
        union win_slot_encoding slot_no;

        slot_no.slot = wslot;
        return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
        struct hv_mmio_read_input *in;
        struct hv_mmio_read_output *out;
        u64 ret;

        /*
         * Must be called with interrupts disabled so it is safe
         * to use the per-cpu input argument page. Use it for
         * both input and output.
         */
        in = *this_cpu_ptr(hyperv_pcpu_input_arg);
        out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
        in->gpa = gpa;
        in->size = size;

        ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
        if (hv_result_success(ret)) {
                switch (size) {
                case 1:
                        *val = *(u8 *)(out->data);
                        break;
                case 2:
                        *val = *(u16 *)(out->data);
                        break;
                default:
                        *val = *(u32 *)(out->data);
                        break;
                }
        } else
                dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
                        ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
        struct hv_mmio_write_input *in;
        u64 ret;

        /*
         * Must be called with interrupts disabled so it is safe
         * to use the per-cpu input argument memory.
         */
        in = *this_cpu_ptr(hyperv_pcpu_input_arg);
        in->gpa = gpa;
        in->size = size;
        switch (size) {
        case 1:
                *(u8 *)(in->data) = val;
                break;
        case 2:
                *(u16 *)(in->data) = val;
                break;
        default:
                *(u32 *)(in->data) = val;
                break;
        }

        ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
        if (!hv_result_success(ret))
                dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
                        ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space. Writing to the first page chooses
 * the PCI function being written or read. Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */

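/*
 * A minimal sketch of the select-then-access sequence implemented below,
 * assuming the MMIO path (when hbus->use_calls is set, the same two steps
 * are performed via the HVCALL_MMIO_WRITE/HVCALL_MMIO_READ hypercalls):
 *
 *      writel(win_slot, hbus->cfg_addr);       select the function
 *      mb();                                   order the select vs. the access
 *      val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);
 */
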
/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev: The PCI driver's representation of the device
 * @where: Offset within config space
 * @size: Size of the transfer
 * @val: Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
                                     int size, u32 *val)
{
        struct hv_pcibus_device *hbus = hpdev->hbus;
        struct device *dev = &hbus->hdev->device;
        int offset = where + CFG_PAGE_OFFSET;
        unsigned long flags;

        /*
         * If the attempt is to read the IDs or the ROM BAR, simulate that.
         */
        if (where + size <= PCI_COMMAND) {
                memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
        } else if (where >= PCI_CLASS_REVISION && where + size <=
                   PCI_CACHE_LINE_SIZE) {
                memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
                       PCI_CLASS_REVISION, size);
        } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
                   PCI_ROM_ADDRESS) {
                memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
                       PCI_SUBSYSTEM_VENDOR_ID, size);
        } else if (where >= PCI_ROM_ADDRESS && where + size <=
                   PCI_CAPABILITY_LIST) {
                /* ROM BARs are unimplemented */
                *val = 0;
        } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
                   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
                /*
                 * Interrupt Line and Interrupt PIN are hard-wired to zero
                 * because this front-end only supports message-signaled
                 * interrupts.
                 */
                *val = 0;
        } else if (where + size <= CFG_PAGE_SIZE) {

                spin_lock_irqsave(&hbus->config_lock, flags);
                if (hbus->use_calls) {
                        phys_addr_t addr = hbus->mem_config->start + offset;

                        hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
                                          hpdev->desc.win_slot.slot);
                        hv_pci_read_mmio(dev, addr, size, val);
                } else {
                        void __iomem *addr = hbus->cfg_addr + offset;

                        /* Choose the function to be read. (See comment above) */
                        writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
                        /* Make sure the function was chosen before reading. */
                        mb();
                        /* Read from that function's config space. */
                        switch (size) {
                        case 1:
                                *val = readb(addr);
                                break;
                        case 2:
                                *val = readw(addr);
                                break;
                        default:
                                *val = readl(addr);
                                break;
                        }
                        /*
                         * Make sure the read was done before we release the
                         * spinlock allowing consecutive reads/writes.
                         */
                        mb();
                }
                spin_unlock_irqrestore(&hbus->config_lock, flags);
        } else {
                dev_err(dev, "Attempt to read beyond a function's config space.\n");
        }
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
        struct hv_pcibus_device *hbus = hpdev->hbus;
        struct device *dev = &hbus->hdev->device;
        u32 val;
        u16 ret;
        unsigned long flags;

        spin_lock_irqsave(&hbus->config_lock, flags);

        if (hbus->use_calls) {
                phys_addr_t addr = hbus->mem_config->start +
                                   CFG_PAGE_OFFSET + PCI_VENDOR_ID;

                hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
                                  hpdev->desc.win_slot.slot);
                hv_pci_read_mmio(dev, addr, 2, &val);
                ret = val;  /* Truncates to 16 bits */
        } else {
                void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
                                     PCI_VENDOR_ID;
                /* Choose the function to be read. (See comment above) */
                writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
                /* Make sure the function was chosen before we start reading. */
                mb();
                /* Read from that function's config space. */
                ret = readw(addr);
                /*
                 * mb() is not required here, because the
                 * spin_unlock_irqrestore() is a barrier.
                 */
        }

        spin_unlock_irqrestore(&hbus->config_lock, flags);

        return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev: The PCI driver's representation of the device
 * @where: Offset within config space
 * @size: Size of the transfer
 * @val: The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
                                      int size, u32 val)
{
        struct hv_pcibus_device *hbus = hpdev->hbus;
        struct device *dev = &hbus->hdev->device;
        int offset = where + CFG_PAGE_OFFSET;
        unsigned long flags;

        if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
            where + size <= PCI_CAPABILITY_LIST) {
                /* SSIDs and ROM BARs are read-only */
        } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
                spin_lock_irqsave(&hbus->config_lock, flags);

                if (hbus->use_calls) {
                        phys_addr_t addr = hbus->mem_config->start + offset;

                        hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
                                          hpdev->desc.win_slot.slot);
                        hv_pci_write_mmio(dev, addr, size, val);
                } else {
                        void __iomem *addr = hbus->cfg_addr + offset;

                        /* Choose the function to write. (See comment above) */
                        writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
                        /* Make sure the function was chosen before writing. */
                        wmb();
                        /* Write to that function's config space. */
                        switch (size) {
                        case 1:
                                writeb(val, addr);
                                break;
                        case 2:
                                writew(val, addr);
                                break;
                        default:
                                writel(val, addr);
                                break;
                        }
                        /*
                         * Make sure the write was done before we release the
                         * spinlock allowing consecutive reads/writes.
                         */
                        mb();
                }
                spin_unlock_irqrestore(&hbus->config_lock, flags);
        } else {
                dev_err(dev, "Attempt to write beyond a function's config space.\n");
        }
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *         PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
                                   int where, int size, u32 *val)
{
        struct hv_pcibus_device *hbus =
                container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
        struct hv_pci_dev *hpdev;

        hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
        if (!hpdev)
                return PCIBIOS_DEVICE_NOT_FOUND;

        _hv_pcifront_read_config(hpdev, where, size, val);

        put_pcichild(hpdev);
        return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *         PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
                                    int where, int size, u32 val)
{
        struct hv_pcibus_device *hbus =
                container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
        struct hv_pci_dev *hpdev;

        hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
        if (!hpdev)
                return PCIBIOS_DEVICE_NOT_FOUND;

        _hv_pcifront_write_config(hpdev, where, size, val);

        put_pcichild(hpdev);
        return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
        .read  = hv_pcifront_read_config,
        .write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver. These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional. Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver. The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one or
 * more of the first 64 blocks. This invalidation is delivered via a callback
 * supplied to the VF driver by this driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */
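
/*
 * A hypothetical sketch of how a VF driver might use this backchannel (the
 * names 'blk', 'got', 'ctx' and 'my_invalidate_cb' are illustrative; in the
 * full driver these helpers are handed to VF drivers through an ops
 * structure rather than called directly):
 *
 *      u8 blk[HV_CONFIG_BLOCK_SIZE_MAX];
 *      unsigned int got;
 *
 *      hv_register_block_invalidate(pdev, ctx, my_invalidate_cb);
 *      if (!hv_read_config_block(pdev, blk, sizeof(blk), 0, &got))
 *              consume(blk, got);      up to 128 bytes per block
 */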

struct hv_read_config_compl {
        struct hv_pci_compl comp_pkt;
        void *buf;
        unsigned int len;
        unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context: Identifies the read config operation
 * @resp: The response packet itself
 * @resp_packet_size: Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
                                     int resp_packet_size)
{
        struct hv_read_config_compl *comp = context;
        struct pci_read_block_response *read_resp =
                (struct pci_read_block_response *)resp;
        unsigned int data_len, hdr_len;

        hdr_len = offsetof(struct pci_read_block_response, bytes);
        if (resp_packet_size < hdr_len) {
                comp->comp_pkt.completion_status = -1;
                goto out;
        }

        data_len = resp_packet_size - hdr_len;
        if (data_len > 0 && read_resp->status == 0) {
                comp->bytes_returned = min(comp->len, data_len);
                memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
        } else {
                comp->bytes_returned = 0;
        }

        comp->comp_pkt.completion_status = read_resp->status;
out:
        complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:               The PCI driver's representation for this device.
 * @buf:                Buffer into which the config block will be copied.
 * @len:                Size in bytes of buf.
 * @block_id:           Identifies the config block which has been requested.
 * @bytes_returned:     Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
                                unsigned int len, unsigned int block_id,
                                unsigned int *bytes_returned)
{
        struct hv_pcibus_device *hbus =
                container_of(pdev->bus->sysdata, struct hv_pcibus_device,
                             sysdata);
        struct {
                struct pci_packet pkt;
                char buf[sizeof(struct pci_read_block)];
        } pkt;
        struct hv_read_config_compl comp_pkt;
        struct pci_read_block *read_blk;
        int ret;

        if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
                return -EINVAL;

        init_completion(&comp_pkt.comp_pkt.host_event);
        comp_pkt.buf = buf;
        comp_pkt.len = len;

        memset(&pkt, 0, sizeof(pkt));
        pkt.pkt.completion_func = hv_pci_read_config_compl;
        pkt.pkt.compl_ctxt = &comp_pkt;
        read_blk = (struct pci_read_block *)pkt.buf;
        read_blk->message_type.type = PCI_READ_BLOCK;
        read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
        read_blk->block_id = block_id;
        read_blk->bytes_requested = len;

        ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
                               sizeof(*read_blk), (unsigned long)&pkt.pkt,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret)
                return ret;

        ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
        if (ret)
                return ret;

        if (comp_pkt.comp_pkt.completion_status != 0 ||
            comp_pkt.bytes_returned == 0) {
                dev_err(&hbus->hdev->device,
                        "Read Config Block failed: 0x%x, bytes_returned=%d\n",
                        comp_pkt.comp_pkt.completion_status,
                        comp_pkt.bytes_returned);
                return -EIO;
        }

        *bytes_returned = comp_pkt.bytes_returned;
        return 0;
}

/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context: Identifies the write config operation
 * @resp: The response packet itself
 * @resp_packet_size: Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
                                      int resp_packet_size)
{
        struct hv_pci_compl *comp_pkt = context;

        comp_pkt->completion_status = resp->status;
        complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:               The PCI driver's representation for this device.
 * @buf:                Buffer from which the config block will be copied.
 * @len:                Size in bytes of buf.
 * @block_id:           Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
                                 unsigned int len, unsigned int block_id)
{
        struct hv_pcibus_device *hbus =
                container_of(pdev->bus->sysdata, struct hv_pcibus_device,
                             sysdata);
        struct {
                struct pci_packet pkt;
                char buf[sizeof(struct pci_write_block)];
                u32 reserved;
        } pkt;
        struct hv_pci_compl comp_pkt;
        struct pci_write_block *write_blk;
        u32 pkt_size;
        int ret;

        if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
                return -EINVAL;

        init_completion(&comp_pkt.host_event);

        memset(&pkt, 0, sizeof(pkt));
        pkt.pkt.completion_func = hv_pci_write_config_compl;
        pkt.pkt.compl_ctxt = &comp_pkt;
        write_blk = (struct pci_write_block *)pkt.buf;
        write_blk->message_type.type = PCI_WRITE_BLOCK;
        write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
        write_blk->block_id = block_id;
        write_blk->byte_count = len;
        memcpy(write_blk->bytes, buf, len);
        pkt_size = offsetof(struct pci_write_block, bytes) + len;
        /*
         * This quirk is required on some hosts shipped around 2018, because
         * these hosts don't check the pkt_size correctly (new hosts have been
         * fixed since early 2019). The quirk is also safe on very old hosts
         * and new hosts, because, on them, what really matters is the length
         * specified in write_blk->byte_count.
         */
        pkt_size += sizeof(pkt.reserved);

        ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
                               (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret)
                return ret;

        ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
        if (ret)
                return ret;

        if (comp_pkt.completion_status != 0) {
                dev_err(&hbus->hdev->device,
                        "Write Config Block failed: 0x%x\n",
                        comp_pkt.completion_status);
                return -EIO;
        }

        return 0;
}

/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:               The PCI driver's representation for this device.
 * @context:            Identifies the device.
 * @block_invalidate:   Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
                                        void (*block_invalidate)(void *context,
                                                                 u64 block_mask))
{
        struct hv_pcibus_device *hbus =
                container_of(pdev->bus->sysdata, struct hv_pcibus_device,
                             sysdata);
        struct hv_pci_dev *hpdev;

        hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
        if (!hpdev)
                return -ENODEV;

        hpdev->block_invalidate = block_invalidate;
        hpdev->invalidate_context = context;

        put_pcichild(hpdev);
        return 0;
}

/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
                             struct tran_int_desc *int_desc)
{
        struct pci_delete_interrupt *int_pkt;
        struct {
                struct pci_packet pkt;
                u8 buffer[sizeof(struct pci_delete_interrupt)];
        } ctxt;

        if (!int_desc->vector_count) {
                kfree(int_desc);
                return;
        }
        memset(&ctxt, 0, sizeof(ctxt));
        int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
        int_pkt->message_type.type =
                PCI_DELETE_INTERRUPT_MESSAGE;
        int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
        int_pkt->int_desc = *int_desc;
        vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
                         0, VM_PKT_DATA_INBAND, 0);
        kfree(int_desc);
}

/**
 * hv_msi_free() - Free the MSI.
 * @domain: The interrupt domain pointer
 * @info: Extra MSI-related context
 * @irq: Identifies the IRQ.
 *
 * The Hyper-V parent partition and hypervisor are tracking the
 * messages that are in use, keeping the interrupt redirection
 * table up to date. This callback sends a message that frees
 * the IRT entry and related tracking nonsense.
 */
static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
                        unsigned int irq)
{
        struct hv_pcibus_device *hbus;
        struct hv_pci_dev *hpdev;
        struct pci_dev *pdev;
        struct tran_int_desc *int_desc;
        struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
        struct msi_desc *msi = irq_data_get_msi_desc(irq_data);

        pdev = msi_desc_to_pci_dev(msi);
        hbus = info->data;
        int_desc = irq_data_get_irq_chip_data(irq_data);
        if (!int_desc)
                return;

        irq_data->chip_data = NULL;
        hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
        if (!hpdev) {
                kfree(int_desc);
                return;
        }

        hv_int_desc_free(hpdev, int_desc);
        put_pcichild(hpdev);
}

static void hv_irq_mask(struct irq_data *data)
{
        pci_msi_mask_irq(data);
        if (data->parent_data->chip->irq_mask)
                irq_chip_mask_parent(data);
}

static void hv_irq_unmask(struct irq_data *data)
{
        hv_arch_irq_unmask(data);

        if (data->parent_data->chip->irq_unmask)
                irq_chip_unmask_parent(data);
        pci_msi_unmask_irq(data);
}

struct compose_comp_ctxt {
        struct hv_pci_compl comp_pkt;
        struct tran_int_desc int_desc;
};

static void hv_pci_compose_compl(void *context, struct pci_response *resp,
                                 int resp_packet_size)
{
        struct compose_comp_ctxt *comp_pkt = context;
        struct pci_create_int_response *int_resp =
                (struct pci_create_int_response *)resp;

        if (resp_packet_size < sizeof(*int_resp)) {
                comp_pkt->comp_pkt.completion_status = -1;
                goto out;
        }
        comp_pkt->comp_pkt.completion_status = resp->status;
        comp_pkt->int_desc = int_resp->int_desc;
out:
        complete(&comp_pkt->comp_pkt.host_event);
}

static u32 hv_compose_msi_req_v1(
        struct pci_create_interrupt *int_pkt,
        u32 slot, u8 vector, u16 vector_count)
{
        int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
        int_pkt->wslot.slot = slot;
        int_pkt->int_desc.vector = vector;
        int_pkt->int_desc.vector_count = vector_count;
        int_pkt->int_desc.delivery_mode = DELIVERY_MODE;

        /*
         * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
         * hv_irq_unmask().
         */
        int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;

        return sizeof(*int_pkt);
}
1776
/*
 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
 * not irrelevant because Hyper-V chooses the physical CPU to handle the
 * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
 * but assigning too many vPCI device interrupts to the same pCPU can cause a
 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
 * to spread out the pCPUs that it selects.
 *
 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
 * to always return the same dummy vCPU, because a second call to
 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
 * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
 * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
 * the same pCPU, even though the vCPUs will be spread out by later calls
 * to hv_irq_unmask(), but that is the best we can do now.
 *
 * As of Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
 * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
 * enhancement is planned for a future version. With that enhancement, the
 * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
 * device will be spread across multiple pCPUs.
 */

/*
 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
 * by subsequent retarget in hv_irq_unmask().
 */
static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
{
	return cpumask_first_and(affinity, cpu_online_mask);
}

/*
 * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
 */
static int hv_compose_multi_msi_req_get_cpu(void)
{
	static DEFINE_SPINLOCK(multi_msi_cpu_lock);

	/* -1 means starting with CPU 0 */
	static int cpu_next = -1;

	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&multi_msi_cpu_lock, flags);

	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
	cpu = cpu_next;

	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);

	return cpu;
}

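/*
 * Illustrative example of the round-robin above (editorial note, not
 * derived from the protocol spec): on a guest with eight online vCPUs,
 * three multi-MSI devices composing their first message would receive
 * dummy vCPUs 0, 1 and 2 respectively, making it likely that Hyper-V
 * spreads their interrupt handling across three different pCPUs.
 */
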
static u32 hv_compose_msi_req_v2(
	struct pci_create_interrupt2 *int_pkt, int cpu,
	u32 slot, u8 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
	int_pkt->int_desc.processor_array[0] =
		hv_cpu_number_to_vp_number(cpu);
	int_pkt->int_desc.processor_count = 1;

	return sizeof(*int_pkt);
}

static u32 hv_compose_msi_req_v3(
	struct pci_create_interrupt3 *int_pkt, int cpu,
	u32 slot, u32 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.reserved = 0;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
	int_pkt->int_desc.processor_array[0] =
		hv_cpu_number_to_vp_number(cpu);
	int_pkt->int_desc.processor_count = 1;

	return sizeof(*int_pkt);
}

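/*
 * Summary of the three request formats above (editorial note): v1
 * (protocol 1.1) targets interrupts with a vCPU mask and an 8-bit
 * vector; v2 (protocols 1.2/1.3) instead names a single virtual
 * processor in processor_array[]; v3 (protocol 1.4) matches v2 but
 * widens the vector to 32 bits, which architectures such as ARM64
 * need because their interrupt IDs can exceed 255.
 */
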
/**
 * hv_compose_msi_msg() - Supplies a valid MSI address/data
 * @data: Everything about this MSI
 * @msg: Buffer that is filled in by this function
 *
 * This function unpacks the IRQ looking for target CPU set, IDT
 * vector and mode and sends a message to the parent partition
 * asking for a mapping for that tuple in this partition. The
 * response supplies a data value and address to which that data
 * should be written to trigger that interrupt.
 */
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct hv_pcibus_device *hbus;
	struct vmbus_channel *channel;
	struct hv_pci_dev *hpdev;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	const struct cpumask *dest;
	struct compose_comp_ctxt comp;
	struct tran_int_desc *int_desc;
	struct msi_desc *msi_desc;
	/*
	 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
	 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
	 */
	u16 vector_count;
	u32 vector;
	struct {
		struct pci_packet pci_pkt;
		union {
			struct pci_create_interrupt v1;
			struct pci_create_interrupt2 v2;
			struct pci_create_interrupt3 v3;
		} int_pkts;
	} __packed ctxt;
	bool multi_msi;
	u64 trans_id;
	u32 size;
	int ret;
	int cpu;

	msi_desc = irq_data_get_msi_desc(data);
	multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
		    msi_desc->nvec_used > 1;

	/* Reuse the previous allocation */
	if (data->chip_data && multi_msi) {
		int_desc = data->chip_data;
		msg->address_hi = int_desc->address >> 32;
		msg->address_lo = int_desc->address & 0xffffffff;
		msg->data = int_desc->data;
		return;
	}

	pdev = msi_desc_to_pci_dev(msi_desc);
	dest = irq_data_get_effective_affinity_mask(data);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	channel = hbus->hdev->channel;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		goto return_null_message;

	/* Free any previous message that might have already been composed. */
	if (data->chip_data && !multi_msi) {
		int_desc = data->chip_data;
		data->chip_data = NULL;
		hv_int_desc_free(hpdev, int_desc);
	}

	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
	if (!int_desc)
		goto drop_reference;

	if (multi_msi) {
		/*
		 * If this is not the first MSI of a multi-MSI allocation,
		 * we already have a mapping and can exit early.
		 */
		if (msi_desc->irq != data->irq) {
			data->chip_data = int_desc;
			int_desc->address = msi_desc->msg.address_lo |
					    (u64)msi_desc->msg.address_hi << 32;
			int_desc->data = msi_desc->msg.data +
					 (data->irq - msi_desc->irq);
			msg->address_hi = msi_desc->msg.address_hi;
			msg->address_lo = msi_desc->msg.address_lo;
			msg->data = int_desc->data;
			put_pcichild(hpdev);
			return;
		}
		/*
		 * The vector we select here is a dummy value. The correct
		 * value gets sent to the hypervisor in unmask(). It must be
		 * aligned with the vector count and must not be zero.
		 * Multi-MSI allocations are powers of 2 up to 32 vectors,
		 * so 32 will always work here.
		 */
		vector = 32;
		vector_count = msi_desc->nvec_used;
		cpu = hv_compose_multi_msi_req_get_cpu();
	} else {
		vector = hv_msi_get_int_vector(data);
		vector_count = 1;
		cpu = hv_compose_msi_req_get_cpu(dest);
	}

	/*
	 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
	 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
	 * for better readability.
	 */
	memset(&ctxt, 0, sizeof(ctxt));
	init_completion(&comp.comp_pkt.host_event);
	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
	ctxt.pci_pkt.compl_ctxt = &comp;

	switch (hbus->protocol_version) {
	case PCI_PROTOCOL_VERSION_1_1:
		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
					     hpdev->desc.win_slot.slot,
					     (u8)vector,
					     vector_count);
		break;

	case PCI_PROTOCOL_VERSION_1_2:
	case PCI_PROTOCOL_VERSION_1_3:
		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
					     cpu,
					     hpdev->desc.win_slot.slot,
					     (u8)vector,
					     vector_count);
		break;

	case PCI_PROTOCOL_VERSION_1_4:
		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
					     cpu,
					     hpdev->desc.win_slot.slot,
					     vector,
					     vector_count);
		break;

	default:
		/*
		 * As we only negotiate protocol versions known to this
		 * driver, this path should never be hit. However, this is
		 * not a hot path, so print a message to aid future updates.
		 */
		dev_err(&hbus->hdev->device,
			"Unexpected vPCI protocol, update driver.");
		goto free_int_desc;
	}

	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
				     size, (unsigned long)&ctxt.pci_pkt,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		dev_err(&hbus->hdev->device,
			"Sending request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Prevents hv_pci_onchannelcallback() from running concurrently
	 * in the tasklet.
	 */
	tasklet_disable_in_atomic(&channel->callback_event);

	/*
	 * Since this function is called with IRQ locks held, we can't
	 * do a normal wait for completion; instead, we poll.
	 */
	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
		unsigned long flags;

		/* 0xFFFF means an invalid PCI VENDOR ID. */
		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
			dev_err_once(&hbus->hdev->device,
				     "the device has gone\n");
			goto enable_tasklet;
		}

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock_irqsave(&channel->sched_lock, flags);
		if (unlikely(channel->onchannel_callback == NULL)) {
			spin_unlock_irqrestore(&channel->sched_lock, flags);
			goto enable_tasklet;
		}
		hv_pci_onchannelcallback(hbus);
		spin_unlock_irqrestore(&channel->sched_lock, flags);

		udelay(100);
	}

	tasklet_enable(&channel->callback_event);

	if (comp.comp_pkt.completion_status < 0) {
		dev_err(&hbus->hdev->device,
			"Request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Record the assignment so that this can be unwound later. Using
	 * irq_set_chip_data() here would be appropriate, but the lock it takes
	 * is already held.
	 */
	*int_desc = comp.int_desc;
	data->chip_data = int_desc;

	/* Pass up the result. */
	msg->address_hi = comp.int_desc.address >> 32;
	msg->address_lo = comp.int_desc.address & 0xffffffff;
	msg->data = comp.int_desc.data;

	put_pcichild(hpdev);
	return;

enable_tasklet:
	tasklet_enable(&channel->callback_event);
	/*
	 * The completion packet on the stack becomes invalid after 'return';
	 * remove the ID from the VMbus requestor if the identifier is still
	 * mapped to/associated with the packet. (The identifier could have
	 * been 're-used', i.e., already removed and (re-)mapped.)
	 *
	 * Cf. hv_pci_onchannelcallback().
	 */
	vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
free_int_desc:
	kfree(int_desc);
drop_reference:
	put_pcichild(hpdev);
return_null_message:
	msg->address_hi = 0;
	msg->address_lo = 0;
	msg->data = 0;
}

/* HW Interrupt Chip Descriptor */
static struct irq_chip hv_msi_irq_chip = {
	.name = "Hyper-V PCIe MSI",
	.irq_compose_msi_msg = hv_compose_msi_msg,
	.irq_set_affinity = irq_chip_set_affinity_parent,
#ifdef CONFIG_X86
	.irq_ack = irq_chip_ack_parent,
	.flags = IRQCHIP_MOVE_DEFERRED,
#elif defined(CONFIG_ARM64)
	.irq_eoi = irq_chip_eoi_parent,
#endif
	.irq_mask = hv_irq_mask,
	.irq_unmask = hv_irq_unmask,
};

static struct msi_domain_ops hv_msi_ops = {
	.msi_prepare = hv_msi_prepare,
	.msi_free = hv_msi_free,
};

/**
 * hv_pcie_init_irq_domain() - Initialize IRQ domain
 * @hbus: The root PCI bus
 *
 * This function creates an IRQ domain which will be used for
 * interrupts from devices that have been passed through. These
 * devices only support MSI and MSI-X, not line-based interrupts
 * or simulations of line-based interrupts through PCIe's
 * fabric-layer messages. Because interrupts are remapped, we
 * can support multi-message MSI here.
 *
 * Return: '0' on success and error value on failure
 */
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
	hbus->msi_info.chip = &hv_msi_irq_chip;
	hbus->msi_info.ops = &hv_msi_ops;
	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
		MSI_FLAG_PCI_MSIX);
	hbus->msi_info.handler = FLOW_HANDLER;
	hbus->msi_info.handler_name = FLOW_NAME;
	hbus->msi_info.data = hbus;
	hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
						     &hbus->msi_info,
						     hv_pci_get_root_domain());
	if (!hbus->irq_domain) {
		dev_err(&hbus->hdev->device,
			"Failed to build an MSI IRQ domain\n");
		return -ENODEV;
	}

	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);

	return 0;
}

/**
 * get_bar_size() - Get the address space consumed by a BAR
 * @bar_val: Value that a BAR returned after -1 was written
 *           to it.
 *
 * This function returns the size of the BAR, rounded up to 1
 * page. It has to be rounded up because the hypervisor's page
 * table entry that maps the BAR into the VM can't specify an
 * offset within a page. The invariant is that the hypervisor
 * must place any BAR smaller than a page at the beginning of
 * a page.
 *
 * Return: Size in bytes of the consumed MMIO space.
 */
static u64 get_bar_size(u64 bar_val)
{
	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
			PAGE_SIZE);
}

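/*
 * Worked example for get_bar_size() (editorial note): a 4 KiB 32-bit
 * memory BAR reaches here as 0xfffffffffffff000 plus flag bits, since
 * the callers extend 32-bit BARs with all-ones upper bits; masking the
 * flag bits and computing 1 + ~bar_val yields 0x1000, which round_up()
 * leaves at one page. A hypothetical 16-byte BAR would likewise round
 * up to a full page.
 */
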
/**
 * survey_child_resources() - Total all MMIO requirements
 * @hbus: Root PCI bus, as understood by this driver
 */
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	resource_size_t bar_size = 0;
	unsigned long flags;
	struct completion *event;
	u64 bar_val;
	int i;

	/* If nobody is waiting on the answer, don't compute it. */
	event = xchg(&hbus->survey_event, NULL);
	if (!event)
		return;

	/* If the answer has already been computed, go with it. */
	if (hbus->low_mmio_space || hbus->high_mmio_space) {
		complete(event);
		return;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Due to an interesting quirk of the PCI spec, all memory regions
	 * for a child device are a power of 2 in size and aligned in memory,
	 * so it's sufficient to just add them up without tracking alignment.
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
				dev_err(&hbus->hdev->device,
					"There's an I/O BAR in this list!\n");

			if (hpdev->probed_bar[i] != 0) {
				/*
				 * A probed BAR has all the upper bits set that
				 * can be changed.
				 */

				bar_val = hpdev->probed_bar[i];
				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					bar_val |=
					((u64)hpdev->probed_bar[++i] << 32);
				else
					bar_val |= 0xffffffff00000000ULL;

				bar_size = get_bar_size(bar_val);

				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					hbus->high_mmio_space += bar_size;
				else
					hbus->low_mmio_space += bar_size;
			}
		}
	}

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	complete(event);
}

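/*
 * Example of the tally above (editorial note): a child with one 64 KiB
 * 32-bit BAR and one 1 MiB 64-bit BAR adds 64 KiB to low_mmio_space
 * and 1 MiB to high_mmio_space. Because every BAR is a power of two in
 * size and naturally aligned, these plain sums are enough to size the
 * bridge windows later.
 */
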
/**
 * prepopulate_bars() - Fill in BARs with defaults
 * @hbus: Root PCI bus, as understood by this driver
 *
 * The core PCI driver code seems much, much happier if the BARs
 * for a device have values upon first scan. So fill them in.
 * The algorithm below works down from large sizes to small,
 * attempting to pack the assignments optimally. The assumption,
 * enforced in other parts of the code, is that the beginning of
 * the memory-mapped I/O space will be aligned on the largest
 * BAR size.
 */
static void prepopulate_bars(struct hv_pcibus_device *hbus)
{
	resource_size_t high_size = 0;
	resource_size_t low_size = 0;
	resource_size_t high_base = 0;
	resource_size_t low_base = 0;
	resource_size_t bar_size;
	struct hv_pci_dev *hpdev;
	unsigned long flags;
	u64 bar_val;
	u32 command;
	bool high;
	int i;

	if (hbus->low_mmio_space) {
		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		low_base = hbus->low_mmio_res->start;
	}

	if (hbus->high_mmio_space) {
		high_size = 1ULL <<
			(63 - __builtin_clzll(hbus->high_mmio_space));
		high_base = hbus->high_mmio_res->start;
	}

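	/*
	 * low_size/high_size start as the largest power of two that is
	 * <= the total space surveyed. Since each BAR size is a power of
	 * two no larger than that total, the halving loop below visits
	 * every BAR size that can occur. (Editorial note.)
	 */
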
	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Clear the memory enable bit, in case it's already set. This occurs
	 * in the suspend path of hibernation, where the device is suspended,
	 * resumed and suspended again: see hibernation_snapshot() and
	 * hibernation_platform_enter().
	 *
	 * If the memory enable bit is already set, Hyper-V silently ignores
	 * the below BAR updates, and the related PCI device driver cannot
	 * work, because reading from the device register(s) always returns
	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
		command &= ~PCI_COMMAND_MEMORY;
		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
	}

	/* Pick addresses for the BARs. */
	do {
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
				bar_val = hpdev->probed_bar[i];
				if (bar_val == 0)
					continue;
				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
				if (high) {
					bar_val |=
						((u64)hpdev->probed_bar[i + 1]
						 << 32);
				} else {
					bar_val |= 0xffffffffULL << 32;
				}
				bar_size = get_bar_size(bar_val);
				if (high) {
					if (high_size != bar_size) {
						i++;
						continue;
					}
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(high_base & 0xffffff00));
					i++;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4, (u32)(high_base >> 32));
					high_base += bar_size;
				} else {
					if (low_size != bar_size)
						continue;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(low_base & 0xffffff00));
					low_base += bar_size;
				}
			}
			if (high_size <= 1 && low_size <= 1) {
				/*
				 * No need to set the PCI_COMMAND_MEMORY bit as
				 * the core PCI driver doesn't require the bit
				 * to be pre-set. Actually here we intentionally
				 * keep the bit off so that the PCI BAR probing
				 * in the core PCI driver doesn't cause Hyper-V
				 * to unnecessarily unmap/map the virtual BARs
				 * from/to the physical BARs multiple times.
				 * This reduces the VM boot time significantly
				 * if the BAR sizes are huge.
				 */
				break;
			}
		}

		high_size >>= 1;
		low_size >>= 1;
	} while (high_size || low_size);

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}

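/*
 * Walk-through of the loop above (editorial note): with low-MMIO BARs
 * of 1 MiB, 64 KiB and 4 KiB, the first pass assigns only the 1 MiB
 * BAR; low_size then halves each pass until it matches 64 KiB and
 * later 4 KiB. Assigning strictly from large to small keeps every BAR
 * naturally aligned with no padding between assignments.
 */
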
/*
 * Assign entries in sysfs pci slot directory.
 *
 * Note that this function does not need to lock the children list
 * because it is called from pci_devices_present_work which
 * is serialized with hv_eject_device_work because they are on the
 * same ordered workqueue. Therefore hbus->children list will not change
 * even when pci_create_slot sleeps.
 */
static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	char name[SLOT_NAME_SIZE];
	int slot_nr;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (hpdev->pci_slot)
			continue;

		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
						  name, NULL);
		if (IS_ERR(hpdev->pci_slot)) {
			pr_warn("pci_create_slot %s failed\n", name);
			hpdev->pci_slot = NULL;
		}
	}
}

/*
 * Remove entries in sysfs pci slot directory.
 */
static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (!hpdev->pci_slot)
			continue;
		pci_destroy_slot(hpdev->pci_slot);
		hpdev->pci_slot = NULL;
	}
}

/*
 * Set NUMA node for the devices on the bus
 */
static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
{
	struct pci_dev *dev;
	struct pci_bus *bus = hbus->bridge->bus;
	struct hv_pci_dev *hv_dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
		if (!hv_dev)
			continue;

		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
			/*
			 * The kernel may boot with some NUMA nodes offline
			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
			 * "numa=off". In those cases, adjust the host provided
			 * NUMA node to a valid NUMA node used by the kernel.
			 */
			set_dev_node(&dev->dev,
				     numa_map_to_online_node(
					     hv_dev->desc.virtual_numa_node));

		put_pcichild(hv_dev);
	}
}

/**
 * create_root_hv_pci_bus() - Expose a new root PCI bus
 * @hbus: Root PCI bus, as understood by this driver
 *
 * Return: 0 on success, -errno on failure
 */
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
	int error;
	struct pci_host_bridge *bridge = hbus->bridge;

	bridge->dev.parent = &hbus->hdev->device;
	bridge->sysdata = &hbus->sysdata;
	bridge->ops = &hv_pcifront_ops;

	error = pci_scan_root_bus_bridge(bridge);
	if (error)
		return error;

	pci_lock_rescan_remove();
	hv_pci_assign_numa_node(hbus);
	pci_bus_assign_resources(bridge->bus);
	hv_pci_assign_slots(hbus);
	pci_bus_add_devices(bridge->bus);
	pci_unlock_rescan_remove();
	hbus->state = hv_pcibus_installed;
	return 0;
}

struct q_res_req_compl {
	struct completion host_event;
	struct hv_pci_dev *hpdev;
};

/**
 * q_resource_requirements() - Query Resource Requirements
 * @context: The completion context.
 * @resp: The response that came from the host.
 * @resp_packet_size: The size in bytes of resp.
 *
 * This function is invoked on completion of a Query Resource
 * Requirements packet.
 */
static void q_resource_requirements(void *context, struct pci_response *resp,
				    int resp_packet_size)
{
	struct q_res_req_compl *completion = context;
	struct pci_q_res_req_response *q_res_req =
		(struct pci_q_res_req_response *)resp;
	s32 status;
	int i;

	status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
	if (status < 0) {
		dev_err(&completion->hpdev->hbus->hdev->device,
			"query resource requirements failed: %x\n",
			status);
	} else {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			completion->hpdev->probed_bar[i] =
				q_res_req->probed_bar[i];
		}
	}

	complete(&completion->host_event);
}

/**
 * new_pcichild_device() - Create a new child device
 * @hbus: The internal struct tracking this root PCI bus.
 * @desc: The information supplied so far from the host
 *        about the device.
 *
 * This function creates the tracking structure for a new child
 * device and kicks off the process of figuring out what it is.
 *
 * Return: Pointer to the new tracking struct
 */
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
					      struct hv_pcidev_description *desc)
{
	struct hv_pci_dev *hpdev;
	struct pci_child_message *res_req;
	struct q_res_req_compl comp_pkt;
	struct {
		struct pci_packet init_packet;
		u8 buffer[sizeof(struct pci_child_message)];
	} pkt;
	unsigned long flags;
	int ret;

	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
	if (!hpdev)
		return NULL;

	hpdev->hbus = hbus;

	memset(&pkt, 0, sizeof(pkt));
	init_completion(&comp_pkt.host_event);
	comp_pkt.hpdev = hpdev;
	pkt.init_packet.compl_ctxt = &comp_pkt;
	pkt.init_packet.completion_func = q_resource_requirements;
	res_req = (struct pci_child_message *)pkt.buffer;
	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
	res_req->wslot.slot = desc->win_slot.slot;

	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
			       sizeof(struct pci_child_message),
			       (unsigned long)&pkt.init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto error;

	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
		goto error;

	hpdev->desc = *desc;
	refcount_set(&hpdev->refs, 1);
	get_pcichild(hpdev);
	spin_lock_irqsave(&hbus->device_list_lock, flags);

	list_add_tail(&hpdev->list_entry, &hbus->children);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	return hpdev;

error:
	kfree(hpdev);
	return NULL;
}

/**
 * get_pcichild_wslot() - Find device from slot
 * @hbus: Root PCI bus, as understood by this driver
 * @wslot: Location on the bus
 *
 * This function looks up a PCI device and returns the internal
 * representation of it. It acquires a reference on it, so that
 * the device won't be deleted while somebody is using it. The
 * caller is responsible for calling put_pcichild() to release
 * this reference.
 *
 * Return: Internal representation of a PCI device
 */
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot)
{
	unsigned long flags;
	struct hv_pci_dev *iter, *hpdev = NULL;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(iter, &hbus->children, list_entry) {
		if (iter->desc.win_slot.slot == wslot) {
			hpdev = iter;
			get_pcichild(hpdev);
			break;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	return hpdev;
}

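/*
 * Typical get_pcichild_wslot() lookup pattern, as used by the message
 * handlers below (editorial sketch; 'wslot' stands in for any slot
 * value taken from a host message):
 *
 *	hpdev = get_pcichild_wslot(hbus, wslot);
 *	if (!hpdev)
 *		return;
 *	...use hpdev...
 *	put_pcichild(hpdev);
 */
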
/**
 * pci_devices_present_work() - Handle new list of child devices
 * @work: Work struct embedded in struct hv_dr_work
 *
 * "Bus Relations" is the Windows term for "children of this
 * bus." The terminology is preserved here for people trying to
 * debug the interaction between Hyper-V and Linux. This
 * function is called when the parent partition reports a list
 * of functions that should be observed under this PCI Express
 * port (bus).
 *
 * This function updates the list, and must tolerate being
 * called multiple times with the same information. The typical
 * number of child devices is one, with very atypical cases
 * involving three or four, so the algorithms used here can be
 * simple and inefficient.
 *
 * It must also treat the omission of a previously observed device as
 * notification that the device no longer exists.
 *
 * Note that this function is serialized with hv_eject_device_work(),
 * because both are pushed to the ordered workqueue hbus->wq.
 */
static void pci_devices_present_work(struct work_struct *work)
{
	u32 child_no;
	bool found;
	struct hv_pcidev_description *new_desc;
	struct hv_pci_dev *hpdev;
	struct hv_pcibus_device *hbus;
	struct list_head removed;
	struct hv_dr_work *dr_wrk;
	struct hv_dr_state *dr = NULL;
	unsigned long flags;

	dr_wrk = container_of(work, struct hv_dr_work, wrk);
	hbus = dr_wrk->bus;
	kfree(dr_wrk);

	INIT_LIST_HEAD(&removed);

	/* Pull this off the queue and process it if it was the last one. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	while (!list_empty(&hbus->dr_list)) {
		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
				      list_entry);
		list_del(&dr->list_entry);

		/* Throw this away if the list still has stuff in it. */
		if (!list_empty(&hbus->dr_list)) {
			kfree(dr);
			continue;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (!dr)
		return;

	mutex_lock(&hbus->state_lock);

	/* First, mark all existing children as reported missing. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		hpdev->reported_missing = true;
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Next, add back any reported devices. */
	for (child_no = 0; child_no < dr->device_count; child_no++) {
		found = false;
		new_desc = &dr->func[child_no];

		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
			    (hpdev->desc.v_id == new_desc->v_id) &&
			    (hpdev->desc.d_id == new_desc->d_id) &&
			    (hpdev->desc.ser == new_desc->ser)) {
				hpdev->reported_missing = false;
				found = true;
			}
		}
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		if (!found) {
			hpdev = new_pcichild_device(hbus, new_desc);
			if (!hpdev)
				dev_err(&hbus->hdev->device,
					"couldn't record a child device.\n");
		}
	}

	/* Move missing children to a list on the stack. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	do {
		found = false;
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if (hpdev->reported_missing) {
				found = true;
				put_pcichild(hpdev);
				list_move_tail(&hpdev->list_entry, &removed);
				break;
			}
		}
	} while (found);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Delete everything that should no longer exist. */
	while (!list_empty(&removed)) {
		hpdev = list_first_entry(&removed, struct hv_pci_dev,
					 list_entry);
		list_del(&hpdev->list_entry);

		if (hpdev->pci_slot)
			pci_destroy_slot(hpdev->pci_slot);

		put_pcichild(hpdev);
	}

	switch (hbus->state) {
	case hv_pcibus_installed:
		/*
		 * Tell the core to rescan bus
		 * because there may have been changes.
		 */
		pci_lock_rescan_remove();
		pci_scan_child_bus(hbus->bridge->bus);
		hv_pci_assign_numa_node(hbus);
		hv_pci_assign_slots(hbus);
		pci_unlock_rescan_remove();
		break;

	case hv_pcibus_init:
	case hv_pcibus_probed:
		survey_child_resources(hbus);
		break;

	default:
		break;
	}

	mutex_unlock(&hbus->state_lock);

	kfree(dr);
}

/**
 * hv_pci_start_relations_work() - Queue work to start device discovery
 * @hbus: Root PCI bus, as understood by this driver
 * @dr: The list of children returned from host
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
				       struct hv_dr_state *dr)
{
	struct hv_dr_work *dr_wrk;
	unsigned long flags;
	bool pending_dr;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hbus->hdev->device,
			 "PCI VMBus BUS_RELATIONS: ignored\n");
		return -ENOENT;
	}

	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
	if (!dr_wrk)
		return -ENOMEM;

	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
	dr_wrk->bus = hbus;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	/*
	 * If pending_dr is true, we have already queued a work,
	 * which will see the new dr. Otherwise, we need to
	 * queue a new work.
	 */
	pending_dr = !list_empty(&hbus->dr_list);
	list_add_tail(&dr->list_entry, &hbus->dr_list);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (pending_dr)
		kfree(dr_wrk);
	else
		queue_work(hbus->wq, &dr_wrk->wrk);

	return 0;
}

/**
 * hv_pci_devices_present() - Handle list of new children
 * @hbus: Root PCI bus, as understood by this driver
 * @relations: Packet from host listing children
 *
 * Process a new list of devices on the bus. The list of devices is
 * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
 * whenever a new list of devices for this bus appears.
 */
static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
				   struct pci_bus_relations *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}

/**
 * hv_pci_devices_present2() - Handle list of new children
 * @hbus: Root PCI bus, as understood by this driver
 * @relations: Packet from host listing children
 *
 * This function is the v2 version of hv_pci_devices_present()
 */
static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
				    struct pci_bus_relations2 *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
		dr->func[i].flags = relations->func[i].flags;
		dr->func[i].virtual_numa_node =
			relations->func[i].virtual_numa_node;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}

/**
 * hv_eject_device_work() - Asynchronously handles ejection
 * @work: Work struct embedded in internal device struct
 *
 * This function handles ejecting a device. Windows will
 * attempt to gracefully eject a device, waiting 60 seconds to
 * hear back from the guest OS that this completed successfully.
 * If this timer expires, the device will be forcibly removed.
 */
static void hv_eject_device_work(struct work_struct *work)
{
	struct pci_eject_response *ejct_pkt;
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	unsigned long flags;
	int wslot;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_eject_response)];
	} ctxt;

	hpdev = container_of(work, struct hv_pci_dev, wrk);
	hbus = hpdev->hbus;

	mutex_lock(&hbus->state_lock);

	/*
	 * Ejection can come before or after the PCI bus has been set up, so
	 * attempt to find it and tear down the bus state, if it exists. This
	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
	 * because hbus->bridge->bus may not exist yet.
	 */
	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
	if (pdev) {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(pdev);
		pci_dev_put(pdev);
		pci_unlock_rescan_remove();
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_del(&hpdev->list_entry);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (hpdev->pci_slot)
		pci_destroy_slot(hpdev->pci_slot);

	memset(&ctxt, 0, sizeof(ctxt));
	ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
			 sizeof(*ejct_pkt), 0,
			 VM_PKT_DATA_INBAND, 0);

	/* For the get_pcichild() in hv_pci_eject_device() */
	put_pcichild(hpdev);
	/* For the two refs got in new_pcichild_device() */
	put_pcichild(hpdev);
	put_pcichild(hpdev);
	/* hpdev has been freed. Do not use it any more. */

	mutex_unlock(&hbus->state_lock);
}

/**
 * hv_pci_eject_device() - Handles device ejection
 * @hpdev: Internal device tracking struct
 *
 * This function is invoked when an ejection packet arrives. It
 * just schedules work so that we don't re-enter the packet
 * delivery code handling the ejection.
 */
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct hv_device *hdev = hbus->hdev;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
		return;
	}

	get_pcichild(hpdev);
	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
	queue_work(hbus->wq, &hpdev->wrk);
}

/**
 * hv_pci_onchannelcallback() - Handles incoming packets
 * @context: Internal bus tracking struct
 *
 * This function is invoked whenever the host sends a packet to
 * this channel (which is private to this root PCI bus).
 */
static void hv_pci_onchannelcallback(void *context)
{
	const int packet_size = 0x100;
	int ret;
	struct hv_pcibus_device *hbus = context;
	struct vmbus_channel *chan = hbus->hdev->channel;
	u32 bytes_recvd;
	u64 req_id, req_addr;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = packet_size;
	struct pci_packet *comp_packet;
	struct pci_response *response;
	struct pci_incoming_message *new_message;
	struct pci_bus_relations *bus_rel;
	struct pci_bus_relations2 *bus_rel2;
	struct pci_dev_inval_block *inval;
	struct pci_dev_incoming *dev_message;
	struct hv_pci_dev *hpdev;
	unsigned long flags;

	buffer = kmalloc(bufferlen, GFP_ATOMIC);
	if (!buffer)
		return;

	while (1) {
		ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
					   &bytes_recvd, &req_id);

		if (ret == -ENOBUFS) {
			kfree(buffer);
			/* Handle large packet */
			bufferlen = bytes_recvd;
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (!buffer)
				return;
			continue;
		}

		/* Zero length indicates there are no more packets. */
		if (ret || !bytes_recvd)
			break;

		/*
		 * All incoming packets must be at least as large as a
		 * response.
		 */
		if (bytes_recvd <= sizeof(struct pci_response))
			continue;
		desc = (struct vmpacket_descriptor *)buffer;

		switch (desc->type) {
		case VM_PKT_COMP:

			lock_requestor(chan, flags);
			req_addr = __vmbus_request_addr_match(chan, req_id,
							      VMBUS_RQST_ADDR_ANY);
			if (req_addr == VMBUS_RQST_ERROR) {
				unlock_requestor(chan, flags);
				dev_err(&hbus->hdev->device,
					"Invalid transaction ID %llx\n",
					req_id);
				break;
			}
			comp_packet = (struct pci_packet *)req_addr;
			response = (struct pci_response *)buffer;
			/*
			 * Call ->completion_func() within the critical section to make
			 * sure that the packet pointer is still valid during the call:
			 * here 'valid' means that there's a task still waiting for the
			 * completion, and that the packet data is still on the waiting
			 * task's stack. Cf. hv_compose_msi_msg().
			 */
			comp_packet->completion_func(comp_packet->compl_ctxt,
						     response,
						     bytes_recvd);
			unlock_requestor(chan, flags);
			break;

		case VM_PKT_DATA_INBAND:

			new_message = (struct pci_incoming_message *)buffer;
			switch (new_message->message_type.type) {
			case PCI_BUS_RELATIONS:

				bus_rel = (struct pci_bus_relations *)buffer;
				if (bytes_recvd < sizeof(*bus_rel) ||
				    bytes_recvd <
					struct_size(bus_rel, func,
						    bus_rel->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations too small\n");
					break;
				}

				hv_pci_devices_present(hbus, bus_rel);
				break;

			case PCI_BUS_RELATIONS2:

				bus_rel2 = (struct pci_bus_relations2 *)buffer;
				if (bytes_recvd < sizeof(*bus_rel2) ||
				    bytes_recvd <
					struct_size(bus_rel2, func,
						    bus_rel2->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations v2 too small\n");
					break;
				}

				hv_pci_devices_present2(hbus, bus_rel2);
				break;

			case PCI_EJECT:

				dev_message = (struct pci_dev_incoming *)buffer;
				if (bytes_recvd < sizeof(*dev_message)) {
					dev_err(&hbus->hdev->device,
						"eject message too small\n");
					break;
				}
				hpdev = get_pcichild_wslot(hbus,
						dev_message->wslot.slot);
				if (hpdev) {
					hv_pci_eject_device(hpdev);
					put_pcichild(hpdev);
				}
				break;

			case PCI_INVALIDATE_BLOCK:

				inval = (struct pci_dev_inval_block *)buffer;
				if (bytes_recvd < sizeof(*inval)) {
					dev_err(&hbus->hdev->device,
						"invalidate message too small\n");
					break;
				}
				hpdev = get_pcichild_wslot(hbus,
						inval->wslot.slot);
				if (hpdev) {
					if (hpdev->block_invalidate) {
						hpdev->block_invalidate(
							hpdev->invalidate_context,
							inval->block_mask);
					}
					put_pcichild(hpdev);
				}
				break;

			default:
				dev_warn(&hbus->hdev->device,
					 "Unimplemented protocol message %x\n",
					 new_message->message_type.type);
				break;
			}
			break;

		default:
			dev_err(&hbus->hdev->device,
				"unhandled packet type %d, tid %llx len %d\n",
				desc->type, req_id, bytes_recvd);
			break;
		}
	}

	kfree(buffer);
}

/**
 * hv_pci_protocol_negotiation() - Set up protocol
 * @hdev: VMBus's tracking struct for this root PCI bus.
 * @version: Array of supported channel protocol versions in
 *           the order of probing - highest go first.
 * @num_version: Number of elements in the version array.
 *
 * This driver is intended to support running on Windows 10
 * (server) and later versions. It will not run on earlier
 * versions, as they assume that many of the operations which
 * Linux needs accomplished with a spinlock held were done via
 * asynchronous messaging via VMBus. Windows 10 increases the
 * surface area of PCI emulation so that these actions can take
 * place by suspending a virtual processor for their duration.
 *
 * This function negotiates the channel protocol version,
 * failing if the host doesn't support the necessary protocol
 * level.
 */
static int hv_pci_protocol_negotiation(struct hv_device *hdev,
				       enum pci_protocol_version_t version[],
				       int num_version)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_version_request *version_req;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	int ret;
	int i;

	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	version_req = (struct pci_version_request *)(pkt + 1);
	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;

	for (i = 0; i < num_version; i++) {
		version_req->protocol_version = version[i];
		ret = vmbus_sendpacket(hdev->channel, version_req,
				       sizeof(struct pci_version_request),
				       (unsigned long)pkt, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);

		if (ret) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed to request version: %d",
				ret);
			goto exit;
		}

		if (comp_pkt.completion_status >= 0) {
			hbus->protocol_version = version[i];
			dev_info(&hdev->device,
				 "PCI VMBus probing: Using version %#x\n",
				 hbus->protocol_version);
			goto exit;
		}

		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed version request: %#x",
				comp_pkt.completion_status);
			ret = -EPROTO;
			goto exit;
		}

		reinit_completion(&comp_pkt.host_event);
	}

	dev_err(&hdev->device,
		"PCI pass-through VSP failed to find supported version");
	ret = -EPROTO;

exit:
	kfree(pkt);
	return ret;
}

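/*
 * Example negotiation (editorial note, with a hypothetical host that
 * only speaks protocol 1.2): the loop above offers 1.4 and then 1.3,
 * both rejected with STATUS_REVISION_MISMATCH, then offers 1.2, which
 * the host accepts, so hbus->protocol_version becomes
 * PCI_PROTOCOL_VERSION_1_2.
 */
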
/**
 * hv_pci_free_bridge_windows() - Release memory regions for the
 * bus
 * @hbus: Root PCI bus, as understood by this driver
 */
static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
{
	/*
	 * Set the resources back to the way they looked when they
	 * were allocated by setting IORESOURCE_BUSY again.
	 */

	if (hbus->low_mmio_space && hbus->low_mmio_res) {
		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	if (hbus->high_mmio_space && hbus->high_mmio_res) {
		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->high_mmio_res->start,
				resource_size(hbus->high_mmio_res));
	}
}

/**
 * hv_pci_allocate_bridge_windows() - Allocate memory regions
 * for the bus
 * @hbus: Root PCI bus, as understood by this driver
 *
 * This function calls vmbus_allocate_mmio(), which is itself a
 * bit of a compromise. Ideally, we might change the pnp layer
 * in the kernel such that it comprehends either PCI devices
 * which are "grandchildren of ACPI," with some intermediate bus
 * node (in this case, VMBus) or change it such that it
 * understands VMBus. The pnp layer, however, has been declared
 * deprecated, and not subject to change.
 *
 * The workaround, implemented here, is to ask VMBus to allocate
 * MMIO space for this bus. VMBus itself knows which ranges are
 * appropriate by looking at its own ACPI objects. Then, after
 * these ranges are claimed, they're modified to look like they
 * would have looked if the ACPI and pnp code had allocated
 * bridge windows. These descriptors have to exist in this form
 * in order to satisfy the code which will get invoked when the
 * endpoint PCI function driver calls request_mem_region() or
 * request_mem_region_exclusive().
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
{
	resource_size_t align;
	int ret;

	if (hbus->low_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
					  (u64)(u32)0xffffffff,
					  hbus->low_mmio_space,
					  align, false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
				hbus->low_mmio_space);
			return ret;
		}

		/* Modify this resource to become a bridge window. */
		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
	}

	if (hbus->high_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
					  0x100000000, -1,
					  hbus->high_mmio_space, align,
					  false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
				hbus->high_mmio_space);
			goto release_low_mmio;
		}

		/* Modify this resource to become a bridge window. */
		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
	}

	return 0;

release_low_mmio:
	if (hbus->low_mmio_res) {
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	return ret;
}

/**
 * hv_allocate_config_window() - Find MMIO space for PCI Config
 * @hbus: Root PCI bus, as understood by this driver
 *
 * This function claims memory-mapped I/O space for accessing
 * configuration space for the functions on this bus.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
{
	int ret;

	/*
	 * Set up a region of MMIO space to use for accessing configuration
	 * space.
	 */
	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
	if (ret)
		return ret;

	/*
	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
	 * resource claims (those which cannot be overlapped) and the ranges
	 * which are valid for the children of this bus, which are intended
	 * to be overlapped by those children. Set the flag on this claim
	 * meaning that this region can't be overlapped.
	 */

	hbus->mem_config->flags |= IORESOURCE_BUSY;

	return 0;
}

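/*
 * Layout note for the claim above (editorial): the region is
 * PCI_CONFIG_MMIO_LENGTH (0x2000) bytes long, and the config-space
 * accessors treat the first 4 KiB page as control registers while the
 * page at CFG_PAGE_OFFSET serves as the window onto the selected
 * function's configuration space.
 */
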
static void hv_free_config_window(struct hv_pcibus_device *hbus)
{
	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);

/**
 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_enter_d0(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_bus_d0_entry *d0_entry;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	bool retry = true;
	int ret;

enter_d0_retry:
	/*
	 * Tell the host that the bus is ready to use, and moved into the
	 * powered-on state. This includes telling the host which region
	 * of memory-mapped I/O space has been chosen for configuration space
	 * access.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
	d0_entry->mmio_base = hbus->mem_config->start;

	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (!ret)
		ret = wait_for_response(hdev, &comp_pkt.host_event);

	if (ret)
		goto exit;

	/*
	 * In certain cases (e.g. kdump) the PCI device of interest was
	 * not cleanly shut down and its resources are still held on the
	 * host side, so the host could return an invalid device status.
	 * We need to explicitly request that the host release the
	 * resources and then try to enter D0 again.
	 */
	if (comp_pkt.completion_status < 0 && retry) {
		retry = false;

		dev_err(&hdev->device, "Retrying D0 Entry\n");

		/*
		 * hv_pci_bus_exit() calls hv_send_resource_released()
		 * to free up resources of its child devices.
		 * In the kdump kernel we need to set the
		 * wslot_res_allocated to 255 so it scans all child
		 * devices to release resources allocated in the
		 * normal kernel before panic happened.
		 */
		hbus->wslot_res_allocated = 255;

		ret = hv_pci_bus_exit(hdev, true);

		if (ret == 0) {
			kfree(pkt);
			goto enter_d0_retry;
		}
		dev_err(&hdev->device,
			"Retrying D0 failed with ret %d\n", ret);
	}

	if (comp_pkt.completion_status < 0) {
		dev_err(&hdev->device,
			"PCI Pass-through VSP failed D0 Entry with status %x\n",
			comp_pkt.completion_status);
		ret = -EPROTO;
		goto exit;
	}

	ret = 0;

exit:
	kfree(pkt);
	return ret;
}

/**
 * hv_pci_query_relations() - Ask host to send list of child devices
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_query_relations(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_message message;
	struct completion comp;
	int ret;

	/* Ask the host to send along the list of child devices */
	init_completion(&comp);
	if (cmpxchg(&hbus->survey_event, NULL, &comp))
		return -ENOTEMPTY;

	memset(&message, 0, sizeof(message));
	message.type = PCI_QUERY_BUS_RELATIONS;

	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
			       0, VM_PKT_DATA_INBAND, 0);
	if (!ret)
		ret = wait_for_response(hdev, &comp);

	/*
	 * In the case of fast device addition/removal, it's possible that
	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
	 * already got a PCI_BUS_RELATIONS* message from the host and the
	 * channel callback already scheduled a work item to hbus->wq, which
	 * can be running pci_devices_present_work() ->
	 * survey_child_resources() -> complete(&hbus->survey_event), even
	 * after hv_pci_query_relations() exits and the stack variable 'comp'
	 * is no longer valid; as a result, a hang or a page fault may happen
	 * when the complete() calls raw_spin_lock_irqsave(). Flush hbus->wq
	 * before we exit from hv_pci_query_relations() to avoid the issue.
	 * Note: if 'ret' is -ENODEV, there can't be any more work items
	 * scheduled to hbus->wq after the flush_workqueue(): see
	 * vmbus_onoffer_rescind() -> vmbus_reset_channel_cb(),
	 * vmbus_rescind_cleanup() -> channel->rescind = true.
	 */
	flush_workqueue(hbus->wq);

	return ret;
}

/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * The host OS expects to be sent a request, as a message, that
 * contains all the resources that the device will use. The response
 * contains those same resources, "translated", which is to say the
 * values that the hardware should use when it delivers an interrupt.
 * (MMIO resources are used in local terms.) This is nice for Windows,
 * and lines up with the FDO/PDO split, which doesn't exist in Linux.
 * Linux fully expects to scan an emulated PCI configuration space
 * instead. So this message is sent here only to drive the state
 * machine on the host forward.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	int wslot;
	int ret;

	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ret = 0;

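	/*
	 * Walk all 256 possible wslots; get_pcichild_wslot() returns NULL
	 * for slots with no child device, so only present children are
	 * reported to the host.
	 */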
	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)(pkt + 1);
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)(pkt + 1);
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, pkt + 1,
				size_res, (unsigned long)pkt,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);
		if (ret)
			break;

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x",
				comp_pkt.completion_status);
			break;
		}

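		/*
		 * Remember the highest wslot successfully reported so far;
		 * hv_send_resources_released() uses this to know where to
		 * start releasing from.
		 */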
		hbus->wslot_res_allocated = wslot;
	}

	kfree(pkt);
	return ret;
}

/**
 * hv_send_resources_released() - Report local resources released
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	int wslot;
	int ret;

	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;

		hbus->wslot_res_allocated = wslot - 1;
	}

	hbus->wslot_res_allocated = -1;

	return 0;
}

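/*
 * Bitmap of PCI domain (segment) numbers in use. Domain numbers are
 * 16 bits wide, hence the 64K-bit map.
 */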
#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for passthrough PCI devices of this driver.
 */
#define HVPCI_DOM_INVALID 0

/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * @dom: Requested domain number
 *
 * Check if the requested PCI domain number is in use, and return another
 * free number if it is.
 *
 * Return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}

/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}

/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev: VMBus's tracking struct for this root PCI bus
 * @dev_id: Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct pci_host_bridge *bridge;
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
	if (!bridge)
		return -ENOMEM;

	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;

	hbus->bridge = bridge;
	mutex_init(&hbus->state_lock);
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and other
	 * specs. Pull it from the instance ID, to get something usually
	 * unique. In the rare case of a collision, we will find another
	 * number not in use.
	 *
	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
	 * together with this guest driver can guarantee that (1) the only
	 * domain used by Gen1 VMs for something that looks like a physical
	 * PCI bus (which is actually emulated by the hypervisor) is domain 0,
	 * and (2) there will be no overlap between domains (after fixing
	 * possible collisions) in the same VM.
	 */
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%x or other numbers", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%x has collision, using 0x%x",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
	hbus->use_calls = false;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

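	/*
	 * Install the VMbus requestor callbacks and size the request ID
	 * table before opening the channel, so that completion packets
	 * can be matched back to the requests that produced them.
	 */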
	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	mutex_unlock(&hbus->state_lock);
	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
release_state_lock:
	mutex_unlock(&hbus->state_lock);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}

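/**
 * hv_pci_bus_exit() - Bring the "bus" out of the D0 power state
 * @hdev: VMBus's tracking struct for this root PCI bus
 * @keep_devs: If false, tear down the tracking state for the child
 *             devices as well
 *
 * Releases the resources claimed for the child devices and sends a
 * PCI_BUS_D0EXIT message to the host, unless the channel has already
 * been rescinded.
 *
 * Return: 0 on success, -errno on failure
 */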
static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct vmbus_channel *chan = hdev->channel;
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct pci_message *msg;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	u64 trans_id;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (chan->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs got in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	msg = (struct pci_message *)pkt.buffer;
	msg->type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
				     (unsigned long)&pkt.teardown_packet,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
		/*
		 * The completion packet on the stack becomes invalid after
		 * 'return'; remove the ID from the VMbus requestor if the
		 * identifier is still mapped to/associated with the packet.
		 *
		 * Cf. hv_pci_onchannelcallback().
		 */
		vmbus_request_addr_match(chan, trans_id,
					 (unsigned long)&pkt.teardown_packet);
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev: VMBus's tracking struct for this root PCI bus
 */
static void hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), so it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
}

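/**
 * hv_pci_suspend() - Suspend routine for this VMBus channel
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */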
static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend(). When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can still be running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which hv_pci_suspend() may be closing at the same
	 * time, e.g. the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it
	 * flushes hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}

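/*
 * Recompose the MSI message for every MSI/MSI-X interrupt that is
 * already set up on the device, so that the hypervisor-side state is
 * re-created; see the comment before hv_pci_restore_msi_state().
 */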
static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct irq_data *irq_data;
	struct msi_desc *entry;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		return 0;

	guard(msi_descs_lock)(&pdev->dev);
	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data))
			return -EINVAL;
		hv_compose_msi_msg(irq_data, &entry->msg);
	}
	return 0;
}

/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
 * doesn't trap and emulate the MMIO accesses, hv_compose_msi_msg() must
 * be used here to ask Hyper-V to re-create the IOMMU Interrupt Remapping
 * Table entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}

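/**
 * hv_pci_resume() - Resume routine for this VMBus channel
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */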
static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto release_state_lock;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	mutex_unlock(&hbus->state_lock);
	return 0;

release_state_lock:
	mutex_unlock(&hbus->state_lock);
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name = "hv_pci",
	.id_table = hv_pci_id_table,
	.probe = hv_pci_probe,
	.remove = hv_pci_remove,
	.suspend = hv_pci_suspend,
	.resume = hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	int ret;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	if (hv_root_partition() && !hv_nested)
		return -ENODEV;

	ret = hv_pci_irqchip_init();
	if (ret)
		return ret;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");