// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
#include <linux/of_irq.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
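
/*
 * For example (purely illustrative): PCI_MAKE_VERSION(1, 4) yields
 * 0x00010004, from which PCI_MAJOR_VERSION() recovers 1 and
 * PCI_MINOR_VERSION() recovers 4. Note that PCI_MINOR_VERSION() masks
 * only the low byte, which is sufficient for the small minor numbers
 * used by this protocol.
 */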

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - highest first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for 32-bit serial number as string */
#define SLOT_NAME_SIZE 11

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2         = PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2              = PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8 bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE		= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};

struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;

/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;
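
/*
 * For example (illustrative only): a function granted four MSI messages
 * would send vector_count = 4 with a base vector V, claiming IDT entries
 * V through V + 3; an MSI-X function sends one such descriptor per table
 * entry, each with vector_count = 1.
 */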

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 * Everything is the same as in 'hv_msi_desc2' except that the size of the
 * 'vector' field is larger to support bigger vector values. For example:
 * LPI vectors on ARM.
 */
struct hv_msi_desc3 {
	u32	vector;
	u8	delivery_mode;
	u8	reserved;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;
};
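
/*
 * Usage note (editorial): senders in this file embed a pci_packet at the
 * head of an on-stack structure followed by the message buffer, e.g.
 *
 *	struct {
 *		struct pci_packet pkt;
 *		u8 buffer[sizeof(struct pci_delete_interrupt)];
 *	} ctxt;
 *
 * so that the completion callback and its context travel with the request
 * (see hv_int_desc_free() below for one such instance).
 */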

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * protocol_version: The protocol version requested.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry. This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;

	struct mutex state_lock;
	enum hv_pcibus_state state;

	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;	/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct irq_domain *irq_domain;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;
	bool use_calls; /* Use hypercalls to access mmio cfg space */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[] __counted_by(device_count);
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};

struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_ACK

static int hv_pci_irqchip_init(void)
{
	return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

#define hv_msi_prepare		pci_msi_prepare

/**
 * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_irq_retarget_interrupt(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct hv_retarget_device_interrupt *params;
	struct tran_int_desc *int_desc;
	struct hv_pcibus_device *hbus;
	const struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	int_desc = data->chip_data;
	if (!int_desc) {
		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
			 __func__, data->irq);
		return;
	}

	local_irq_save(flags);

	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			   (hbus->hdev->dev_instance.b[4] << 16) |
			   (hbus->hdev->dev_instance.b[7] << 8) |
			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
			   PCI_FUNC(pdev->devfn);
	params->int_target.vector = hv_msi_get_int_vector(data);

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto out;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto out;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

out:
	local_irq_restore(flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the above hv_do_hypercall()
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity(), so later when the VM resumes,
	 * hv_pci_restore_msi_state() is able to correctly restore the
	 * interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);
}

static void hv_arch_irq_unmask(struct irq_data *data)
{
	if (hv_root_partition())
		/*
		 * In case of the nested root partition, the nested hypervisor
		 * is taking care of interrupt remapping and thus the
		 * MAP_DEVICE_INTERRUPT hypercall is required instead of
		 * RETARGET_INTERRUPT.
		 */
		(void)hv_map_msi_interrupt(data, NULL);
	else
		hv_irq_retarget_interrupt(data);
}
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; the arch SPI range is [32, 1019], but leave a
 * bit of room at the start to allow for SPIs to be specified through ACPI,
 * and start at a power of two to satisfy the power-of-2 multi-MSI
 * requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE	0
#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_EOI
#define hv_msi_prepare		NULL

struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex	map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 *			the bitmap.
 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 *			the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}

static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}
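
/*
 * Note (editorial): bitmap_find_free_region() hands out naturally aligned
 * power-of-2 sized regions, so a request for e.g. nr_irqs = 3 uses
 * get_count_order(3) = 2, i.e. a block of four SPIs. This is what makes
 * the power-of-2 multi-MSI alignment requirement mentioned above hold.
 */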

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	fwspec.fwnode = domain->parent->fwnode;
	if (is_of_node(fwspec.fwnode)) {
		/* SPI lines for OF translations start at offset 32 */
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	}

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Pick the first cpu as the irq affinity that can be temporarily used for
 * composing MSI from the hypervisor. GIC will eventually set the right
 * affinity for the irq and the 'unmask' will retarget the interrupt to that
 * cpu.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc	= hv_pci_vec_irq_domain_alloc,
	.free	= hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};

#ifdef CONFIG_OF

static struct irq_domain *hv_pci_of_irq_domain_parent(void)
{
	struct device_node *parent;
	struct irq_domain *domain;

	parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
	if (!parent)
		return NULL;
	domain = irq_find_host(parent);
	of_node_put(parent);

	return domain;
}

#endif

#ifdef CONFIG_ACPI

static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
{
	acpi_gsi_domain_disp_fn gsi_domain_disp_fn;

	gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
	if (!gsi_domain_disp_fn)
		return NULL;
	return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
					DOMAIN_BUS_ANY);
}

#endif

static int hv_pci_irqchip_init(void)
{
	static struct hv_pci_chip_data *chip_data;
	struct fwnode_handle *fn = NULL;
	struct irq_domain *irq_domain_parent = NULL;
	int ret = -ENOMEM;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return ret;

	mutex_init(&chip_data->map_lock);
	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
	if (!fn)
		goto free_chip;

	/*
	 * Once an IRQ domain is enabled, it should not be removed, since
	 * there is no way to ensure that all the corresponding devices are
	 * also gone and that no interrupts will be generated.
	 */
#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		irq_domain_parent = hv_pci_acpi_irq_domain_parent();
#endif
#ifdef CONFIG_OF
	if (!irq_domain_parent)
		irq_domain_parent = hv_pci_of_irq_domain_parent();
#endif
	if (!irq_domain_parent) {
		WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
		ret = -EINVAL;
		goto free_chip;
	}

	hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
							    HV_PCI_MSI_SPI_NR,
							    fn, &hv_pci_domain_ops,
							    chip_data);

	if (!hv_msi_gic_irq_domain) {
		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
		goto free_chip;
	}

	return 0;

free_chip:
	kfree(chip_data);
	if (fn)
		irq_domain_free_fwnode(fn);

	return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for the interrupts of PCI devices, and SPIs are managed via
 * GICD registers, which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn: The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot: The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
	struct hv_mmio_read_input *in;
	struct hv_mmio_read_output *out;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument page. Use it for
	 * both input and output.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
	in->gpa = gpa;
	in->size = size;

	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
	if (hv_result_success(ret)) {
		switch (size) {
		case 1:
			*val = *(u8 *)(out->data);
			break;
		case 2:
			*val = *(u16 *)(out->data);
			break;
		default:
			*val = *(u32 *)(out->data);
			break;
		}
	} else
		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
	struct hv_mmio_write_input *in;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument memory.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	in->gpa = gpa;
	in->size = size;
	switch (size) {
	case 1:
		*(u8 *)(in->data) = val;
		break;
	case 2:
		*(u16 *)(in->data) = val;
		break;
	default:
		*(u32 *)(in->data) = val;
		break;
	}

	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
	if (!hv_result_success(ret))
		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space. Writing to the first page chooses
 * the PCI function being written or read. Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */
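
/*
 * For example (illustrative): with CFG_PAGE_OFFSET = 0x1000, reading the
 * 16-bit vendor ID of the function at wslot S comes down to
 *
 *	writel(S, hbus->cfg_addr);	(select the function)
 *	val = readw(hbus->cfg_addr + 0x1000 + PCI_VENDOR_ID);
 *
 * which is exactly what hv_pcifront_get_vendor_id() below does in the
 * memory-mapped (non-hypercall) case.
 */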

/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
		/*
		 * Interrupt Line and Interrupt Pin are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {

		spin_lock_irqsave(&hbus->config_lock, flags);
		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_read_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to be read. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before reading. */
			mb();
			/* Read from that function's config space. */
			switch (size) {
			case 1:
				*val = readb(addr);
				break;
			case 2:
				*val = readw(addr);
				break;
			default:
				*val = readl(addr);
				break;
			}
			/*
			 * Make sure the read was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	u32 val;
	u16 ret;
	unsigned long flags;

	spin_lock_irqsave(&hbus->config_lock, flags);

	if (hbus->use_calls) {
		phys_addr_t addr = hbus->mem_config->start +
					CFG_PAGE_OFFSET + PCI_VENDOR_ID;

		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
				  hpdev->desc.win_slot.slot);
		hv_pci_read_mmio(dev, addr, 2, &val);
		ret = val;  /* Truncates to 16 bits */
	} else {
		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
					PCI_VENDOR_ID;
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		ret = readw(addr);
		/*
		 * mb() is not required here, because the
		 * spin_unlock_irqrestore() is a barrier.
		 */
	}

	spin_unlock_irqrestore(&hbus->config_lock, flags);

	return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);

		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_write_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to write. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before writing. */
			wmb();
			/* Write to that function's config space. */
			switch (size) {
			case 1:
				writeb(val, addr);
				break;
			case 2:
				writew(val, addr);
				break;
			default:
				writel(val, addr);
				break;
			}
			/*
			 * Make sure the write was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver. These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional. Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver. The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one or
 * more of the first 64 blocks. This invalidation is delivered via a callback
 * supplied to the VF driver by this driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */
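
/*
 * Illustrative sketch (not part of the driver): a VF driver consuming the
 * backchannel might register for invalidations and then read a block,
 * along the lines of
 *
 *	static void vf_inval_cb(void *ctx, u64 block_mask)	(hypothetical)
 *	{
 *		// Each bit set in block_mask names an invalidated block.
 *	}
 *
 *	unsigned int got;
 *	u8 blk[HV_CONFIG_BLOCK_SIZE_MAX];
 *
 *	hv_register_block_invalidate(pdev, ctx, vf_inval_cb);
 *	hv_read_config_block(pdev, blk, sizeof(blk), 0, &got);
 *
 * using the entry points defined below. (These functions are static here;
 * real consumers reach them indirectly rather than by direct call.)
 */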

struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)pkt.buf;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}

/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				 unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)pkt.buf;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}

/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
					void (*block_invalidate)(void *context,
								 u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}

1655 /* Interrupt management hooks */
hv_int_desc_free(struct hv_pci_dev * hpdev,struct tran_int_desc * int_desc)1656 static void hv_int_desc_free(struct hv_pci_dev *hpdev,
1657 struct tran_int_desc *int_desc)
1658 {
1659 struct pci_delete_interrupt *int_pkt;
1660 struct {
1661 struct pci_packet pkt;
1662 u8 buffer[sizeof(struct pci_delete_interrupt)];
1663 } ctxt;
1664
1665 if (!int_desc->vector_count) {
1666 kfree(int_desc);
1667 return;
1668 }
1669 memset(&ctxt, 0, sizeof(ctxt));
1670 int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
1671 int_pkt->message_type.type =
1672 PCI_DELETE_INTERRUPT_MESSAGE;
1673 int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
1674 int_pkt->int_desc = *int_desc;
1675 vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
1676 0, VM_PKT_DATA_INBAND, 0);
1677 kfree(int_desc);
1678 }
1679
1680 /**
1681 * hv_msi_free() - Free the MSI.
1682 * @domain: The interrupt domain pointer
1683 * @info: Extra MSI-related context
1684 * @irq: Identifies the IRQ.
1685 *
1686 * The Hyper-V parent partition and hypervisor are tracking the
1687 * messages that are in use, keeping the interrupt redirection
1688 * table up to date. This callback sends a message that frees
1689 * the IRT entry and related tracking nonsense.
1690 */
hv_msi_free(struct irq_domain * domain,struct msi_domain_info * info,unsigned int irq)1691 static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
1692 unsigned int irq)
1693 {
1694 struct hv_pcibus_device *hbus;
1695 struct hv_pci_dev *hpdev;
1696 struct pci_dev *pdev;
1697 struct tran_int_desc *int_desc;
1698 struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1699 struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1700
1701 pdev = msi_desc_to_pci_dev(msi);
1702 hbus = domain->host_data;
1703 int_desc = irq_data_get_irq_chip_data(irq_data);
1704 if (!int_desc)
1705 return;
1706
1707 irq_data->chip_data = NULL;
1708 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1709 if (!hpdev) {
1710 kfree(int_desc);
1711 return;
1712 }
1713
1714 hv_int_desc_free(hpdev, int_desc);
1715 put_pcichild(hpdev);
1716 }
1717
hv_irq_mask(struct irq_data * data)1718 static void hv_irq_mask(struct irq_data *data)
1719 {
1720 if (data->parent_data->chip->irq_mask)
1721 irq_chip_mask_parent(data);
1722 }
1723
hv_irq_unmask(struct irq_data * data)1724 static void hv_irq_unmask(struct irq_data *data)
1725 {
1726 hv_arch_irq_unmask(data);
1727
1728 if (data->parent_data->chip->irq_unmask)
1729 irq_chip_unmask_parent(data);
1730 }

struct compose_comp_ctxt {
	struct hv_pci_compl comp_pkt;
	struct tran_int_desc int_desc;
};

static void hv_pci_compose_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct compose_comp_ctxt *comp_pkt = context;
	struct pci_create_int_response *int_resp =
		(struct pci_create_int_response *)resp;

	if (resp_packet_size < sizeof(*int_resp)) {
		comp_pkt->comp_pkt.completion_status = -1;
		goto out;
	}
	comp_pkt->comp_pkt.completion_status = resp->status;
	comp_pkt->int_desc = int_resp->int_desc;
out:
	complete(&comp_pkt->comp_pkt.host_event);
}

static u32 hv_compose_msi_req_v1(
	struct pci_create_interrupt *int_pkt,
	u32 slot, u8 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;

	/*
	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
	 * hv_irq_unmask().
	 */
	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;

	return sizeof(*int_pkt);
}

/*
 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
 * not irrelevant because Hyper-V chooses the physical CPU to handle the
 * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
 * but assigning too many vPCI device interrupts to the same pCPU can cause a
 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
 * to spread out the pCPUs that it selects.
 *
 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
 * to always return the same dummy vCPU, because a second call to
 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
 * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
 * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
 * the same pCPU, even though the vCPUs will be spread out by later calls
 * to hv_irq_unmask(), but that is the best we can do now.
 *
 * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
 * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
 * enhancement is planned for a future version. With that enhancement, the
 * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
 * device will be spread across multiple pCPUs.
 */

/*
 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
 * by subsequent retarget in hv_irq_unmask().
 */
static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
{
	return cpumask_first_and(affinity, cpu_online_mask);
}

/*
 * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
 */
static int hv_compose_multi_msi_req_get_cpu(void)
{
	static DEFINE_SPINLOCK(multi_msi_cpu_lock);

	/* -1 means starting with CPU 0 */
	static int cpu_next = -1;

	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&multi_msi_cpu_lock, flags);

	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
	cpu = cpu_next;

	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);

	return cpu;
}
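
/*
 * Illustration only: with CPUs 0-3 online, successive calls to
 * hv_compose_multi_msi_req_get_cpu() return 0, 1, 2, 3, 0, 1, ... so each
 * multi-MSI allocation nudges Hyper-V toward a different pCPU. A minimal
 * sketch of the same wrap-around walk, assuming nothing beyond the
 * cpumask API:
 *
 *	int cpu = -1;
 *	int i;
 *
 *	for (i = 0; i < 6; i++) {
 *		cpu = cpumask_next_wrap(cpu, cpu_online_mask);
 *		pr_debug("dummy vCPU pick %d: %d\n", i, cpu);
 *	}
 */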

static u32 hv_compose_msi_req_v2(
	struct pci_create_interrupt2 *int_pkt, int cpu,
	u32 slot, u8 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
	int_pkt->int_desc.processor_array[0] =
		hv_cpu_number_to_vp_number(cpu);
	int_pkt->int_desc.processor_count = 1;

	return sizeof(*int_pkt);
}

static u32 hv_compose_msi_req_v3(
	struct pci_create_interrupt3 *int_pkt, int cpu,
	u32 slot, u32 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.reserved = 0;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
	int_pkt->int_desc.processor_array[0] =
		hv_cpu_number_to_vp_number(cpu);
	int_pkt->int_desc.processor_count = 1;

	return sizeof(*int_pkt);
}

/**
 * hv_compose_msi_msg() - Supplies a valid MSI address/data
 * @data:	Everything about this MSI
 * @msg:	Buffer that is filled in by this function
 *
 * This function unpacks the IRQ looking for target CPU set, IDT
 * vector and mode and sends a message to the parent partition
 * asking for a mapping for that tuple in this partition.  The
 * response supplies a data value and address to which that data
 * should be written to trigger that interrupt.
 */
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct hv_pcibus_device *hbus;
	struct vmbus_channel *channel;
	struct hv_pci_dev *hpdev;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	const struct cpumask *dest;
	struct compose_comp_ctxt comp;
	struct tran_int_desc *int_desc;
	struct msi_desc *msi_desc;
	/*
	 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
	 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
	 */
	u16 vector_count;
	u32 vector;
	struct {
		struct pci_packet pci_pkt;
		union {
			struct pci_create_interrupt v1;
			struct pci_create_interrupt2 v2;
			struct pci_create_interrupt3 v3;
		} int_pkts;
	} __packed ctxt;
	bool multi_msi;
	u64 trans_id;
	u32 size;
	int ret;
	int cpu;

	msi_desc = irq_data_get_msi_desc(data);
	multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
		    msi_desc->nvec_used > 1;

	/* Reuse the previous allocation */
	if (data->chip_data && multi_msi) {
		int_desc = data->chip_data;
		msg->address_hi = int_desc->address >> 32;
		msg->address_lo = int_desc->address & 0xffffffff;
		msg->data = int_desc->data;
		return;
	}

	pdev = msi_desc_to_pci_dev(msi_desc);
	dest = irq_data_get_effective_affinity_mask(data);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	channel = hbus->hdev->channel;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		goto return_null_message;

	/* Free any previous message that might have already been composed. */
	if (data->chip_data && !multi_msi) {
		int_desc = data->chip_data;
		data->chip_data = NULL;
		hv_int_desc_free(hpdev, int_desc);
	}

	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
	if (!int_desc)
		goto drop_reference;

	if (multi_msi) {
		/*
		 * If this is not the first MSI of Multi MSI, we already have
		 * a mapping.  Can exit early.
		 */
		if (msi_desc->irq != data->irq) {
			data->chip_data = int_desc;
			int_desc->address = msi_desc->msg.address_lo |
					    (u64)msi_desc->msg.address_hi << 32;
			int_desc->data = msi_desc->msg.data +
					 (data->irq - msi_desc->irq);
			msg->address_hi = msi_desc->msg.address_hi;
			msg->address_lo = msi_desc->msg.address_lo;
			msg->data = int_desc->data;
			put_pcichild(hpdev);
			return;
		}
		/*
		 * The vector we select here is a dummy value.  The correct
		 * value gets sent to the hypervisor in unmask().  This needs
		 * to be aligned with the count, and also not zero.  Multi-msi
		 * is powers of 2 up to 32, so 32 will always work here.
		 */
		vector = 32;
		vector_count = msi_desc->nvec_used;
		cpu = hv_compose_multi_msi_req_get_cpu();
	} else {
		vector = hv_msi_get_int_vector(data);
		vector_count = 1;
		cpu = hv_compose_msi_req_get_cpu(dest);
	}

	/*
	 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
	 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
	 * for better readability.
	 */
	memset(&ctxt, 0, sizeof(ctxt));
	init_completion(&comp.comp_pkt.host_event);
	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
	ctxt.pci_pkt.compl_ctxt = &comp;

	switch (hbus->protocol_version) {
	case PCI_PROTOCOL_VERSION_1_1:
		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
					     hpdev->desc.win_slot.slot,
					     (u8)vector,
					     vector_count);
		break;

	case PCI_PROTOCOL_VERSION_1_2:
	case PCI_PROTOCOL_VERSION_1_3:
		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
					     cpu,
					     hpdev->desc.win_slot.slot,
					     (u8)vector,
					     vector_count);
		break;

	case PCI_PROTOCOL_VERSION_1_4:
		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
					     cpu,
					     hpdev->desc.win_slot.slot,
					     vector,
					     vector_count);
		break;

	default:
		/*
		 * As we only negotiate protocol versions known to this driver,
		 * this path should never hit. However, it is not a hot path,
		 * so we print a message to aid future updates.
		 */
		dev_err(&hbus->hdev->device,
			"Unexpected vPCI protocol, update driver.");
		goto free_int_desc;
	}

	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
				     size, (unsigned long)&ctxt.pci_pkt,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		dev_err(&hbus->hdev->device,
			"Sending request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Prevents hv_pci_onchannelcallback() from running concurrently
	 * in the tasklet.
	 */
	tasklet_disable_in_atomic(&channel->callback_event);

	/*
	 * Since this function is called with IRQ locks held, can't
	 * do normal wait for completion; instead poll.
	 */
	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
		unsigned long flags;

		/* 0xFFFF means an invalid PCI VENDOR ID. */
		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
			dev_err_once(&hbus->hdev->device,
				     "the device has gone\n");
			goto enable_tasklet;
		}

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer.  Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section.  See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock_irqsave(&channel->sched_lock, flags);
		if (unlikely(channel->onchannel_callback == NULL)) {
			spin_unlock_irqrestore(&channel->sched_lock, flags);
			goto enable_tasklet;
		}
		hv_pci_onchannelcallback(hbus);
		spin_unlock_irqrestore(&channel->sched_lock, flags);

		udelay(100);
	}

	tasklet_enable(&channel->callback_event);

	if (comp.comp_pkt.completion_status < 0) {
		dev_err(&hbus->hdev->device,
			"Request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Record the assignment so that this can be unwound later. Using
	 * irq_set_chip_data() here would be appropriate, but the lock it takes
	 * is already held.
	 */
	*int_desc = comp.int_desc;
	data->chip_data = int_desc;

	/* Pass up the result. */
	msg->address_hi = comp.int_desc.address >> 32;
	msg->address_lo = comp.int_desc.address & 0xffffffff;
	msg->data = comp.int_desc.data;

	put_pcichild(hpdev);
	return;

enable_tasklet:
	tasklet_enable(&channel->callback_event);
	/*
	 * The completion packet on the stack becomes invalid after 'return';
	 * remove the ID from the VMbus requestor if the identifier is still
	 * mapped to/associated with the packet.  (The identifier could have
	 * been 're-used', i.e., already removed and (re-)mapped.)
	 *
	 * Cf. hv_pci_onchannelcallback().
	 */
	vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
free_int_desc:
	kfree(int_desc);
drop_reference:
	put_pcichild(hpdev);
return_null_message:
	msg->address_hi = 0;
	msg->address_lo = 0;
	msg->data = 0;
}
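
/*
 * Note on consumption of the result: the msi_msg filled in above is not
 * written to the device by this driver. The generic MSI layer programs it
 * into the function's MSI or MSI-X capability registers, and writes of
 * msg->data to the address in msg->address_{hi,lo} are what ultimately
 * trigger the interrupt. A rough sketch of what such a consumer does for
 * an MSI-X entry, assuming a hypothetical iomem mapping 'addr' of the
 * table entry (constants as in the PCI core's internal msi code):
 *
 *	writel(msg->address_lo, addr + PCI_MSIX_ENTRY_LOWER_ADDR);
 *	writel(msg->address_hi, addr + PCI_MSIX_ENTRY_UPPER_ADDR);
 *	writel(msg->data, addr + PCI_MSIX_ENTRY_DATA);
 */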

static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				      struct irq_domain *real_parent, struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
		return false;

	info->ops->msi_prepare = hv_msi_prepare;

	chip->irq_set_affinity = irq_chip_set_affinity_parent;

	if (IS_ENABLED(CONFIG_X86))
		chip->flags |= IRQCHIP_MOVE_DEFERRED;

	return true;
}

#define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS |	\
				    MSI_FLAG_USE_DEF_CHIP_OPS |	\
				    MSI_FLAG_PCI_MSI_MASK_PARENT)
#define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI |	\
				     MSI_FLAG_PCI_MSIX |	\
				     MSI_FLAG_PCI_MSIX_ALLOC_DYN |	\
				     MSI_GENERIC_FLAGS_MASK)

static const struct msi_parent_ops hv_pcie_msi_parent_ops = {
	.required_flags		= HV_PCIE_MSI_FLAGS_REQUIRED,
	.supported_flags	= HV_PCIE_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.chip_flags		= HV_MSI_CHIP_FLAGS,
	.prefix			= "HV-",
	.init_dev_msi_info	= hv_pcie_init_dev_msi_info,
};

/* HW Interrupt Chip Descriptor */
static struct irq_chip hv_msi_irq_chip = {
	.name			= "Hyper-V PCIe MSI",
	.irq_compose_msi_msg	= hv_compose_msi_msg,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_ack		= irq_chip_ack_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= hv_irq_mask,
	.irq_unmask		= hv_irq_unmask,
};

static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
				void *arg)
{
	/*
	 * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg()
	 * should be moved here.
	 */
	int ret;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (int i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL);
		if (IS_ENABLED(CONFIG_X86))
			__irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
	}

	return 0;
}

static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
	struct msi_domain_info *info = d->host_data;

	for (int i = 0; i < nr_irqs; i++)
		hv_msi_free(d, info, virq + i);

	irq_domain_free_irqs_top(d, virq, nr_irqs);
}

static const struct irq_domain_ops hv_pcie_domain_ops = {
	.alloc	= hv_pcie_domain_alloc,
	.free	= hv_pcie_domain_free,
};

/**
 * hv_pcie_init_irq_domain() - Initialize IRQ domain
 * @hbus:	The root PCI bus
 *
 * This function creates an IRQ domain which will be used for
 * interrupts from devices that have been passed through.  These
 * devices only support MSI and MSI-X, not line-based interrupts
 * or simulations of line-based interrupts through PCIe's
 * fabric-layer messages.  Because interrupts are remapped, we
 * can support multi-message MSI here.
 *
 * Return: '0' on success and error value on failure
 */
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
	struct irq_domain_info info = {
		.fwnode		= hbus->fwnode,
		.ops		= &hv_pcie_domain_ops,
		.host_data	= hbus,
		.parent		= hv_pci_get_root_domain(),
	};

	hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops);
	if (!hbus->irq_domain) {
		dev_err(&hbus->hdev->device,
			"Failed to build an MSI IRQ domain\n");
		return -ENODEV;
	}

	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);

	return 0;
}

/**
 * get_bar_size() - Get the address space consumed by a BAR
 * @bar_val:	Value that a BAR returned after -1 was written
 *		to it.
 *
 * This function returns the size of the BAR, rounded up to 1
 * page.  It has to be rounded up because the hypervisor's page
 * table entry that maps the BAR into the VM can't specify an
 * offset within a page.  The invariant is that the hypervisor
 * must place any BARs of smaller than page length at the
 * beginning of a page.
 *
 * Return:	Size in bytes of the consumed MMIO space.
 */
static u64 get_bar_size(u64 bar_val)
{
	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
			PAGE_SIZE);
}
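
/*
 * Worked example (illustrative values only): a 16 KiB 32-bit memory BAR
 * that the caller has widened to 64 bits reads back as
 * bar_val = 0xffffffffffffc008 after all-ones was written to it. Masking
 * off the low control bits gives 0xffffffffffffc000; inverting and adding
 * one yields 0x4000 (16 KiB), which round_up() leaves unchanged for a
 * 4 KiB PAGE_SIZE:
 *
 *	u64 bar_val = 0xffffffffffffc008ULL;
 *	u64 size = get_bar_size(bar_val);	// 0x4000
 */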

/**
 * survey_child_resources() - Total all MMIO requirements
 * @hbus:	Root PCI bus, as understood by this driver
 */
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	resource_size_t bar_size = 0;
	unsigned long flags;
	struct completion *event;
	u64 bar_val;
	int i;

	/* If nobody is waiting on the answer, don't compute it. */
	event = xchg(&hbus->survey_event, NULL);
	if (!event)
		return;

	/* If the answer has already been computed, go with it. */
	if (hbus->low_mmio_space || hbus->high_mmio_space) {
		complete(event);
		return;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Due to an interesting quirk of the PCI spec, all memory regions
	 * for a child device are a power of 2 in size and aligned in memory,
	 * so it's sufficient to just add them up without tracking alignment.
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
				dev_err(&hbus->hdev->device,
					"There's an I/O BAR in this list!\n");

			if (hpdev->probed_bar[i] != 0) {
				/*
				 * A probed BAR has all the upper bits set that
				 * can be changed.
				 */

				bar_val = hpdev->probed_bar[i];
				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					bar_val |=
					((u64)hpdev->probed_bar[++i] << 32);
				else
					bar_val |= 0xffffffff00000000ULL;

				bar_size = get_bar_size(bar_val);

				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					hbus->high_mmio_space += bar_size;
				else
					hbus->low_mmio_space += bar_size;
			}
		}
	}

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	complete(event);
}
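
/*
 * For example (illustrative only), a child with one 4 KiB BAR and one
 * 64 KiB BAR, both 32-bit, yields low_mmio_space = 0x1000 + 0x10000 =
 * 0x11000. Because every BAR is a power-of-2 size and naturally aligned,
 * a window of that total size aligned on the largest BAR (see
 * prepopulate_bars() below) can always hold all of the assignments.
 */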

/**
 * prepopulate_bars() - Fill in BARs with defaults
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * The core PCI driver code seems much, much happier if the BARs
 * for a device have values upon first scan. So fill them in.
 * The algorithm below works down from large sizes to small,
 * attempting to pack the assignments optimally. The assumption,
 * enforced in other parts of the code, is that the beginning of
 * the memory-mapped I/O space will be aligned on the largest
 * BAR size.
 */
static void prepopulate_bars(struct hv_pcibus_device *hbus)
{
	resource_size_t high_size = 0;
	resource_size_t low_size = 0;
	resource_size_t high_base = 0;
	resource_size_t low_base = 0;
	resource_size_t bar_size;
	struct hv_pci_dev *hpdev;
	unsigned long flags;
	u64 bar_val;
	u32 command;
	bool high;
	int i;

	if (hbus->low_mmio_space) {
		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		low_base = hbus->low_mmio_res->start;
	}

	if (hbus->high_mmio_space) {
		high_size = 1ULL <<
			(63 - __builtin_clzll(hbus->high_mmio_space));
		high_base = hbus->high_mmio_res->start;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Clear the memory enable bit, in case it's already set. This occurs
	 * in the suspend path of hibernation, where the device is suspended,
	 * resumed and suspended again: see hibernation_snapshot() and
	 * hibernation_platform_enter().
	 *
	 * If the memory enable bit is already set, Hyper-V silently ignores
	 * the below BAR updates, and the related PCI device driver can not
	 * work, because reading from the device register(s) always returns
	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
		command &= ~PCI_COMMAND_MEMORY;
		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
	}

	/* Pick addresses for the BARs. */
	do {
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
				bar_val = hpdev->probed_bar[i];
				if (bar_val == 0)
					continue;
				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
				if (high) {
					bar_val |=
						((u64)hpdev->probed_bar[i + 1]
						 << 32);
				} else {
					bar_val |= 0xffffffffULL << 32;
				}
				bar_size = get_bar_size(bar_val);
				if (high) {
					if (high_size != bar_size) {
						i++;
						continue;
					}
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(high_base & 0xffffff00));
					i++;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4, (u32)(high_base >> 32));
					high_base += bar_size;
				} else {
					if (low_size != bar_size)
						continue;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(low_base & 0xffffff00));
					low_base += bar_size;
				}
			}
			if (high_size <= 1 && low_size <= 1) {
				/*
				 * No need to set the PCI_COMMAND_MEMORY bit as
				 * the core PCI driver doesn't require the bit
				 * to be pre-set. Actually here we intentionally
				 * keep the bit off so that the PCI BAR probing
				 * in the core PCI driver doesn't cause Hyper-V
				 * to unnecessarily unmap/map the virtual BARs
				 * from/to the physical BARs multiple times.
				 * This reduces the VM boot time significantly
				 * if the BAR sizes are huge.
				 */
				break;
			}
		}

		high_size >>= 1;
		low_size >>= 1;
	} while (high_size || low_size);

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
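
/*
 * Packing walk-through (illustrative only), continuing the example above
 * with low_mmio_space = 0x11000 and a window starting at 0xfe000000:
 * low_size starts at 0x10000, the largest power of two not exceeding the
 * total. The first pass assigns the 64 KiB BAR at 0xfe000000 and skips
 * the 4 KiB BAR; low_size then halves each pass until it reaches 0x1000,
 * at which point the 4 KiB BAR is placed at 0xfe010000. Assigning sizes
 * in descending order guarantees each BAR lands on a boundary of its own
 * size, so no padding is ever needed.
 */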

/*
 * Assign entries in sysfs pci slot directory.
 *
 * Note that this function does not need to lock the children list
 * because it is called from pci_devices_present_work which
 * is serialized with hv_eject_device_work because they are on the
 * same ordered workqueue. Therefore hbus->children list will not change
 * even when pci_create_slot sleeps.
 */
static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	char name[SLOT_NAME_SIZE];
	int slot_nr;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (hpdev->pci_slot)
			continue;

		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
						  name, NULL);
		if (IS_ERR(hpdev->pci_slot)) {
			pr_warn("pci_create_slot %s failed\n", name);
			hpdev->pci_slot = NULL;
		}
	}
}

/*
 * Remove entries in sysfs pci slot directory.
 */
static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (!hpdev->pci_slot)
			continue;
		pci_destroy_slot(hpdev->pci_slot);
		hpdev->pci_slot = NULL;
	}
}

/*
 * Set NUMA node for the devices on the bus
 */
static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
{
	struct pci_dev *dev;
	struct pci_bus *bus = hbus->bridge->bus;
	struct hv_pci_dev *hv_dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
		if (!hv_dev)
			continue;

		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
			/*
			 * The kernel may boot with some NUMA nodes offline
			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
			 * "numa=off". In those cases, adjust the host provided
			 * NUMA node to a valid NUMA node used by the kernel.
			 */
			set_dev_node(&dev->dev,
				     numa_map_to_online_node(
					     hv_dev->desc.virtual_numa_node));

		put_pcichild(hv_dev);
	}
}

/**
 * create_root_hv_pci_bus() - Expose a new root PCI bus
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * Return: 0 on success, -errno on failure
 */
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
	int error;
	struct pci_host_bridge *bridge = hbus->bridge;

	bridge->dev.parent = &hbus->hdev->device;
	bridge->sysdata = &hbus->sysdata;
	bridge->ops = &hv_pcifront_ops;

	error = pci_scan_root_bus_bridge(bridge);
	if (error)
		return error;

	pci_lock_rescan_remove();
	hv_pci_assign_numa_node(hbus);
	pci_bus_assign_resources(bridge->bus);
	hv_pci_assign_slots(hbus);
	pci_bus_add_devices(bridge->bus);
	pci_unlock_rescan_remove();
	hbus->state = hv_pcibus_installed;
	return 0;
}

struct q_res_req_compl {
	struct completion host_event;
	struct hv_pci_dev *hpdev;
};

/**
 * q_resource_requirements() - Query Resource Requirements
 * @context:		The completion context.
 * @resp:		The response that came from the host.
 * @resp_packet_size:	The size in bytes of resp.
 *
 * This function is invoked on completion of a Query Resource
 * Requirements packet.
 */
static void q_resource_requirements(void *context, struct pci_response *resp,
				    int resp_packet_size)
{
	struct q_res_req_compl *completion = context;
	struct pci_q_res_req_response *q_res_req =
		(struct pci_q_res_req_response *)resp;
	s32 status;
	int i;

	status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
	if (status < 0) {
		dev_err(&completion->hpdev->hbus->hdev->device,
			"query resource requirements failed: %x\n",
			status);
	} else {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			completion->hpdev->probed_bar[i] =
				q_res_req->probed_bar[i];
		}
	}

	complete(&completion->host_event);
}

/**
 * new_pcichild_device() - Create a new child device
 * @hbus:	The internal struct tracking this root PCI bus.
 * @desc:	The information supplied so far from the host
 *		about the device.
 *
 * This function creates the tracking structure for a new child
 * device and kicks off the process of figuring out what it is.
 *
 * Return: Pointer to the new tracking struct
 */
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
					      struct hv_pcidev_description *desc)
{
	struct hv_pci_dev *hpdev;
	struct pci_child_message *res_req;
	struct q_res_req_compl comp_pkt;
	struct {
		struct pci_packet init_packet;
		u8 buffer[sizeof(struct pci_child_message)];
	} pkt;
	unsigned long flags;
	int ret;

	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
	if (!hpdev)
		return NULL;

	hpdev->hbus = hbus;

	memset(&pkt, 0, sizeof(pkt));
	init_completion(&comp_pkt.host_event);
	comp_pkt.hpdev = hpdev;
	pkt.init_packet.compl_ctxt = &comp_pkt;
	pkt.init_packet.completion_func = q_resource_requirements;
	res_req = (struct pci_child_message *)pkt.buffer;
	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
	res_req->wslot.slot = desc->win_slot.slot;

	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
			       sizeof(struct pci_child_message),
			       (unsigned long)&pkt.init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto error;

	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
		goto error;

	hpdev->desc = *desc;
	refcount_set(&hpdev->refs, 1);
	get_pcichild(hpdev);
	spin_lock_irqsave(&hbus->device_list_lock, flags);

	list_add_tail(&hpdev->list_entry, &hbus->children);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	return hpdev;

error:
	kfree(hpdev);
	return NULL;
}

/**
 * get_pcichild_wslot() - Find device from slot
 * @hbus:	Root PCI bus, as understood by this driver
 * @wslot:	Location on the bus
 *
 * This function looks up a PCI device and returns the internal
 * representation of it.  It acquires a reference on it, so that
 * the device won't be deleted while somebody is using it.  The
 * caller is responsible for calling put_pcichild() to release
 * this reference.
 *
 * Return:	Internal representation of a PCI device
 */
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot)
{
	unsigned long flags;
	struct hv_pci_dev *iter, *hpdev = NULL;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(iter, &hbus->children, list_entry) {
		if (iter->desc.win_slot.slot == wslot) {
			hpdev = iter;
			get_pcichild(hpdev);
			break;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	return hpdev;
}
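
/*
 * The lookup/put pairing above is the reference-counting contract used by
 * every caller in this file. A minimal sketch of the pattern, assuming a
 * caller that already holds hbus and a wslot value:
 *
 *	struct hv_pci_dev *hpdev;
 *
 *	hpdev = get_pcichild_wslot(hbus, wslot);
 *	if (!hpdev)
 *		return;		// device was removed concurrently
 *	// ... use hpdev; it can't be freed while the reference is held ...
 *	put_pcichild(hpdev);
 */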

/**
 * pci_devices_present_work() - Handle new list of child devices
 * @work:	Work struct embedded in struct hv_dr_work
 *
 * "Bus Relations" is the Windows term for "children of this
 * bus."  The terminology is preserved here for people trying to
 * debug the interaction between Hyper-V and Linux.  This
 * function is called when the parent partition reports a list
 * of functions that should be observed under this PCI Express
 * port (bus).
 *
 * This function updates the list, and must tolerate being
 * called multiple times with the same information.  The typical
 * number of child devices is one, with very atypical cases
 * involving three or four, so the algorithms used here can be
 * simple and inefficient.
 *
 * It must also treat the omission of a previously observed device as
 * notification that the device no longer exists.
 *
 * Note that this function is serialized with hv_eject_device_work(),
 * because both are pushed to the ordered workqueue hbus->wq.
 */
static void pci_devices_present_work(struct work_struct *work)
{
	u32 child_no;
	bool found;
	struct hv_pcidev_description *new_desc;
	struct hv_pci_dev *hpdev;
	struct hv_pcibus_device *hbus;
	struct list_head removed;
	struct hv_dr_work *dr_wrk;
	struct hv_dr_state *dr = NULL;
	unsigned long flags;

	dr_wrk = container_of(work, struct hv_dr_work, wrk);
	hbus = dr_wrk->bus;
	kfree(dr_wrk);

	INIT_LIST_HEAD(&removed);

	/* Pull this off the queue and process it if it was the last one. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	while (!list_empty(&hbus->dr_list)) {
		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
				      list_entry);
		list_del(&dr->list_entry);

		/* Throw this away if the list still has stuff in it. */
		if (!list_empty(&hbus->dr_list)) {
			kfree(dr);
			continue;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (!dr)
		return;

	mutex_lock(&hbus->state_lock);

	/* First, mark all existing children as reported missing. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		hpdev->reported_missing = true;
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Next, add back any reported devices. */
	for (child_no = 0; child_no < dr->device_count; child_no++) {
		found = false;
		new_desc = &dr->func[child_no];

		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
			    (hpdev->desc.v_id == new_desc->v_id) &&
			    (hpdev->desc.d_id == new_desc->d_id) &&
			    (hpdev->desc.ser == new_desc->ser)) {
				hpdev->reported_missing = false;
				found = true;
			}
		}
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		if (!found) {
			hpdev = new_pcichild_device(hbus, new_desc);
			if (!hpdev)
				dev_err(&hbus->hdev->device,
					"couldn't record a child device.\n");
		}
	}

	/* Move missing children to a list on the stack. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	do {
		found = false;
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if (hpdev->reported_missing) {
				found = true;
				put_pcichild(hpdev);
				list_move_tail(&hpdev->list_entry, &removed);
				break;
			}
		}
	} while (found);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Delete everything that should no longer exist. */
	while (!list_empty(&removed)) {
		hpdev = list_first_entry(&removed, struct hv_pci_dev,
					 list_entry);
		list_del(&hpdev->list_entry);

		if (hpdev->pci_slot)
			pci_destroy_slot(hpdev->pci_slot);

		put_pcichild(hpdev);
	}

	switch (hbus->state) {
	case hv_pcibus_installed:
		/*
		 * Tell the core to rescan bus
		 * because there may have been changes.
		 */
		pci_lock_rescan_remove();
		pci_scan_child_bus(hbus->bridge->bus);
		hv_pci_assign_numa_node(hbus);
		hv_pci_assign_slots(hbus);
		pci_unlock_rescan_remove();
		break;

	case hv_pcibus_init:
	case hv_pcibus_probed:
		survey_child_resources(hbus);
		break;

	default:
		break;
	}

	mutex_unlock(&hbus->state_lock);

	kfree(dr);
}
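
/*
 * Illustration of the diffing semantics above (hypothetical wslots): if
 * the host first reports children {0x0, 0x1} and a later BUS_RELATIONS
 * message reports only {0x0}, the work function marks both as missing,
 * clears the flag on 0x0 when it matches the new list, and moves the
 * child at 0x1 onto the on-stack 'removed' list to be torn down.
 */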

/**
 * hv_pci_start_relations_work() - Queue work to start device discovery
 * @hbus:	Root PCI bus, as understood by this driver
 * @dr:		The list of children returned from host
 *
 * Return:  0 on success, -errno on failure
 */
static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
				       struct hv_dr_state *dr)
{
	struct hv_dr_work *dr_wrk;
	unsigned long flags;
	bool pending_dr;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hbus->hdev->device,
			 "PCI VMBus BUS_RELATIONS: ignored\n");
		return -ENOENT;
	}

	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
	if (!dr_wrk)
		return -ENOMEM;

	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
	dr_wrk->bus = hbus;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	/*
	 * If pending_dr is true, we have already queued a work,
	 * which will see the new dr. Otherwise, we need to
	 * queue a new work.
	 */
	pending_dr = !list_empty(&hbus->dr_list);
	list_add_tail(&dr->list_entry, &hbus->dr_list);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (pending_dr)
		kfree(dr_wrk);
	else
		queue_work(hbus->wq, &dr_wrk->wrk);

	return 0;
}

/**
 * hv_pci_devices_present() - Handle list of new children
 * @hbus:	Root PCI bus, as understood by this driver
 * @relations:	Packet from host listing children
 *
 * Process a new list of devices on the bus. The list of devices is
 * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
 * whenever a new list of devices for this bus appears.
 */
static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
				   struct pci_bus_relations *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}

/**
 * hv_pci_devices_present2() - Handle list of new children
 * @hbus:	Root PCI bus, as understood by this driver
 * @relations:	Packet from host listing children
 *
 * This function is the v2 version of hv_pci_devices_present()
 */
static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
				    struct pci_bus_relations2 *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
		dr->func[i].flags = relations->func[i].flags;
		dr->func[i].virtual_numa_node =
			relations->func[i].virtual_numa_node;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}

/**
 * hv_eject_device_work() - Asynchronously handles ejection
 * @work:	Work struct embedded in internal device struct
 *
 * This function handles ejecting a device.  Windows will
 * attempt to gracefully eject a device, waiting 60 seconds to
 * hear back from the guest OS that this completed successfully.
 * If this timer expires, the device will be forcibly removed.
 */
static void hv_eject_device_work(struct work_struct *work)
{
	struct pci_eject_response *ejct_pkt;
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	unsigned long flags;
	int wslot;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_eject_response)];
	} ctxt;

	hpdev = container_of(work, struct hv_pci_dev, wrk);
	hbus = hpdev->hbus;

	mutex_lock(&hbus->state_lock);

	/*
	 * Ejection can come before or after the PCI bus has been set up, so
	 * attempt to find it and tear down the bus state, if it exists.  This
	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
	 * because hbus->bridge->bus may not exist yet.
	 */
	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
	if (pdev) {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(pdev);
		pci_dev_put(pdev);
		pci_unlock_rescan_remove();
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_del(&hpdev->list_entry);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (hpdev->pci_slot)
		pci_destroy_slot(hpdev->pci_slot);

	memset(&ctxt, 0, sizeof(ctxt));
	ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
			 sizeof(*ejct_pkt), 0,
			 VM_PKT_DATA_INBAND, 0);

	/* For the get_pcichild() in hv_pci_eject_device() */
	put_pcichild(hpdev);
	/* For the two refs got in new_pcichild_device() */
	put_pcichild(hpdev);
	put_pcichild(hpdev);
	/* hpdev has been freed. Do not use it any more. */

	mutex_unlock(&hbus->state_lock);
}

/**
 * hv_pci_eject_device() - Handles device ejection
 * @hpdev:	Internal device tracking struct
 *
 * This function is invoked when an ejection packet arrives.  It
 * just schedules work so that we don't re-enter the packet
 * delivery code handling the ejection.
 */
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct hv_device *hdev = hbus->hdev;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
		return;
	}

	get_pcichild(hpdev);
	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
	queue_work(hbus->wq, &hpdev->wrk);
}

/**
 * hv_pci_onchannelcallback() - Handles incoming packets
 * @context:	Internal bus tracking struct
 *
 * This function is invoked whenever the host sends a packet to
 * this channel (which is private to this root PCI bus).
 */
static void hv_pci_onchannelcallback(void *context)
{
	const int packet_size = 0x100;
	int ret;
	struct hv_pcibus_device *hbus = context;
	struct vmbus_channel *chan = hbus->hdev->channel;
	u32 bytes_recvd;
	u64 req_id, req_addr;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = packet_size;
	struct pci_packet *comp_packet;
	struct pci_response *response;
	struct pci_incoming_message *new_message;
	struct pci_bus_relations *bus_rel;
	struct pci_bus_relations2 *bus_rel2;
	struct pci_dev_inval_block *inval;
	struct pci_dev_incoming *dev_message;
	struct hv_pci_dev *hpdev;
	unsigned long flags;

	buffer = kmalloc(bufferlen, GFP_ATOMIC);
	if (!buffer)
		return;

	while (1) {
		ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
					   &bytes_recvd, &req_id);

		if (ret == -ENOBUFS) {
			kfree(buffer);
			/* Handle large packet */
			bufferlen = bytes_recvd;
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (!buffer)
				return;
			continue;
		}

		/* Zero length indicates there are no more packets. */
		if (ret || !bytes_recvd)
			break;

		/*
		 * All incoming packets must be at least as large as a
		 * response.
		 */
		if (bytes_recvd <= sizeof(struct pci_response))
			continue;
		desc = (struct vmpacket_descriptor *)buffer;

		switch (desc->type) {
		case VM_PKT_COMP:

			lock_requestor(chan, flags);
			req_addr = __vmbus_request_addr_match(chan, req_id,
							      VMBUS_RQST_ADDR_ANY);
			if (req_addr == VMBUS_RQST_ERROR) {
				unlock_requestor(chan, flags);
				dev_err(&hbus->hdev->device,
					"Invalid transaction ID %llx\n",
					req_id);
				break;
			}
			comp_packet = (struct pci_packet *)req_addr;
			response = (struct pci_response *)buffer;
			/*
			 * Call ->completion_func() within the critical section to make
			 * sure that the packet pointer is still valid during the call:
			 * here 'valid' means that there's a task still waiting for the
			 * completion, and that the packet data is still on the waiting
			 * task's stack.  Cf. hv_compose_msi_msg().
			 */
			comp_packet->completion_func(comp_packet->compl_ctxt,
						     response,
						     bytes_recvd);
			unlock_requestor(chan, flags);
			break;

		case VM_PKT_DATA_INBAND:

			new_message = (struct pci_incoming_message *)buffer;
			switch (new_message->message_type.type) {
			case PCI_BUS_RELATIONS:

				bus_rel = (struct pci_bus_relations *)buffer;
				if (bytes_recvd < sizeof(*bus_rel) ||
				    bytes_recvd <
					struct_size(bus_rel, func,
						    bus_rel->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations too small\n");
					break;
				}

				hv_pci_devices_present(hbus, bus_rel);
				break;

			case PCI_BUS_RELATIONS2:

				bus_rel2 = (struct pci_bus_relations2 *)buffer;
				if (bytes_recvd < sizeof(*bus_rel2) ||
				    bytes_recvd <
					struct_size(bus_rel2, func,
						    bus_rel2->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations v2 too small\n");
					break;
				}

				hv_pci_devices_present2(hbus, bus_rel2);
				break;

			case PCI_EJECT:

				dev_message = (struct pci_dev_incoming *)buffer;
				if (bytes_recvd < sizeof(*dev_message)) {
					dev_err(&hbus->hdev->device,
						"eject message too small\n");
					break;
				}
				hpdev = get_pcichild_wslot(hbus,
						      dev_message->wslot.slot);
				if (hpdev) {
					hv_pci_eject_device(hpdev);
					put_pcichild(hpdev);
				}
				break;

			case PCI_INVALIDATE_BLOCK:

				inval = (struct pci_dev_inval_block *)buffer;
				if (bytes_recvd < sizeof(*inval)) {
					dev_err(&hbus->hdev->device,
						"invalidate message too small\n");
					break;
				}
				hpdev = get_pcichild_wslot(hbus,
							   inval->wslot.slot);
				if (hpdev) {
					if (hpdev->block_invalidate) {
						hpdev->block_invalidate(
						    hpdev->invalidate_context,
						    inval->block_mask);
					}
					put_pcichild(hpdev);
				}
				break;

			default:
				dev_warn(&hbus->hdev->device,
					 "Unimplemented protocol message %x\n",
					 new_message->message_type.type);
				break;
			}
			break;

		default:
			dev_err(&hbus->hdev->device,
				"unhandled packet type %d, tid %llx len %d\n",
				desc->type, req_id, bytes_recvd);
			break;
		}
	}

	kfree(buffer);
}

/**
 * hv_pci_protocol_negotiation() - Set up protocol
 * @hdev:		VMBus's tracking struct for this root PCI bus.
 * @version:		Array of supported channel protocol versions in
 *			the order of probing - highest go first.
 * @num_version:	Number of elements in the version array.
 *
 * This driver is intended to support running on Windows 10
 * (server) and later versions. It will not run on earlier
 * versions, as they assume that many of the operations which
 * Linux needs accomplished with a spinlock held were done via
 * asynchronous messaging via VMBus.  Windows 10 increases the
 * surface area of PCI emulation so that these actions can take
 * place by suspending a virtual processor for their duration.
 *
 * This function negotiates the channel protocol version,
 * failing if the host doesn't support the necessary protocol
 * level.
 */
static int hv_pci_protocol_negotiation(struct hv_device *hdev,
				       enum pci_protocol_version_t version[],
				       int num_version)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_version_request *version_req;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	int ret;
	int i;

	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	version_req = (struct pci_version_request *)(pkt + 1);
	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;

	for (i = 0; i < num_version; i++) {
		version_req->protocol_version = version[i];
		ret = vmbus_sendpacket(hdev->channel, version_req,
				sizeof(struct pci_version_request),
				(unsigned long)pkt, VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);

		if (ret) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed to request version: %d",
				ret);
			goto exit;
		}

		if (comp_pkt.completion_status >= 0) {
			hbus->protocol_version = version[i];
			dev_info(&hdev->device,
				 "PCI VMBus probing: Using version %#x\n",
				 hbus->protocol_version);
			goto exit;
		}

		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed version request: %#x",
				comp_pkt.completion_status);
			ret = -EPROTO;
			goto exit;
		}

		reinit_completion(&comp_pkt.host_event);
	}

	dev_err(&hdev->device,
		"PCI pass-through VSP failed to find supported version");
	ret = -EPROTO;

exit:
	kfree(pkt);
	return ret;
}
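
/*
 * Negotiation walk-through (hypothetical host): if the host supports only
 * protocol 1.2, the requests for 1.4 and 1.3 each complete with
 * STATUS_REVISION_MISMATCH, the loop retries with the next entry, and the
 * 1.2 request completes with a non-negative status, so
 * hbus->protocol_version becomes PCI_PROTOCOL_VERSION_1_2.
 */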

/**
 * hv_pci_free_bridge_windows() - Release memory regions for the
 * bus
 * @hbus:	Root PCI bus, as understood by this driver
 */
static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
{
	/*
	 * Set the resources back to the way they looked when they
	 * were allocated by setting IORESOURCE_BUSY again.
	 */

	if (hbus->low_mmio_space && hbus->low_mmio_res) {
		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	if (hbus->high_mmio_space && hbus->high_mmio_res) {
		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->high_mmio_res->start,
				resource_size(hbus->high_mmio_res));
	}
}

/**
 * hv_pci_allocate_bridge_windows() - Allocate memory regions
 * for the bus
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * This function calls vmbus_allocate_mmio(), which is itself a
 * bit of a compromise.  Ideally, we might change the pnp layer
 * in the kernel such that it comprehends either PCI devices
 * which are "grandchildren of ACPI," with some intermediate bus
 * node (in this case, VMBus) or change it such that it
 * understands VMBus.  The pnp layer, however, has been declared
 * deprecated, and not subject to change.
 *
 * The workaround, implemented here, is to ask VMBus to allocate
 * MMIO space for this bus.  VMBus itself knows which ranges are
 * appropriate by looking at its own ACPI objects.  Then, after
 * these ranges are claimed, they're modified to look like they
 * would have looked if the ACPI and pnp code had allocated
 * bridge windows.  These descriptors have to exist in this form
 * in order to satisfy the code which will get invoked when the
 * endpoint PCI function driver calls request_mem_region() or
 * request_mem_region_exclusive().
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
{
	resource_size_t align;
	int ret;

	if (hbus->low_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
					  (u64)(u32)0xffffffff,
					  hbus->low_mmio_space,
					  align, false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
				hbus->low_mmio_space);
			return ret;
		}

		/* Modify this resource to become a bridge window. */
		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
	}

	if (hbus->high_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
					  0x100000000, -1,
					  hbus->high_mmio_space, align,
					  false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
				hbus->high_mmio_space);
			goto release_low_mmio;
		}

		/* Modify this resource to become a bridge window. */
		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
	}

	return 0;

release_low_mmio:
	if (hbus->low_mmio_res) {
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	return ret;
}
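
/*
 * The alignment computed above is the largest power of two not exceeding
 * the total space, which matches the largest possible child BAR (see
 * survey_child_resources() and prepopulate_bars()). Continuing the
 * earlier illustrative numbers, low_mmio_space = 0x11000 gives
 *
 *	align = 1ULL << (63 - __builtin_clzll(0x11000));	// 0x10000
 *
 * so the window is 64 KiB-aligned and the 64 KiB BAR placed at its start
 * is naturally aligned.
 */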

/**
 * hv_allocate_config_window() - Find MMIO space for PCI Config
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * This function claims memory-mapped I/O space for accessing
 * configuration space for the functions on this bus.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
{
	int ret;

	/*
	 * Set up a region of MMIO space to use for accessing configuration
	 * space.
	 */
	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
	if (ret)
		return ret;

	/*
	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
	 * resource claims (those which cannot be overlapped) and the ranges
	 * which are valid for the children of this bus, which are intended
	 * to be overlapped by those children.  Set the flag on this claim
	 * meaning that this region can't be overlapped.
	 */

	hbus->mem_config->flags |= IORESOURCE_BUSY;

	return 0;
}

static void hv_free_config_window(struct hv_pcibus_device *hbus)
{
	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);

/**
 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_enter_d0(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_bus_d0_entry *d0_entry;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	bool retry = true;
	int ret;

enter_d0_retry:
	/*
	 * Tell the host that the bus is ready to use, and moved into the
	 * powered-on state.  This includes telling the host which region
	 * of memory-mapped I/O space has been chosen for configuration space
	 * access.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
	d0_entry->mmio_base = hbus->mem_config->start;

	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (!ret)
		ret = wait_for_response(hdev, &comp_pkt.host_event);

	if (ret)
		goto exit;

	/*
	 * In certain cases (e.g. kdump) the PCI device of interest was
	 * not cleanly shut down and its resources are still held on the
	 * host side, so the host could return an invalid device status.
	 * We need to explicitly request that the host release the
	 * resources and then try to enter D0 again.
	 */
	if (comp_pkt.completion_status < 0 && retry) {
		retry = false;

		dev_err(&hdev->device, "Retrying D0 Entry\n");

		/*
		 * hv_pci_bus_exit() calls hv_send_resources_released()
		 * to free up resources of its child devices.
		 * In the kdump kernel we need to set the
		 * wslot_res_allocated to 255 so it scans all child
		 * devices to release resources allocated in the
		 * normal kernel before panic happened.
		 */
		hbus->wslot_res_allocated = 255;

		ret = hv_pci_bus_exit(hdev, true);

		if (ret == 0) {
			kfree(pkt);
			goto enter_d0_retry;
		}
		dev_err(&hdev->device,
			"Retrying D0 failed with ret %d\n", ret);
	}

	if (comp_pkt.completion_status < 0) {
		dev_err(&hdev->device,
			"PCI Pass-through VSP failed D0 Entry with status %x\n",
			comp_pkt.completion_status);
		ret = -EPROTO;
		goto exit;
	}

	ret = 0;

exit:
	kfree(pkt);
	return ret;
}
3528
3529 /**
3530 * hv_pci_query_relations() - Ask host to send list of child
3531 * devices
3532 * @hdev: VMBus's tracking struct for this root PCI bus
3533 *
3534 * Return: 0 on success, -errno on failure
3535 */
hv_pci_query_relations(struct hv_device * hdev)3536 static int hv_pci_query_relations(struct hv_device *hdev)
3537 {
3538 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3539 struct pci_message message;
3540 struct completion comp;
3541 int ret;
3542
3543 /* Ask the host to send along the list of child devices */
3544 init_completion(&comp);
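	/*
	 * Only one survey may be outstanding at a time. The work item that
	 * the channel callback schedules (pci_devices_present_work() ->
	 * survey_child_resources()) consumes hbus->survey_event and signals
	 * the completion.
	 */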
	if (cmpxchg(&hbus->survey_event, NULL, &comp))
		return -ENOTEMPTY;

	memset(&message, 0, sizeof(message));
	message.type = PCI_QUERY_BUS_RELATIONS;

	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
			       0, VM_PKT_DATA_INBAND, 0);
	if (!ret)
		ret = wait_for_response(hdev, &comp);

	/*
	 * In the case of fast device addition/removal, it's possible that
	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
	 * already got a PCI_BUS_RELATIONS* message from the host and the
	 * channel callback already scheduled a work item on hbus->wq, which
	 * can be running pci_devices_present_work() ->
	 * survey_child_resources() -> complete(&hbus->survey_event), even
	 * after hv_pci_query_relations() exits and the stack variable 'comp'
	 * is no longer valid; as a result, a hang or a page fault may happen
	 * when the complete() calls raw_spin_lock_irqsave(). Flush hbus->wq
	 * before we exit from hv_pci_query_relations() to avoid the issues.
	 * Note: if 'ret' is -ENODEV, there can't be any more work items
	 * scheduled on hbus->wq after the flush_workqueue(): see
	 * vmbus_onoffer_rescind() -> vmbus_reset_channel_cb(),
	 * vmbus_rescind_cleanup() -> channel->rescind = true.
	 */
	flush_workqueue(hbus->wq);

	return ret;
}

/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * The host OS is expecting to be sent a request as a message
 * which contains all the resources that the device will use.
 * The response contains those same resources, "translated",
 * which is to say, the values which should be used by the
 * hardware when it delivers an interrupt. (MMIO resources are
 * used in local terms.) This is nice for Windows, and lines up
 * with the FDO/PDO split, which doesn't exist in Linux. Linux
 * is deeply expecting to scan an emulated PCI configuration
 * space. So this message is sent here only to drive the state
 * machine on the host forward.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	int wslot;
	int ret;

	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ret = 0;

	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)(pkt + 1);
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)(pkt + 1);
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, pkt + 1,
				       size_res, (unsigned long)pkt,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);
		if (ret)
			break;

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x\n",
				comp_pkt.completion_status);
			break;
		}

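		/*
		 * Record how far we have gotten, so that
		 * hv_send_resources_released() knows which wslots to walk
		 * back down if a later step fails.
		 */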
		hbus->wslot_res_allocated = wslot;
	}

	kfree(pkt);
	return ret;
}

/**
 * hv_send_resources_released() - Report local resources released
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	int wslot;
	int ret;

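	/*
	 * Walk downward from the highest wslot that
	 * hv_send_resources_allocated() reached, releasing each child's
	 * claim in reverse order.
	 */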
	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;

		hbus->wslot_res_allocated = wslot - 1;
	}

	hbus->wslot_res_allocated = -1;

	return 0;
}

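/* One bit per possible PCI domain number: domains are u16, so 64K values. */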
#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for passthrough PCI devices of this driver.
 */
#define HVPCI_DOM_INVALID 0

/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * @dom: Requested domain number
 *
 * Check if the requested PCI domain number is in use, and return another
 * number if it is.
 *
 * Return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}

/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}

/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev: VMBus's tracking struct for this root PCI bus
 * @dev_id: Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct pci_host_bridge *bridge;
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
	if (!bridge)
		return -ENOMEM;

	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;

	hbus->bridge = bridge;
	mutex_init(&hbus->state_lock);
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and other
	 * specs. Pull it from the instance ID, to get something usually
	 * unique. In rare cases of collision, we will find another number
	 * that is not in use.
	 *
	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
	 * together with this guest driver can guarantee that (1) The only
	 * domain used by Gen1 VMs for something that looks like a physical
	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
	 * (2) There will be no overlap between domains (after fixing possible
	 * collisions) in the same VM.
	 */
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%x or other numbers\n", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%x has collision, using 0x%x\n",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
	hbus->use_calls = false;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

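	/*
	 * Wire up the VMbus requestor callbacks so that completion packets
	 * arriving on the channel can be matched back to the request
	 * (the struct pci_packet address) that initiated them.
	 */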
	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	mutex_unlock(&hbus->state_lock);
	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
release_state_lock:
	mutex_unlock(&hbus->state_lock);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct vmbus_channel *chan = hdev->channel;
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct pci_message *msg;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	u64 trans_id;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (chan->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs got in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	msg = (struct pci_message *)pkt.buffer;
	msg->type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
				     (unsigned long)&pkt.teardown_packet,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
		/*
		 * The completion packet on the stack becomes invalid after
		 * 'return'; remove the ID from the VMbus requestor if the
		 * identifier is still mapped to/associated with the packet.
		 *
		 * Cf. hv_pci_onchannelcallback().
		 */
		vmbus_request_addr_match(chan, trans_id,
					 (unsigned long)&pkt.teardown_packet);
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev: VMBus's tracking struct for this root PCI bus
 */
static void hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
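		/*
		 * Briefly disable the channel callback tasklet so the state
		 * change to hv_pcibus_removing cannot race with
		 * hv_pci_onchannelcallback(); cf. the same pattern in
		 * hv_pci_suspend().
		 */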
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), so it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
}

static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend(). When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can still be running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which hv_pci_suspend() may be in the middle of
	 * closing, e.g. the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it flushes
	 * hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}

static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct irq_data *irq_data;
	struct msi_desc *entry;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		return 0;

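	/*
	 * guard(msi_descs_lock) takes the device's MSI descriptor lock for
	 * the remainder of this function and releases it automatically on
	 * every return path.
	 */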
	guard(msi_descs_lock)(&pdev->dev);
	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data))
			return -EINVAL;
		hv_compose_msi_msg(irq_data, &entry->msg);
	}
	return 0;
}

/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
 * Table entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}

static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto release_state_lock;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	mutex_unlock(&hbus->state_lock);
	return 0;

release_state_lock:
	mutex_unlock(&hbus->state_lock);
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name = "hv_pci",
	.id_table = hv_pci_id_table,
	.probe = hv_pci_probe,
	.remove = hv_pci_remove,
	.suspend = hv_pci_suspend,
	.resume = hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	int ret;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	if (hv_root_partition() && !hv_nested)
		return -ENODEV;

	ret = hv_pci_irqchip_init();
	if (ret)
		return ret;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");