// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM.  This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications.  Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique.  Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector.  This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities.  For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff)

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};
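
/*
 * Illustrative example (not part of the protocol definition): with the
 * macros above, PCI_MAKE_VERSION(1, 4) is 0x00010004, so
 * PCI_MAJOR_VERSION(PCI_PROTOCOL_VERSION_1_4) == 1 and
 * PCI_MINOR_VERSION(PCI_PROTOCOL_VERSION_1_4) == 4.
 */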

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - highest goes
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for 32-bit serial number as string */
#define SLOT_NAME_SIZE 11

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2		= PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8 bits wide on Express, as interpreted through ARI,
 * which is all this driver does.  This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;
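
/*
 * Illustrative example: the Linux devfn for device 2, function 1 is
 * PCI_DEVFN(2, 1) == 0x11; in this encoding it becomes bits.dev = 2 and
 * bits.func = 1, i.e. slot == 0x22 (dev in bits 0-4, func in bits 5-7).
 * devfn_to_wslot() and wslot_to_devfn() below convert in both directions.
 */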

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};

struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;

/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 *	Everything is the same as in 'hv_msi_desc2' except that the size of the
 *	'vector' field is larger to support bigger vector values. For example,
 *	LPI vectors on ARM.
 */
struct hv_msi_desc3 {
	u32	vector;
	u8	delivery_mode;
	u8	reserved;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;

	struct pci_message message[];
};
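
/*
 * A sketch of the pattern used throughout this driver (see, e.g.,
 * hv_read_config_block() below): the sender stacks a pci_packet on top of
 * a buffer large enough for the specific message, fills in
 * completion_func/compl_ctxt, and hands the message to vmbus_sendpacket():
 *
 *	struct {
 *		struct pci_packet pkt;
 *		char buf[sizeof(struct pci_read_block)];
 *	} pkt;
 *
 *	pkt.pkt.completion_func = hv_pci_read_config_compl;
 *	pkt.pkt.compl_ctxt = &comp_pkt;
 */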

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * protocol_version: The protocol version requested.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry.  This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = (4 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;

	struct mutex state_lock;
	enum hv_pcibus_state state;

	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;	/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct msi_domain_info msi_info;
	struct irq_domain *irq_domain;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;
	bool use_calls; /* Use hypercalls to access mmio cfg space */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[] __counted_by(device_count);
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};

struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
#define FLOW_HANDLER	handle_edge_irq
#define FLOW_NAME	"edge"

static int hv_pci_irqchip_init(void)
{
	return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

#define hv_msi_prepare		pci_msi_prepare

/**
 * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_arch_irq_unmask(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct hv_retarget_device_interrupt *params;
	struct tran_int_desc *int_desc;
	struct hv_pcibus_device *hbus;
	const struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	int_desc = data->chip_data;
	if (!int_desc) {
		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
			 __func__, data->irq);
		return;
	}

	local_irq_save(flags);

	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			   (hbus->hdev->dev_instance.b[4] << 16) |
			   (hbus->hdev->dev_instance.b[7] << 8) |
			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
			   PCI_FUNC(pdev->devfn);
	params->int_target.vector = hv_msi_get_int_vector(data);

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto out;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto out;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

out:
	local_irq_restore(flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the above hv_do_hypercall()
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity(), so later when the VM resumes,
	 * hv_pci_restore_msi_state() is able to correctly restore the
	 * interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);
}
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; the arch SPI range is [32, 1019], but leave a
 * bit of room at the start to allow for SPIs to be specified through ACPI, and
 * start at a power of two to satisfy the power-of-2 multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE		0
#define FLOW_HANDLER		NULL
#define FLOW_NAME		NULL
#define hv_msi_prepare		NULL

struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex	map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 *			the bitmap.
 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 *			the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}

static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 2;
	fwspec.param[0] = hwirq;
	fwspec.param[1] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Pick the first CPU as the irq affinity that can be temporarily used for
 * composing the MSI from the hypervisor. GIC will eventually set the right
 * affinity for the irq and the 'unmask' will retarget the interrupt to that
 * cpu.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc	= hv_pci_vec_irq_domain_alloc,
	.free	= hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};

static int hv_pci_irqchip_init(void)
{
	static struct hv_pci_chip_data *chip_data;
	struct fwnode_handle *fn = NULL;
	int ret = -ENOMEM;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return ret;

	mutex_init(&chip_data->map_lock);
	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
	if (!fn)
		goto free_chip;

	/*
	 * IRQ domain once enabled, should not be removed since there is no
	 * way to ensure that all the corresponding devices are also gone and
	 * no interrupts will be generated.
	 */
	hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
							  fn, &hv_pci_domain_ops,
							  chip_data);

	if (!hv_msi_gic_irq_domain) {
		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
		goto free_chip;
	}

	return 0;

free_chip:
	kfree(chip_data);
	if (fn)
		irq_domain_free_fwnode(fn);

	return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for interrupts of PCI devices and SPIs are managed via GICD
 * registers which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
						u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn:	The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot:	The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
	struct hv_mmio_read_input *in;
	struct hv_mmio_read_output *out;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument page.  Use it for
	 * both input and output.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
	in->gpa = gpa;
	in->size = size;

	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
	if (hv_result_success(ret)) {
		switch (size) {
		case 1:
			*val = *(u8 *)(out->data);
			break;
		case 2:
			*val = *(u16 *)(out->data);
			break;
		default:
			*val = *(u32 *)(out->data);
			break;
		}
	} else
		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
				ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
	struct hv_mmio_write_input *in;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument memory.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	in->gpa = gpa;
	in->size = size;
	switch (size) {
	case 1:
		*(u8 *)(in->data) = val;
		break;
	case 2:
		*(u16 *)(in->data) = val;
		break;
	default:
		*(u32 *)(in->data) = val;
		break;
	}

	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
	if (!hv_result_success(ret))
		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
				ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space.  Writing to the first page chooses
 * the PCI function being written or read.  Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */
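
/*
 * A minimal sketch of the resulting access sequence (see
 * _hv_pcifront_read_config() below for the real thing, including the
 * config_lock and memory barriers):
 *
 *	writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);	choose function
 *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);	read its config
 */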

/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
		   PCI_INTERRUPT_PIN) {
		/*
		 * Interrupt Line and Interrupt PIN are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {

		spin_lock_irqsave(&hbus->config_lock, flags);
		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
						hpdev->desc.win_slot.slot);
			hv_pci_read_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to be read. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before reading. */
			mb();
			/* Read from that function's config space. */
			switch (size) {
			case 1:
				*val = readb(addr);
				break;
			case 2:
				*val = readw(addr);
				break;
			default:
				*val = readl(addr);
				break;
			}
			/*
			 * Make sure the read was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	u32 val;
	u16 ret;
	unsigned long flags;

	spin_lock_irqsave(&hbus->config_lock, flags);

	if (hbus->use_calls) {
		phys_addr_t addr = hbus->mem_config->start +
					 CFG_PAGE_OFFSET + PCI_VENDOR_ID;

		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					hpdev->desc.win_slot.slot);
		hv_pci_read_mmio(dev, addr, 2, &val);
		ret = val;  /* Truncates to 16 bits */
	} else {
		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
					     PCI_VENDOR_ID;
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		ret = readw(addr);
		/*
		 * mb() is not required here, because the
		 * spin_unlock_irqrestore() is a barrier.
		 */
	}

	spin_unlock_irqrestore(&hbus->config_lock, flags);

	return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);

		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
						hpdev->desc.win_slot.slot);
			hv_pci_write_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to write. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before writing. */
			wmb();
			/* Write to that function's config space. */
			switch (size) {
			case 1:
				writeb(val, addr);
				break;
			case 2:
				writew(val, addr);
				break;
			default:
				writel(val, addr);
				break;
			}
			/*
			 * Make sure the write was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver.  These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional.  Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver.  The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one or
 * more of the first 64 blocks.  This invalidation is delivered via a callback
 * supplied to this driver by the VF driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */
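
/*
 * For illustration, a VF driver might read configuration block 0 roughly
 * like this, through the hvpci_block_ops wrapper defined later in this
 * file (hypothetical usage, error handling elided):
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int bytes;
 *
 *	hvpci_block_ops.read_block(pdev, buf, sizeof(buf), 0, &bytes);
 */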

struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)&pkt.pkt.message;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}

/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)&pkt.pkt.message;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}

/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
					void (*block_invalidate)(void *context,
								 u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}

/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
			     struct tran_int_desc *int_desc)
{
	struct pci_delete_interrupt *int_pkt;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_delete_interrupt)];
	} ctxt;

	if (!int_desc->vector_count) {
		kfree(int_desc);
		return;
	}
	memset(&ctxt, 0, sizeof(ctxt));
	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
	int_pkt->message_type.type =
		PCI_DELETE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	int_pkt->int_desc = *int_desc;
	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
			 0, VM_PKT_DATA_INBAND, 0);
	kfree(int_desc);
}

/**
 * hv_msi_free() - Free the MSI.
 * @domain:	The interrupt domain pointer
 * @info:	Extra MSI-related context
 * @irq:	Identifies the IRQ.
 *
 * The Hyper-V parent partition and hypervisor are tracking the
 * messages that are in use, keeping the interrupt redirection
 * table up to date.  This callback sends a message that frees
 * the IRT entry and related tracking nonsense.
 */
static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int irq)
{
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	struct tran_int_desc *int_desc;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);

	pdev = msi_desc_to_pci_dev(msi);
	hbus = info->data;
	int_desc = irq_data_get_irq_chip_data(irq_data);
	if (!int_desc)
		return;

	irq_data->chip_data = NULL;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev) {
		kfree(int_desc);
		return;
	}

	hv_int_desc_free(hpdev, int_desc);
	put_pcichild(hpdev);
}

static void hv_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	if (data->parent_data->chip->irq_mask)
		irq_chip_mask_parent(data);
}

static void hv_irq_unmask(struct irq_data *data)
{
	hv_arch_irq_unmask(data);

	if (data->parent_data->chip->irq_unmask)
		irq_chip_unmask_parent(data);
	pci_msi_unmask_irq(data);
}

struct compose_comp_ctxt {
	struct hv_pci_compl comp_pkt;
	struct tran_int_desc int_desc;
};

static void hv_pci_compose_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct compose_comp_ctxt *comp_pkt = context;
	struct pci_create_int_response *int_resp =
		(struct pci_create_int_response *)resp;

	if (resp_packet_size < sizeof(*int_resp)) {
		comp_pkt->comp_pkt.completion_status = -1;
		goto out;
	}
	comp_pkt->comp_pkt.completion_status = resp->status;
	comp_pkt->int_desc = int_resp->int_desc;
out:
	complete(&comp_pkt->comp_pkt.host_event);
}

static u32 hv_compose_msi_req_v1(
	struct pci_create_interrupt *int_pkt,
	u32 slot, u8 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;

	/*
	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
	 * hv_irq_unmask().
	 */
	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;

	return sizeof(*int_pkt);
}

/*
 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
 * not irrelevant because Hyper-V chooses the physical CPU to handle the
 * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
 * but assigning too many vPCI device interrupts to the same pCPU can cause a
 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
 * to spread out the pCPUs that it selects.
 *
 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
 * to always return the same dummy vCPU, because a second call to
 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
 * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
 * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
 * the same pCPU, even though the vCPUs will be spread out by later calls
 * to hv_irq_unmask(); that is the best we can do now.
1727  *
1728  * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
1729  * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
1730  * enhancement is planned for a future version. With that enhancement, the
1731  * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
1732  * device will be spread across multiple pCPUs.
1733  */
1734 
1735 /*
1736  * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1737  * by subsequent retarget in hv_irq_unmask().
1738  */
1739 static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
1740 {
1741 	return cpumask_first_and(affinity, cpu_online_mask);
1742 }
1743 
1744 /*
1745  * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
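      *
      * For example (an illustrative run): with four online CPUs numbered
      * 0-3, successive calls return 0, 1, 2, 3, 0, 1, ..., so consecutive
      * multi-MSI devices hand Hyper-V different dummy vCPUs.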
1746  */
1747 static int hv_compose_multi_msi_req_get_cpu(void)
1748 {
1749 	static DEFINE_SPINLOCK(multi_msi_cpu_lock);
1750 
1751 	/* -1 means starting with CPU 0 */
1752 	static int cpu_next = -1;
1753 
1754 	unsigned long flags;
1755 	int cpu;
1756 
1757 	spin_lock_irqsave(&multi_msi_cpu_lock, flags);
1758 
1759 	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
1760 				     false);
1761 	cpu = cpu_next;
1762 
1763 	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
1764 
1765 	return cpu;
1766 }
1767 
1768 static u32 hv_compose_msi_req_v2(
1769 	struct pci_create_interrupt2 *int_pkt, int cpu,
1770 	u32 slot, u8 vector, u16 vector_count)
1771 {
1772 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1773 	int_pkt->wslot.slot = slot;
1774 	int_pkt->int_desc.vector = vector;
1775 	int_pkt->int_desc.vector_count = vector_count;
1776 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1777 	int_pkt->int_desc.processor_array[0] =
1778 		hv_cpu_number_to_vp_number(cpu);
1779 	int_pkt->int_desc.processor_count = 1;
1780 
1781 	return sizeof(*int_pkt);
1782 }
1783 
1784 static u32 hv_compose_msi_req_v3(
1785 	struct pci_create_interrupt3 *int_pkt, int cpu,
1786 	u32 slot, u32 vector, u16 vector_count)
1787 {
1788 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1789 	int_pkt->wslot.slot = slot;
1790 	int_pkt->int_desc.vector = vector;
1791 	int_pkt->int_desc.reserved = 0;
1792 	int_pkt->int_desc.vector_count = vector_count;
1793 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1794 	int_pkt->int_desc.processor_array[0] =
1795 		hv_cpu_number_to_vp_number(cpu);
1796 	int_pkt->int_desc.processor_count = 1;
1797 
1798 	return sizeof(*int_pkt);
1799 }
1800 
1801 /**
1802  * hv_compose_msi_msg() - Supplies a valid MSI address/data
1803  * @data:	Everything about this MSI
1804  * @msg:	Buffer that is filled in by this function
1805  *
1806  * This function unpacks the IRQ looking for target CPU set, IDT
1807  * vector and mode and sends a message to the parent partition
1808  * asking for a mapping for that tuple in this partition.  The
1809  * response supplies a data value and address to which that data
1810  * should be written to trigger that interrupt.
1811  */
1812 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1813 {
1814 	struct hv_pcibus_device *hbus;
1815 	struct vmbus_channel *channel;
1816 	struct hv_pci_dev *hpdev;
1817 	struct pci_bus *pbus;
1818 	struct pci_dev *pdev;
1819 	const struct cpumask *dest;
1820 	struct compose_comp_ctxt comp;
1821 	struct tran_int_desc *int_desc;
1822 	struct msi_desc *msi_desc;
1823 	/*
1824 	 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
1825 	 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
1826 	 */
1827 	u16 vector_count;
1828 	u32 vector;
1829 	struct {
1830 		struct pci_packet pci_pkt;
1831 		union {
1832 			struct pci_create_interrupt v1;
1833 			struct pci_create_interrupt2 v2;
1834 			struct pci_create_interrupt3 v3;
1835 		} int_pkts;
1836 	} __packed ctxt;
1837 	bool multi_msi;
1838 	u64 trans_id;
1839 	u32 size;
1840 	int ret;
1841 	int cpu;
1842 
1843 	msi_desc  = irq_data_get_msi_desc(data);
1844 	multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
1845 		    msi_desc->nvec_used > 1;
1846 
1847 	/* Reuse the previous allocation */
1848 	if (data->chip_data && multi_msi) {
1849 		int_desc = data->chip_data;
1850 		msg->address_hi = int_desc->address >> 32;
1851 		msg->address_lo = int_desc->address & 0xffffffff;
1852 		msg->data = int_desc->data;
1853 		return;
1854 	}
1855 
1856 	pdev = msi_desc_to_pci_dev(msi_desc);
1857 	dest = irq_data_get_effective_affinity_mask(data);
1858 	pbus = pdev->bus;
1859 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1860 	channel = hbus->hdev->channel;
1861 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1862 	if (!hpdev)
1863 		goto return_null_message;
1864 
1865 	/* Free any previous message that might have already been composed. */
1866 	if (data->chip_data && !multi_msi) {
1867 		int_desc = data->chip_data;
1868 		data->chip_data = NULL;
1869 		hv_int_desc_free(hpdev, int_desc);
1870 	}
1871 
1872 	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1873 	if (!int_desc)
1874 		goto drop_reference;
1875 
1876 	if (multi_msi) {
1877 		/*
1878 		 * If this is not the first MSI of a multi-MSI allocation, we
1879 		 * already have a mapping and can exit early.
1880 		 */
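     		/*
     		 * For instance (illustrative): in an 8-vector block whose
     		 * first IRQ was assigned message data 0x40, the IRQ at offset
     		 * 2 in the block reuses the same address and gets data 0x42.
     		 */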
1881 		if (msi_desc->irq != data->irq) {
1882 			data->chip_data = int_desc;
1883 			int_desc->address = msi_desc->msg.address_lo |
1884 					    (u64)msi_desc->msg.address_hi << 32;
1885 			int_desc->data = msi_desc->msg.data +
1886 					 (data->irq - msi_desc->irq);
1887 			msg->address_hi = msi_desc->msg.address_hi;
1888 			msg->address_lo = msi_desc->msg.address_lo;
1889 			msg->data = int_desc->data;
1890 			put_pcichild(hpdev);
1891 			return;
1892 		}
1893 		/*
1894 		 * The vector we select here is a dummy value.  The correct
1895 		 * value gets sent to the hypervisor in unmask().  It needs to
1896 		 * be aligned to the count, and must not be zero.  Multi-MSI
1897 		 * uses powers of 2 up to 32, so 32 will always work here.
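     		 * (For instance, an 8-vector block requires vector % 8 == 0;
     		 * 32 is a multiple of every allowed count: 1, 2, 4, 8, 16, 32.)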
1898 		 */
1899 		vector = 32;
1900 		vector_count = msi_desc->nvec_used;
1901 		cpu = hv_compose_multi_msi_req_get_cpu();
1902 	} else {
1903 		vector = hv_msi_get_int_vector(data);
1904 		vector_count = 1;
1905 		cpu = hv_compose_msi_req_get_cpu(dest);
1906 	}
1907 
1908 	/*
1909 	 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
1910 	 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
1911 	 * for better readability.
1912 	 */
1913 	memset(&ctxt, 0, sizeof(ctxt));
1914 	init_completion(&comp.comp_pkt.host_event);
1915 	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1916 	ctxt.pci_pkt.compl_ctxt = &comp;
1917 
1918 	switch (hbus->protocol_version) {
1919 	case PCI_PROTOCOL_VERSION_1_1:
1920 		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1921 					hpdev->desc.win_slot.slot,
1922 					(u8)vector,
1923 					vector_count);
1924 		break;
1925 
1926 	case PCI_PROTOCOL_VERSION_1_2:
1927 	case PCI_PROTOCOL_VERSION_1_3:
1928 		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1929 					cpu,
1930 					hpdev->desc.win_slot.slot,
1931 					(u8)vector,
1932 					vector_count);
1933 		break;
1934 
1935 	case PCI_PROTOCOL_VERSION_1_4:
1936 		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
1937 					cpu,
1938 					hpdev->desc.win_slot.slot,
1939 					vector,
1940 					vector_count);
1941 		break;
1942 
1943 	default:
1944 		/* As we only negotiate protocol versions known to this driver,
1945 		 * this path should never be hit. However, it is not a hot
1946 		 * path, so we print a message to aid future updates.
1947 		 */
1948 		dev_err(&hbus->hdev->device,
1949 			"Unexpected vPCI protocol, update driver.");
1950 			"Unexpected vPCI protocol, update driver.\n");
1951 	}
1952 
1953 	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1954 				     size, (unsigned long)&ctxt.pci_pkt,
1955 				     &trans_id, VM_PKT_DATA_INBAND,
1956 				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1957 	if (ret) {
1958 		dev_err(&hbus->hdev->device,
1959 			"Sending request for interrupt failed: %d\n",
1960 			ret);
1961 		goto free_int_desc;
1962 	}
1963 
1964 	/*
1965 	 * Prevents hv_pci_onchannelcallback() from running concurrently
1966 	 * in the tasklet.
1967 	 */
1968 	tasklet_disable_in_atomic(&channel->callback_event);
1969 
1970 	/*
1971 	 * Since this function is called with IRQ locks held, we can't
1972 	 * do a normal wait for completion; instead, we poll.
1973 	 */
1974 	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1975 		unsigned long flags;
1976 
1977 		/* 0xFFFF means an invalid PCI VENDOR ID. */
1978 		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1979 			dev_err_once(&hbus->hdev->device,
1980 				     "the device has gone\n");
1981 			goto enable_tasklet;
1982 		}
1983 
1984 		/*
1985 		 * Make sure that the ring buffer data structure doesn't get
1986 		 * freed while we dereference the ring buffer pointer.  Test
1987 		 * for the channel's onchannel_callback being NULL within a
1988 		 * sched_lock critical section.  See also the inline comments
1989 		 * in vmbus_reset_channel_cb().
1990 		 */
1991 		spin_lock_irqsave(&channel->sched_lock, flags);
1992 		if (unlikely(channel->onchannel_callback == NULL)) {
1993 			spin_unlock_irqrestore(&channel->sched_lock, flags);
1994 			goto enable_tasklet;
1995 		}
1996 		hv_pci_onchannelcallback(hbus);
1997 		spin_unlock_irqrestore(&channel->sched_lock, flags);
1998 
1999 		udelay(100);
2000 	}
2001 
2002 	tasklet_enable(&channel->callback_event);
2003 
2004 	if (comp.comp_pkt.completion_status < 0) {
2005 		dev_err(&hbus->hdev->device,
2006 			"Request for interrupt failed: 0x%x\n",
2007 			comp.comp_pkt.completion_status);
2008 		goto free_int_desc;
2009 	}
2010 
2011 	/*
2012 	 * Record the assignment so that this can be unwound later. Using
2013 	 * irq_set_chip_data() here would be appropriate, but the lock it takes
2014 	 * is already held.
2015 	 */
2016 	*int_desc = comp.int_desc;
2017 	data->chip_data = int_desc;
2018 
2019 	/* Pass up the result. */
2020 	msg->address_hi = comp.int_desc.address >> 32;
2021 	msg->address_lo = comp.int_desc.address & 0xffffffff;
2022 	msg->data = comp.int_desc.data;
2023 
2024 	put_pcichild(hpdev);
2025 	return;
2026 
2027 enable_tasklet:
2028 	tasklet_enable(&channel->callback_event);
2029 	/*
2030 	 * The completion packet on the stack becomes invalid after 'return';
2031 	 * remove the ID from the VMbus requestor if the identifier is still
2032 	 * mapped to/associated with the packet.  (The identifier could have
2033 	 * been 're-used', i.e., already removed and (re-)mapped.)
2034 	 *
2035 	 * Cf. hv_pci_onchannelcallback().
2036 	 */
2037 	vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
2038 free_int_desc:
2039 	kfree(int_desc);
2040 drop_reference:
2041 	put_pcichild(hpdev);
2042 return_null_message:
2043 	msg->address_hi = 0;
2044 	msg->address_lo = 0;
2045 	msg->data = 0;
2046 }
2047 
2048 /* HW Interrupt Chip Descriptor */
2049 static struct irq_chip hv_msi_irq_chip = {
2050 	.name			= "Hyper-V PCIe MSI",
2051 	.irq_compose_msi_msg	= hv_compose_msi_msg,
2052 	.irq_set_affinity	= irq_chip_set_affinity_parent,
2053 #ifdef CONFIG_X86
2054 	.irq_ack		= irq_chip_ack_parent,
2055 #elif defined(CONFIG_ARM64)
2056 	.irq_eoi		= irq_chip_eoi_parent,
2057 #endif
2058 	.irq_mask		= hv_irq_mask,
2059 	.irq_unmask		= hv_irq_unmask,
2060 };
2061 
2062 static struct msi_domain_ops hv_msi_ops = {
2063 	.msi_prepare	= hv_msi_prepare,
2064 	.msi_free	= hv_msi_free,
2065 };
2066 
2067 /**
2068  * hv_pcie_init_irq_domain() - Initialize IRQ domain
2069  * @hbus:	The root PCI bus
2070  *
2071  * This function creates an IRQ domain which will be used for
2072  * interrupts from devices that have been passed through.  These
2073  * devices only support MSI and MSI-X, not line-based interrupts
2074  * or simulations of line-based interrupts through PCIe's
2075  * fabric-layer messages.  Because interrupts are remapped, we
2076  * can support multi-message MSI here.
2077  *
2078  * Return: '0' on success and error value on failure
2079  */
2080 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
2081 {
2082 	hbus->msi_info.chip = &hv_msi_irq_chip;
2083 	hbus->msi_info.ops = &hv_msi_ops;
2084 	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
2085 		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
2086 		MSI_FLAG_PCI_MSIX);
2087 	hbus->msi_info.handler = FLOW_HANDLER;
2088 	hbus->msi_info.handler_name = FLOW_NAME;
2089 	hbus->msi_info.data = hbus;
2090 	hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
2091 						     &hbus->msi_info,
2092 						     hv_pci_get_root_domain());
2093 	if (!hbus->irq_domain) {
2094 		dev_err(&hbus->hdev->device,
2095 			"Failed to build an MSI IRQ domain\n");
2096 		return -ENODEV;
2097 	}
2098 
2099 	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
2100 
2101 	return 0;
2102 }
2103 
2104 /**
2105  * get_bar_size() - Get the address space consumed by a BAR
2106  * @bar_val:	Value that a BAR returned after -1 was written
2107  *              to it.
2108  *
2109  * This function returns the size of the BAR, rounded up to 1
2110  * page.  It has to be rounded up because the hypervisor's page
2111  * table entry that maps the BAR into the VM can't specify an
2112  * offset within a page.  The invariant is that the hypervisor
2113  * must place any BAR that is smaller than a page at the
2114  * beginning of a page.
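      *
      * For example (an illustrative calculation): a 1 KiB 32-bit BAR reads
      * back as 0xFFFFFC00 after -1 is written.  The caller sets the upper
      * 32 bits for 32-bit BARs, so bar_val = 0xFFFFFFFFFFFFFC00, making
      * 1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK) = 0x400 (1 KiB), which
      * is then rounded up to PAGE_SIZE.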
2115  *
2116  * Return:	Size in bytes of the consumed MMIO space.
2117  */
2118 static u64 get_bar_size(u64 bar_val)
2119 {
2120 	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
2121 			PAGE_SIZE);
2122 }
2123 
2124 /**
2125  * survey_child_resources() - Total all MMIO requirements
2126  * @hbus:	Root PCI bus, as understood by this driver
2127  */
2128 static void survey_child_resources(struct hv_pcibus_device *hbus)
2129 {
2130 	struct hv_pci_dev *hpdev;
2131 	resource_size_t bar_size = 0;
2132 	unsigned long flags;
2133 	struct completion *event;
2134 	u64 bar_val;
2135 	int i;
2136 
2137 	/* If nobody is waiting on the answer, don't compute it. */
2138 	event = xchg(&hbus->survey_event, NULL);
2139 	if (!event)
2140 		return;
2141 
2142 	/* If the answer has already been computed, go with it. */
2143 	if (hbus->low_mmio_space || hbus->high_mmio_space) {
2144 		complete(event);
2145 		return;
2146 	}
2147 
2148 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2149 
2150 	/*
2151 	 * Due to an interesting quirk of the PCI spec, all memory regions
2152 	 * for a child device are a power of 2 in size and naturally aligned,
2153 	 * so it's sufficient to just add them up without tracking alignment.
2154 	 */
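     	/*
     	 * For instance (illustrative): BARs of 64 KiB, 16 KiB and 4 KiB
     	 * contribute 84 KiB to the relevant total; because the packing in
     	 * prepopulate_bars() goes largest-first from a suitably aligned
     	 * base, no extra padding is ever needed.
     	 */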
2155 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2156 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2157 			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
2158 				dev_err(&hbus->hdev->device,
2159 					"There's an I/O BAR in this list!\n");
2160 
2161 			if (hpdev->probed_bar[i] != 0) {
2162 				/*
2163 				 * A probed BAR has all the upper bits set that
2164 				 * can be changed.
2165 				 */
2166 
2167 				bar_val = hpdev->probed_bar[i];
2168 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2169 					bar_val |=
2170 					((u64)hpdev->probed_bar[++i] << 32);
2171 				else
2172 					bar_val |= 0xffffffff00000000ULL;
2173 
2174 				bar_size = get_bar_size(bar_val);
2175 
2176 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2177 					hbus->high_mmio_space += bar_size;
2178 				else
2179 					hbus->low_mmio_space += bar_size;
2180 			}
2181 		}
2182 	}
2183 
2184 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2185 	complete(event);
2186 }
2187 
2188 /**
2189  * prepopulate_bars() - Fill in BARs with defaults
2190  * @hbus:	Root PCI bus, as understood by this driver
2191  *
2192  * The core PCI driver code seems much, much happier if the BARs
2193  * for a device have values upon first scan. So fill them in.
2194  * The algorithm below works down from large sizes to small,
2195  * attempting to pack the assignments optimally. The assumption,
2196  * enforced in other parts of the code, is that the beginning of
2197  * the memory-mapped I/O space will be aligned on the largest
2198  * BAR size.
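      *
      * For example (an illustrative walk-through): with one 64 KiB BAR and
      * one 4 KiB BAR below 4 GiB, low_size starts at 64 KiB, the largest
      * power of 2 not exceeding the 68 KiB total.  The first pass places
      * the 64 KiB BAR at the window base; low_size is then halved on each
      * pass until it reaches 4 KiB, when the small BAR is placed right
      * after the large one.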
2199  */
2200 static void prepopulate_bars(struct hv_pcibus_device *hbus)
2201 {
2202 	resource_size_t high_size = 0;
2203 	resource_size_t low_size = 0;
2204 	resource_size_t high_base = 0;
2205 	resource_size_t low_base = 0;
2206 	resource_size_t bar_size;
2207 	struct hv_pci_dev *hpdev;
2208 	unsigned long flags;
2209 	u64 bar_val;
2210 	u32 command;
2211 	bool high;
2212 	int i;
2213 
2214 	if (hbus->low_mmio_space) {
2215 		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2216 		low_base = hbus->low_mmio_res->start;
2217 	}
2218 
2219 	if (hbus->high_mmio_space) {
2220 		high_size = 1ULL <<
2221 			(63 - __builtin_clzll(hbus->high_mmio_space));
2222 		high_base = hbus->high_mmio_res->start;
2223 	}
2224 
2225 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2226 
2227 	/*
2228 	 * Clear the memory enable bit, in case it's already set. This occurs
2229 	 * in the suspend path of hibernation, where the device is suspended,
2230 	 * resumed and suspended again: see hibernation_snapshot() and
2231 	 * hibernation_platform_enter().
2232 	 *
2233 	 * If the memory enable bit is already set, Hyper-V silently ignores
2234 	 * the BAR updates below, and the related PCI device driver cannot
2235 	 * work, because reading from the device register(s) always returns
2236 	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2237 	 */
2238 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2239 		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2240 		command &= ~PCI_COMMAND_MEMORY;
2241 		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2242 	}
2243 
2244 	/* Pick addresses for the BARs. */
2245 	do {
2246 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2247 			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2248 				bar_val = hpdev->probed_bar[i];
2249 				if (bar_val == 0)
2250 					continue;
2251 				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2252 				if (high) {
2253 					bar_val |=
2254 						((u64)hpdev->probed_bar[i + 1]
2255 						 << 32);
2256 				} else {
2257 					bar_val |= 0xffffffffULL << 32;
2258 				}
2259 				bar_size = get_bar_size(bar_val);
2260 				if (high) {
2261 					if (high_size != bar_size) {
2262 						i++;
2263 						continue;
2264 					}
2265 					_hv_pcifront_write_config(hpdev,
2266 						PCI_BASE_ADDRESS_0 + (4 * i),
2267 						4,
2268 						(u32)(high_base & 0xffffff00));
2269 					i++;
2270 					_hv_pcifront_write_config(hpdev,
2271 						PCI_BASE_ADDRESS_0 + (4 * i),
2272 						4, (u32)(high_base >> 32));
2273 					high_base += bar_size;
2274 				} else {
2275 					if (low_size != bar_size)
2276 						continue;
2277 					_hv_pcifront_write_config(hpdev,
2278 						PCI_BASE_ADDRESS_0 + (4 * i),
2279 						4,
2280 						(u32)(low_base & 0xffffff00));
2281 					low_base += bar_size;
2282 				}
2283 			}
2284 			if (high_size <= 1 && low_size <= 1) {
2285 				/*
2286 				 * No need to set the PCI_COMMAND_MEMORY bit as
2287 				 * the core PCI driver doesn't require the bit
2288 				 * to be pre-set.  In fact, here we intentionally
2289 				 * keep the bit off so that the PCI BAR probing
2290 				 * in the core PCI driver doesn't cause Hyper-V
2291 				 * to unnecessarily unmap/map the virtual BARs
2292 				 * from/to the physical BARs multiple times.
2293 				 * This reduces the VM boot time significantly
2294 				 * if the BAR sizes are huge.
2295 				 */
2296 				break;
2297 			}
2298 		}
2299 
2300 		high_size >>= 1;
2301 		low_size >>= 1;
2302 	} while (high_size || low_size);
2303 
2304 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2305 }
2306 
2307 /*
2308  * Assign entries in sysfs pci slot directory.
2309  *
2310  * Note that this function does not need to lock the children list
2311  * because it is called from pci_devices_present_work which
2312  * is serialized with hv_eject_device_work because they are on the
2313  * same ordered workqueue. Therefore hbus->children list will not change
2314  * even when pci_create_slot sleeps.
2315  */
2316 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2317 {
2318 	struct hv_pci_dev *hpdev;
2319 	char name[SLOT_NAME_SIZE];
2320 	int slot_nr;
2321 
2322 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2323 		if (hpdev->pci_slot)
2324 			continue;
2325 
2326 		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2327 		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2328 		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2329 					  name, NULL);
2330 		if (IS_ERR(hpdev->pci_slot)) {
2331 			pr_warn("pci_create_slot %s failed\n", name);
2332 			hpdev->pci_slot = NULL;
2333 		}
2334 	}
2335 }
2336 
2337 /*
2338  * Remove entries in sysfs pci slot directory.
2339  */
2340 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2341 {
2342 	struct hv_pci_dev *hpdev;
2343 
2344 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2345 		if (!hpdev->pci_slot)
2346 			continue;
2347 		pci_destroy_slot(hpdev->pci_slot);
2348 		hpdev->pci_slot = NULL;
2349 	}
2350 }
2351 
2352 /*
2353  * Set NUMA node for the devices on the bus
2354  */
2355 static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2356 {
2357 	struct pci_dev *dev;
2358 	struct pci_bus *bus = hbus->bridge->bus;
2359 	struct hv_pci_dev *hv_dev;
2360 
2361 	list_for_each_entry(dev, &bus->devices, bus_list) {
2362 		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2363 		if (!hv_dev)
2364 			continue;
2365 
2366 		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
2367 		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
2368 			/*
2369 			 * The kernel may boot with some NUMA nodes offline
2370 			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
2371 			 * "numa=off". In those cases, adjust the host provided
2372 			 * NUMA node to a valid NUMA node used by the kernel.
2373 			 */
2374 			set_dev_node(&dev->dev,
2375 				     numa_map_to_online_node(
2376 					     hv_dev->desc.virtual_numa_node));
2377 
2378 		put_pcichild(hv_dev);
2379 	}
2380 }
2381 
2382 /**
2383  * create_root_hv_pci_bus() - Expose a new root PCI bus
2384  * @hbus:	Root PCI bus, as understood by this driver
2385  *
2386  * Return: 0 on success, -errno on failure
2387  */
2388 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2389 {
2390 	int error;
2391 	struct pci_host_bridge *bridge = hbus->bridge;
2392 
2393 	bridge->dev.parent = &hbus->hdev->device;
2394 	bridge->sysdata = &hbus->sysdata;
2395 	bridge->ops = &hv_pcifront_ops;
2396 
2397 	error = pci_scan_root_bus_bridge(bridge);
2398 	if (error)
2399 		return error;
2400 
2401 	pci_lock_rescan_remove();
2402 	hv_pci_assign_numa_node(hbus);
2403 	pci_bus_assign_resources(bridge->bus);
2404 	hv_pci_assign_slots(hbus);
2405 	pci_bus_add_devices(bridge->bus);
2406 	pci_unlock_rescan_remove();
2407 	hbus->state = hv_pcibus_installed;
2408 	return 0;
2409 }
2410 
2411 struct q_res_req_compl {
2412 	struct completion host_event;
2413 	struct hv_pci_dev *hpdev;
2414 };
2415 
2416 /**
2417  * q_resource_requirements() - Query Resource Requirements
2418  * @context:		The completion context.
2419  * @resp:		The response that came from the host.
2420  * @resp_packet_size:	The size in bytes of resp.
2421  *
2422  * This function is invoked on completion of a Query Resource
2423  * Requirements packet.
2424  */
2425 static void q_resource_requirements(void *context, struct pci_response *resp,
2426 				    int resp_packet_size)
2427 {
2428 	struct q_res_req_compl *completion = context;
2429 	struct pci_q_res_req_response *q_res_req =
2430 		(struct pci_q_res_req_response *)resp;
2431 	s32 status;
2432 	int i;
2433 
2434 	status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
2435 	if (status < 0) {
2436 		dev_err(&completion->hpdev->hbus->hdev->device,
2437 			"query resource requirements failed: %x\n",
2438 			status);
2439 	} else {
2440 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2441 			completion->hpdev->probed_bar[i] =
2442 				q_res_req->probed_bar[i];
2443 		}
2444 	}
2445 
2446 	complete(&completion->host_event);
2447 }
2448 
2449 /**
2450  * new_pcichild_device() - Create a new child device
2451  * @hbus:	The internal struct tracking this root PCI bus.
2452  * @desc:	The information supplied so far from the host
2453  *              about the device.
2454  *
2455  * This function creates the tracking structure for a new child
2456  * device and kicks off the process of figuring out what it is.
2457  *
2458  * Return: Pointer to the new tracking struct
2459  */
2460 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2461 		struct hv_pcidev_description *desc)
2462 {
2463 	struct hv_pci_dev *hpdev;
2464 	struct pci_child_message *res_req;
2465 	struct q_res_req_compl comp_pkt;
2466 	struct {
2467 		struct pci_packet init_packet;
2468 		u8 buffer[sizeof(struct pci_child_message)];
2469 	} pkt;
2470 	unsigned long flags;
2471 	int ret;
2472 
2473 	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
2474 	if (!hpdev)
2475 		return NULL;
2476 
2477 	hpdev->hbus = hbus;
2478 
2479 	memset(&pkt, 0, sizeof(pkt));
2480 	init_completion(&comp_pkt.host_event);
2481 	comp_pkt.hpdev = hpdev;
2482 	pkt.init_packet.compl_ctxt = &comp_pkt;
2483 	pkt.init_packet.completion_func = q_resource_requirements;
2484 	res_req = (struct pci_child_message *)&pkt.init_packet.message;
2485 	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2486 	res_req->wslot.slot = desc->win_slot.slot;
2487 
2488 	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2489 			       sizeof(struct pci_child_message),
2490 			       (unsigned long)&pkt.init_packet,
2491 			       VM_PKT_DATA_INBAND,
2492 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2493 	if (ret)
2494 		goto error;
2495 
2496 	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2497 		goto error;
2498 
2499 	hpdev->desc = *desc;
2500 	refcount_set(&hpdev->refs, 1);
2501 	get_pcichild(hpdev);
2502 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2503 
2504 	list_add_tail(&hpdev->list_entry, &hbus->children);
2505 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2506 	return hpdev;
2507 
2508 error:
2509 	kfree(hpdev);
2510 	return NULL;
2511 }
2512 
2513 /**
2514  * get_pcichild_wslot() - Find device from slot
2515  * @hbus:	Root PCI bus, as understood by this driver
2516  * @wslot:	Location on the bus
2517  *
2518  * This function looks up a PCI device and returns the internal
2519  * representation of it.  It acquires a reference on it, so that
2520  * the device won't be deleted while somebody is using it.  The
2521  * caller is responsible for calling put_pcichild() to release
2522  * this reference.
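      *
      * Typical usage (an illustrative sketch):
      *
      *	hpdev = get_pcichild_wslot(hbus, wslot);
      *	if (!hpdev)
      *		return;
      *	... operate on hpdev ...
      *	put_pcichild(hpdev);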
2523  *
2524  * Return:	Internal representation of a PCI device
2525  */
2526 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2527 					     u32 wslot)
2528 {
2529 	unsigned long flags;
2530 	struct hv_pci_dev *iter, *hpdev = NULL;
2531 
2532 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2533 	list_for_each_entry(iter, &hbus->children, list_entry) {
2534 		if (iter->desc.win_slot.slot == wslot) {
2535 			hpdev = iter;
2536 			get_pcichild(hpdev);
2537 			break;
2538 		}
2539 	}
2540 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2541 
2542 	return hpdev;
2543 }
2544 
2545 /**
2546  * pci_devices_present_work() - Handle new list of child devices
2547  * @work:	Work struct embedded in struct hv_dr_work
2548  *
2549  * "Bus Relations" is the Windows term for "children of this
2550  * bus."  The terminology is preserved here for people trying to
2551  * debug the interaction between Hyper-V and Linux.  This
2552  * function is called when the parent partition reports a list
2553  * of functions that should be observed under this PCI Express
2554  * port (bus).
2555  *
2556  * This function updates the list, and must tolerate being
2557  * called multiple times with the same information.  The typical
2558  * number of child devices is one, with very atypical cases
2559  * involving three or four, so the algorithms used here can be
2560  * simple and inefficient.
2561  *
2562  * It must also treat the omission of a previously observed device as
2563  * notification that the device no longer exists.
2564  *
2565  * Note that this function is serialized with hv_eject_device_work(),
2566  * because both are pushed to the ordered workqueue hbus->wq.
2567  */
2568 static void pci_devices_present_work(struct work_struct *work)
2569 {
2570 	u32 child_no;
2571 	bool found;
2572 	struct hv_pcidev_description *new_desc;
2573 	struct hv_pci_dev *hpdev;
2574 	struct hv_pcibus_device *hbus;
2575 	struct list_head removed;
2576 	struct hv_dr_work *dr_wrk;
2577 	struct hv_dr_state *dr = NULL;
2578 	unsigned long flags;
2579 
2580 	dr_wrk = container_of(work, struct hv_dr_work, wrk);
2581 	hbus = dr_wrk->bus;
2582 	kfree(dr_wrk);
2583 
2584 	INIT_LIST_HEAD(&removed);
2585 
2586 	/* Pull this off the queue and process it if it was the last one. */
2587 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2588 	while (!list_empty(&hbus->dr_list)) {
2589 		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2590 				      list_entry);
2591 		list_del(&dr->list_entry);
2592 
2593 		/* Throw this away if the list still has stuff in it. */
2594 		if (!list_empty(&hbus->dr_list)) {
2595 			kfree(dr);
2596 			continue;
2597 		}
2598 	}
2599 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2600 
2601 	if (!dr)
2602 		return;
2603 
2604 	mutex_lock(&hbus->state_lock);
2605 
2606 	/* First, mark all existing children as reported missing. */
2607 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2608 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2609 		hpdev->reported_missing = true;
2610 	}
2611 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2612 
2613 	/* Next, add back any reported devices. */
2614 	for (child_no = 0; child_no < dr->device_count; child_no++) {
2615 		found = false;
2616 		new_desc = &dr->func[child_no];
2617 
2618 		spin_lock_irqsave(&hbus->device_list_lock, flags);
2619 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2620 			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2621 			    (hpdev->desc.v_id == new_desc->v_id) &&
2622 			    (hpdev->desc.d_id == new_desc->d_id) &&
2623 			    (hpdev->desc.ser == new_desc->ser)) {
2624 				hpdev->reported_missing = false;
2625 				found = true;
2626 			}
2627 		}
2628 		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2629 
2630 		if (!found) {
2631 			hpdev = new_pcichild_device(hbus, new_desc);
2632 			if (!hpdev)
2633 				dev_err(&hbus->hdev->device,
2634 					"couldn't record a child device.\n");
2635 		}
2636 	}
2637 
2638 	/* Move missing children to a list on the stack. */
2639 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2640 	do {
2641 		found = false;
2642 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2643 			if (hpdev->reported_missing) {
2644 				found = true;
2645 				put_pcichild(hpdev);
2646 				list_move_tail(&hpdev->list_entry, &removed);
2647 				break;
2648 			}
2649 		}
2650 	} while (found);
2651 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2652 
2653 	/* Delete everything that should no longer exist. */
2654 	while (!list_empty(&removed)) {
2655 		hpdev = list_first_entry(&removed, struct hv_pci_dev,
2656 					 list_entry);
2657 		list_del(&hpdev->list_entry);
2658 
2659 		if (hpdev->pci_slot)
2660 			pci_destroy_slot(hpdev->pci_slot);
2661 
2662 		put_pcichild(hpdev);
2663 	}
2664 
2665 	switch (hbus->state) {
2666 	case hv_pcibus_installed:
2667 		/*
2668 		 * Tell the core to rescan the bus
2669 		 * because there may have been changes.
2670 		 */
2671 		pci_lock_rescan_remove();
2672 		pci_scan_child_bus(hbus->bridge->bus);
2673 		hv_pci_assign_numa_node(hbus);
2674 		hv_pci_assign_slots(hbus);
2675 		pci_unlock_rescan_remove();
2676 		break;
2677 
2678 	case hv_pcibus_init:
2679 	case hv_pcibus_probed:
2680 		survey_child_resources(hbus);
2681 		break;
2682 
2683 	default:
2684 		break;
2685 	}
2686 
2687 	mutex_unlock(&hbus->state_lock);
2688 
2689 	kfree(dr);
2690 }
2691 
2692 /**
2693  * hv_pci_start_relations_work() - Queue work to start device discovery
2694  * @hbus:	Root PCI bus, as understood by this driver
2695  * @dr:		The list of children returned from host
2696  *
2697  * Return:  0 on success, -errno on failure
2698  */
2699 static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2700 				       struct hv_dr_state *dr)
2701 {
2702 	struct hv_dr_work *dr_wrk;
2703 	unsigned long flags;
2704 	bool pending_dr;
2705 
2706 	if (hbus->state == hv_pcibus_removing) {
2707 		dev_info(&hbus->hdev->device,
2708 			 "PCI VMBus BUS_RELATIONS: ignored\n");
2709 		return -ENOENT;
2710 	}
2711 
2712 	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2713 	if (!dr_wrk)
2714 		return -ENOMEM;
2715 
2716 	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2717 	dr_wrk->bus = hbus;
2718 
2719 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2720 	/*
2721 	 * If pending_dr is true, we have already queued a work item,
2722 	 * which will see the new dr. Otherwise, we need to
2723 	 * queue a new work item.
2724 	 */
2725 	pending_dr = !list_empty(&hbus->dr_list);
2726 	list_add_tail(&dr->list_entry, &hbus->dr_list);
2727 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2728 
2729 	if (pending_dr)
2730 		kfree(dr_wrk);
2731 	else
2732 		queue_work(hbus->wq, &dr_wrk->wrk);
2733 
2734 	return 0;
2735 }
2736 
2737 /**
2738  * hv_pci_devices_present() - Handle list of new children
2739  * @hbus:      Root PCI bus, as understood by this driver
2740  * @relations: Packet from host listing children
2741  *
2742  * Process a new list of devices on the bus. The list of devices is
2743  * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
2744  * whenever a new list of devices for this bus appears.
2745  */
2746 static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2747 				   struct pci_bus_relations *relations)
2748 {
2749 	struct hv_dr_state *dr;
2750 	int i;
2751 
2752 	dr = kzalloc(struct_size(dr, func, relations->device_count),
2753 		     GFP_NOWAIT);
2754 	if (!dr)
2755 		return;
2756 
2757 	dr->device_count = relations->device_count;
2758 	for (i = 0; i < dr->device_count; i++) {
2759 		dr->func[i].v_id = relations->func[i].v_id;
2760 		dr->func[i].d_id = relations->func[i].d_id;
2761 		dr->func[i].rev = relations->func[i].rev;
2762 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2763 		dr->func[i].subclass = relations->func[i].subclass;
2764 		dr->func[i].base_class = relations->func[i].base_class;
2765 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2766 		dr->func[i].win_slot = relations->func[i].win_slot;
2767 		dr->func[i].ser = relations->func[i].ser;
2768 	}
2769 
2770 	if (hv_pci_start_relations_work(hbus, dr))
2771 		kfree(dr);
2772 }
2773 
2774 /**
2775  * hv_pci_devices_present2() - Handle list of new children
2776  * @hbus:	Root PCI bus, as understood by this driver
2777  * @relations:	Packet from host listing children
2778  *
2779  * This function is the v2 version of hv_pci_devices_present()
2780  */
2781 static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2782 				    struct pci_bus_relations2 *relations)
2783 {
2784 	struct hv_dr_state *dr;
2785 	int i;
2786 
2787 	dr = kzalloc(struct_size(dr, func, relations->device_count),
2788 		     GFP_NOWAIT);
2789 	if (!dr)
2790 		return;
2791 
2792 	dr->device_count = relations->device_count;
2793 	for (i = 0; i < dr->device_count; i++) {
2794 		dr->func[i].v_id = relations->func[i].v_id;
2795 		dr->func[i].d_id = relations->func[i].d_id;
2796 		dr->func[i].rev = relations->func[i].rev;
2797 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2798 		dr->func[i].subclass = relations->func[i].subclass;
2799 		dr->func[i].base_class = relations->func[i].base_class;
2800 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2801 		dr->func[i].win_slot = relations->func[i].win_slot;
2802 		dr->func[i].ser = relations->func[i].ser;
2803 		dr->func[i].flags = relations->func[i].flags;
2804 		dr->func[i].virtual_numa_node =
2805 			relations->func[i].virtual_numa_node;
2806 	}
2807 
2808 	if (hv_pci_start_relations_work(hbus, dr))
2809 		kfree(dr);
2810 }
2811 
2812 /**
2813  * hv_eject_device_work() - Asynchronously handles ejection
2814  * @work:	Work struct embedded in internal device struct
2815  *
2816  * This function handles ejecting a device.  Windows will
2817  * attempt to gracefully eject a device, waiting 60 seconds to
2818  * hear back from the guest OS that this completed successfully.
2819  * If this timer expires, the device will be forcibly removed.
2820  */
2821 static void hv_eject_device_work(struct work_struct *work)
2822 {
2823 	struct pci_eject_response *ejct_pkt;
2824 	struct hv_pcibus_device *hbus;
2825 	struct hv_pci_dev *hpdev;
2826 	struct pci_dev *pdev;
2827 	unsigned long flags;
2828 	int wslot;
2829 	struct {
2830 		struct pci_packet pkt;
2831 		u8 buffer[sizeof(struct pci_eject_response)];
2832 	} ctxt;
2833 
2834 	hpdev = container_of(work, struct hv_pci_dev, wrk);
2835 	hbus = hpdev->hbus;
2836 
2837 	mutex_lock(&hbus->state_lock);
2838 
2839 	/*
2840 	 * Ejection can come before or after the PCI bus has been set up, so
2841 	 * attempt to find it and tear down the bus state, if it exists.  This
2842 	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2843 	 * because hbus->bridge->bus may not exist yet.
2844 	 */
2845 	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2846 	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2847 	if (pdev) {
2848 		pci_lock_rescan_remove();
2849 		pci_stop_and_remove_bus_device(pdev);
2850 		pci_dev_put(pdev);
2851 		pci_unlock_rescan_remove();
2852 	}
2853 
2854 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2855 	list_del(&hpdev->list_entry);
2856 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2857 
2858 	if (hpdev->pci_slot)
2859 		pci_destroy_slot(hpdev->pci_slot);
2860 
2861 	memset(&ctxt, 0, sizeof(ctxt));
2862 	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2863 	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2864 	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2865 	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2866 			 sizeof(*ejct_pkt), 0,
2867 			 VM_PKT_DATA_INBAND, 0);
2868 
2869 	/* For the get_pcichild() in hv_pci_eject_device() */
2870 	put_pcichild(hpdev);
2871 	/* For the two refs taken in new_pcichild_device() */
2872 	put_pcichild(hpdev);
2873 	put_pcichild(hpdev);
2874 	/* hpdev has been freed. Do not use it any more. */
2875 
2876 	mutex_unlock(&hbus->state_lock);
2877 }
2878 
2879 /**
2880  * hv_pci_eject_device() - Handles device ejection
2881  * @hpdev:	Internal device tracking struct
2882  *
2883  * This function is invoked when an ejection packet arrives.  It
2884  * just schedules work so that we don't re-enter the packet
2885  * delivery code handling the ejection.
2886  */
2887 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2888 {
2889 	struct hv_pcibus_device *hbus = hpdev->hbus;
2890 	struct hv_device *hdev = hbus->hdev;
2891 
2892 	if (hbus->state == hv_pcibus_removing) {
2893 		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
2894 		return;
2895 	}
2896 
2897 	get_pcichild(hpdev);
2898 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2899 	queue_work(hbus->wq, &hpdev->wrk);
2900 }
2901 
2902 /**
2903  * hv_pci_onchannelcallback() - Handles incoming packets
2904  * @context:	Internal bus tracking struct
2905  *
2906  * This function is invoked whenever the host sends a packet to
2907  * this channel (which is private to this root PCI bus).
2908  */
2909 static void hv_pci_onchannelcallback(void *context)
2910 {
2911 	const int packet_size = 0x100;
2912 	int ret;
2913 	struct hv_pcibus_device *hbus = context;
2914 	struct vmbus_channel *chan = hbus->hdev->channel;
2915 	u32 bytes_recvd;
2916 	u64 req_id, req_addr;
2917 	struct vmpacket_descriptor *desc;
2918 	unsigned char *buffer;
2919 	int bufferlen = packet_size;
2920 	struct pci_packet *comp_packet;
2921 	struct pci_response *response;
2922 	struct pci_incoming_message *new_message;
2923 	struct pci_bus_relations *bus_rel;
2924 	struct pci_bus_relations2 *bus_rel2;
2925 	struct pci_dev_inval_block *inval;
2926 	struct pci_dev_incoming *dev_message;
2927 	struct hv_pci_dev *hpdev;
2928 	unsigned long flags;
2929 
2930 	buffer = kmalloc(bufferlen, GFP_ATOMIC);
2931 	if (!buffer)
2932 		return;
2933 
2934 	while (1) {
2935 		ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
2936 					   &bytes_recvd, &req_id);
2937 
2938 		if (ret == -ENOBUFS) {
2939 			kfree(buffer);
2940 			/* Handle large packet */
2941 			bufferlen = bytes_recvd;
2942 			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2943 			if (!buffer)
2944 				return;
2945 			continue;
2946 		}
2947 
2948 		/* Zero length indicates there are no more packets. */
2949 		if (ret || !bytes_recvd)
2950 			break;
2951 
2952 		/*
2953 		 * All incoming packets must be at least as large as a
2954 		 * response.
2955 		 */
2956 		if (bytes_recvd <= sizeof(struct pci_response))
2957 			continue;
2958 		desc = (struct vmpacket_descriptor *)buffer;
2959 
2960 		switch (desc->type) {
2961 		case VM_PKT_COMP:
2962 
2963 			lock_requestor(chan, flags);
2964 			req_addr = __vmbus_request_addr_match(chan, req_id,
2965 							      VMBUS_RQST_ADDR_ANY);
2966 			if (req_addr == VMBUS_RQST_ERROR) {
2967 				unlock_requestor(chan, flags);
2968 				dev_err(&hbus->hdev->device,
2969 					"Invalid transaction ID %llx\n",
2970 					req_id);
2971 				break;
2972 			}
2973 			comp_packet = (struct pci_packet *)req_addr;
2974 			response = (struct pci_response *)buffer;
2975 			/*
2976 			 * Call ->completion_func() within the critical section to make
2977 			 * sure that the packet pointer is still valid during the call:
2978 			 * here 'valid' means that there's a task still waiting for the
2979 			 * completion, and that the packet data is still on the waiting
2980 			 * task's stack.  Cf. hv_compose_msi_msg().
2981 			 */
2982 			comp_packet->completion_func(comp_packet->compl_ctxt,
2983 						     response,
2984 						     bytes_recvd);
2985 			unlock_requestor(chan, flags);
2986 			break;
2987 
2988 		case VM_PKT_DATA_INBAND:
2989 
2990 			new_message = (struct pci_incoming_message *)buffer;
2991 			switch (new_message->message_type.type) {
2992 			case PCI_BUS_RELATIONS:
2993 
2994 				bus_rel = (struct pci_bus_relations *)buffer;
2995 				if (bytes_recvd < sizeof(*bus_rel) ||
2996 				    bytes_recvd <
2997 					struct_size(bus_rel, func,
2998 						    bus_rel->device_count)) {
2999 					dev_err(&hbus->hdev->device,
3000 						"bus relations too small\n");
3001 					break;
3002 				}
3003 
3004 				hv_pci_devices_present(hbus, bus_rel);
3005 				break;
3006 
3007 			case PCI_BUS_RELATIONS2:
3008 
3009 				bus_rel2 = (struct pci_bus_relations2 *)buffer;
3010 				if (bytes_recvd < sizeof(*bus_rel2) ||
3011 				    bytes_recvd <
3012 					struct_size(bus_rel2, func,
3013 						    bus_rel2->device_count)) {
3014 					dev_err(&hbus->hdev->device,
3015 						"bus relations v2 too small\n");
3016 					break;
3017 				}
3018 
3019 				hv_pci_devices_present2(hbus, bus_rel2);
3020 				break;
3021 
3022 			case PCI_EJECT:
3023 
3024 				dev_message = (struct pci_dev_incoming *)buffer;
3025 				if (bytes_recvd < sizeof(*dev_message)) {
3026 					dev_err(&hbus->hdev->device,
3027 						"eject message too small\n");
3028 					break;
3029 				}
3030 				hpdev = get_pcichild_wslot(hbus,
3031 						      dev_message->wslot.slot);
3032 				if (hpdev) {
3033 					hv_pci_eject_device(hpdev);
3034 					put_pcichild(hpdev);
3035 				}
3036 				break;
3037 
3038 			case PCI_INVALIDATE_BLOCK:
3039 
3040 				inval = (struct pci_dev_inval_block *)buffer;
3041 				if (bytes_recvd < sizeof(*inval)) {
3042 					dev_err(&hbus->hdev->device,
3043 						"invalidate message too small\n");
3044 					break;
3045 				}
3046 				hpdev = get_pcichild_wslot(hbus,
3047 							   inval->wslot.slot);
3048 				if (hpdev) {
3049 					if (hpdev->block_invalidate) {
3050 						hpdev->block_invalidate(
3051 						    hpdev->invalidate_context,
3052 						    inval->block_mask);
3053 					}
3054 					put_pcichild(hpdev);
3055 				}
3056 				break;
3057 
3058 			default:
3059 				dev_warn(&hbus->hdev->device,
3060 					"Unimplemented protocol message %x\n",
3061 					new_message->message_type.type);
3062 				break;
3063 			}
3064 			break;
3065 
3066 		default:
3067 			dev_err(&hbus->hdev->device,
3068 				"unhandled packet type %d, tid %llx len %d\n",
3069 				desc->type, req_id, bytes_recvd);
3070 			break;
3071 		}
3072 	}
3073 
3074 	kfree(buffer);
3075 }
3076 
3077 /**
3078  * hv_pci_protocol_negotiation() - Set up protocol
3079  * @hdev:		VMBus's tracking struct for this root PCI bus.
3080  * @version:		Array of supported channel protocol versions in
3081  *			the order of probing - highest go first.
3082  * @num_version:	Number of elements in the version array.
3083  *
3084  * This driver is intended to support running on Windows 10
3085  * (server) and later versions. It will not run on earlier
3086  * versions, as they assume that many of the operations which
3087  * Linux needs accomplished with a spinlock held were done via
3088  * asynchronous VMBus messaging.  Windows 10 increases the
3089  * surface area of PCI emulation so that these actions can take
3090  * place by suspending a virtual processor for their duration.
3091  *
3092  * This function negotiates the channel protocol version,
3093  * failing if the host doesn't support the necessary protocol
3094  * level.
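      *
      * For example (an illustrative exchange, not a guarantee about any
      * particular host build): a host that tops out at protocol 1.3
      * rejects the 1.4 request with STATUS_REVISION_MISMATCH, and the loop
      * below then retries with 1.3 and settles on it.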
3095  */
3096 static int hv_pci_protocol_negotiation(struct hv_device *hdev,
3097 				       enum pci_protocol_version_t version[],
3098 				       int num_version)
3099 {
3100 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3101 	struct pci_version_request *version_req;
3102 	struct hv_pci_compl comp_pkt;
3103 	struct pci_packet *pkt;
3104 	int ret;
3105 	int i;
3106 
3107 	/*
3108 	 * Initiate the handshake with the host and negotiate
3109 	 * a version that the host can support. We start with the
3110 	 * highest version number and go down if the host cannot
3111 	 * support it.
3112 	 */
3113 	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
3114 	if (!pkt)
3115 		return -ENOMEM;
3116 
3117 	init_completion(&comp_pkt.host_event);
3118 	pkt->completion_func = hv_pci_generic_compl;
3119 	pkt->compl_ctxt = &comp_pkt;
3120 	version_req = (struct pci_version_request *)&pkt->message;
3121 	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
3122 
3123 	for (i = 0; i < num_version; i++) {
3124 		version_req->protocol_version = version[i];
3125 		ret = vmbus_sendpacket(hdev->channel, version_req,
3126 				sizeof(struct pci_version_request),
3127 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
3128 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3129 		if (!ret)
3130 			ret = wait_for_response(hdev, &comp_pkt.host_event);
3131 
3132 		if (ret) {
3133 			dev_err(&hdev->device,
3134 				"PCI Pass-through VSP failed to request version: %d\n",
3135 				ret);
3136 			goto exit;
3137 		}
3138 
3139 		if (comp_pkt.completion_status >= 0) {
3140 			hbus->protocol_version = version[i];
3141 			dev_info(&hdev->device,
3142 				"PCI VMBus probing: Using version %#x\n",
3143 				hbus->protocol_version);
3144 			goto exit;
3145 		}
3146 
3147 		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
3148 			dev_err(&hdev->device,
3149 				"PCI Pass-through VSP failed version request: %#x\n",
3150 				comp_pkt.completion_status);
3151 			ret = -EPROTO;
3152 			goto exit;
3153 		}
3154 
3155 		reinit_completion(&comp_pkt.host_event);
3156 	}
3157 
3158 	dev_err(&hdev->device,
3159 		"PCI pass-through VSP failed to find supported version\n");
3160 	ret = -EPROTO;
3161 
3162 exit:
3163 	kfree(pkt);
3164 	return ret;
3165 }
3166 
3167 /**
3168  * hv_pci_free_bridge_windows() - Release memory regions for the
3169  * bus
3170  * @hbus:	Root PCI bus, as understood by this driver
3171  */
3172 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
3173 {
3174 	/*
3175 	 * Set the resources back to the way they looked when they
3176 	 * were allocated by setting IORESOURCE_BUSY again.
3177 	 */
3178 
3179 	if (hbus->low_mmio_space && hbus->low_mmio_res) {
3180 		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
3181 		vmbus_free_mmio(hbus->low_mmio_res->start,
3182 				resource_size(hbus->low_mmio_res));
3183 	}
3184 
3185 	if (hbus->high_mmio_space && hbus->high_mmio_res) {
3186 		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
3187 		vmbus_free_mmio(hbus->high_mmio_res->start,
3188 				resource_size(hbus->high_mmio_res));
3189 	}
3190 }
3191 
3192 /**
3193  * hv_pci_allocate_bridge_windows() - Allocate memory regions
3194  * for the bus
3195  * @hbus:	Root PCI bus, as understood by this driver
3196  *
3197  * This function calls vmbus_allocate_mmio(), which is itself a
3198  * bit of a compromise.  Ideally, we might change the pnp layer
3199  * in the kernel such that it comprehends either PCI devices
3200  * which are "grandchildren of ACPI," with some intermediate bus
3201  * node (in this case, VMBus), or change it such that it
3202  * understands VMBus directly.  The pnp layer, however, has been
3203  * declared deprecated, and is not subject to change.
3204  *
3205  * The workaround, implemented here, is to ask VMBus to allocate
3206  * MMIO space for this bus.  VMBus itself knows which ranges are
3207  * appropriate by looking at its own ACPI objects.  Then, after
3208  * these ranges are claimed, they're modified to look like they
3209  * would have looked if the ACPI and pnp code had allocated
3210  * bridge windows.  These descriptors have to exist in this form
3211  * in order to satisfy the code which will get invoked when the
3212  * endpoint PCI function driver calls request_mem_region() or
3213  * request_mem_region_exclusive().
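      *
      * For example (an illustrative calculation): if the children together
      * need 68 KiB (0x11000) of low MMIO space, the vmbus_allocate_mmio()
      * call below requests that size with align = 64 KiB, the largest
      * power of 2 not exceeding the total, which matches the packing
      * assumption in prepopulate_bars().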
3214  *
3215  * Return: 0 on success, -errno on failure
3216  */
3217 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
3218 {
3219 	resource_size_t align;
3220 	int ret;
3221 
3222 	if (hbus->low_mmio_space) {
3223 		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
3224 		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
3225 					  (u64)(u32)0xffffffff,
3226 					  hbus->low_mmio_space,
3227 					  align, false);
3228 		if (ret) {
3229 			dev_err(&hbus->hdev->device,
3230 				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
3231 				hbus->low_mmio_space);
3232 			return ret;
3233 		}
3234 
3235 		/* Modify this resource to become a bridge window. */
3236 		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
3237 		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
3238 		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
3239 	}
3240 
3241 	if (hbus->high_mmio_space) {
3242 		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
3243 		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
3244 					  0x100000000, -1,
3245 					  hbus->high_mmio_space, align,
3246 					  false);
3247 		if (ret) {
3248 			dev_err(&hbus->hdev->device,
3249 				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
3250 				hbus->high_mmio_space);
3251 			goto release_low_mmio;
3252 		}
3253 
3254 		/* Modify this resource to become a bridge window. */
3255 		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3256 		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3257 		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3258 	}
3259 
3260 	return 0;
3261 
3262 release_low_mmio:
3263 	if (hbus->low_mmio_res) {
3264 		vmbus_free_mmio(hbus->low_mmio_res->start,
3265 				resource_size(hbus->low_mmio_res));
3266 	}
3267 
3268 	return ret;
3269 }
3270 
3271 /**
3272  * hv_allocate_config_window() - Find MMIO space for PCI Config
3273  * @hbus:	Root PCI bus, as understood by this driver
3274  *
3275  * This function claims memory-mapped I/O space for accessing
3276  * configuration space for the functions on this bus.
3277  *
3278  * Return: 0 on success, -errno on failure
3279  */
3280 static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3281 {
3282 	int ret;
3283 
3284 	/*
3285 	 * Set up a region of MMIO space to use for accessing configuration
3286 	 * space.
3287 	 */
3288 	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3289 				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3290 	if (ret)
3291 		return ret;
3292 
3293 	/*
3294 	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3295 	 * resource claims (those which cannot be overlapped) and the ranges
3296 	 * which are valid for the children of this bus, which are intended
3297 	 * to be overlapped by those children.  Set the flag on this claim
3298 	 * meaning that this region can't be overlapped.
3299 	 */
3300 
3301 	hbus->mem_config->flags |= IORESOURCE_BUSY;
3302 
3303 	return 0;
3304 }
3305 
3306 static void hv_free_config_window(struct hv_pcibus_device *hbus)
3307 {
3308 	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3309 }
3310 
3311 static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3312 
3313 /**
3314  * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3315  * @hdev:	VMBus's tracking struct for this root PCI bus
3316  *
3317  * Return: 0 on success, -errno on failure
3318  */
3319 static int hv_pci_enter_d0(struct hv_device *hdev)
3320 {
3321 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3322 	struct pci_bus_d0_entry *d0_entry;
3323 	struct hv_pci_compl comp_pkt;
3324 	struct pci_packet *pkt;
3325 	bool retry = true;
3326 	int ret;
3327 
3328 enter_d0_retry:
3329 	/*
3330 	 * Tell the host that the bus is ready to use, and moved into the
3331 	 * powered-on state.  This includes telling the host which region
3332 	 * of memory-mapped I/O space has been chosen for configuration space
3333 	 * access.
3334 	 */
3335 	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3336 	if (!pkt)
3337 		return -ENOMEM;
3338 
3339 	init_completion(&comp_pkt.host_event);
3340 	pkt->completion_func = hv_pci_generic_compl;
3341 	pkt->compl_ctxt = &comp_pkt;
3342 	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
3343 	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3344 	d0_entry->mmio_base = hbus->mem_config->start;
3345 
3346 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3347 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
3348 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3349 	if (!ret)
3350 		ret = wait_for_response(hdev, &comp_pkt.host_event);
3351 
3352 	if (ret)
3353 		goto exit;
3354 
3355 	/*
3356 	 * In certain cases (e.g. kdump) the PCI device of interest was
3357 	 * not cleanly shut down and its resources are still held on the
3358 	 * host side, so the host could return an invalid device status.
3359 	 * We need to explicitly request that the host release the
3360 	 * resources and then try to enter D0 again.
3361 	 */
3362 	if (comp_pkt.completion_status < 0 && retry) {
3363 		retry = false;
3364 
3365 		dev_err(&hdev->device, "Retrying D0 Entry\n");
3366 
3367 		/*
3368 		 * hv_pci_bus_exit() calls hv_send_resources_released()
3369 		 * to free up the resources of its child devices.
3370 		 * In the kdump kernel we need to set
3371 		 * wslot_res_allocated to 255 so that it scans all child
3372 		 * devices to release the resources allocated in the
3373 		 * normal kernel before the panic happened.
3374 		 */
3375 		hbus->wslot_res_allocated = 255;
3376 
3377 		ret = hv_pci_bus_exit(hdev, true);
3378 
3379 		if (ret == 0) {
3380 			kfree(pkt);
3381 			goto enter_d0_retry;
3382 		}
3383 		dev_err(&hdev->device,
3384 			"Retrying D0 failed with ret %d\n", ret);
3385 	}
3386 
3387 	if (comp_pkt.completion_status < 0) {
3388 		dev_err(&hdev->device,
3389 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
3390 			comp_pkt.completion_status);
3391 		ret = -EPROTO;
3392 		goto exit;
3393 	}
3394 
3395 	ret = 0;
3396 
3397 exit:
3398 	kfree(pkt);
3399 	return ret;
3400 }
3401 
/**
 * hv_pci_query_relations() - Ask host to send list of child devices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_query_relations(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_message message;
	struct completion comp;
	int ret;

	/* Ask the host to send along the list of child devices */
	init_completion(&comp);
	if (cmpxchg(&hbus->survey_event, NULL, &comp))
		return -ENOTEMPTY;

	memset(&message, 0, sizeof(message));
	message.type = PCI_QUERY_BUS_RELATIONS;

	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
			       0, VM_PKT_DATA_INBAND, 0);
	if (!ret)
		ret = wait_for_response(hdev, &comp);

	/*
	 * In the case of fast device addition/removal, it's possible that
	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
	 * already got a PCI_BUS_RELATIONS* message from the host and the
	 * channel callback already scheduled a work item to hbus->wq, which
	 * can be running pci_devices_present_work() -> survey_child_resources()
	 * -> complete(&hbus->survey_event), even after hv_pci_query_relations()
	 * exits and the stack variable 'comp' is no longer valid; as a result,
	 * a hang or a page fault may happen when the complete() calls
	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
	 * -ENODEV, there can't be any more work items scheduled to hbus->wq
	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
	 * channel->rescind = true.
	 */
	flush_workqueue(hbus->wq);

	return ret;
}
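
/*
 * For reference, a sketch of the completion path that makes the flush above
 * necessary (the call chain described in the comment in the function):
 *
 *	hv_pci_onchannelcallback()
 *	  -> work item queued on hbus->wq
 *	       pci_devices_present_work()
 *	         -> survey_child_resources()
 *	              -> complete() on hbus->survey_event, which may still
 *	                 point at the stack variable 'comp'
 */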

/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * The host OS expects to be sent a request, as a message,
 * which contains all the resources that the device will use.
 * The response contains those same resources, "translated",
 * which is to say, the values that the hardware should use
 * when it delivers an interrupt.  (MMIO resources are used in
 * local terms.)  This is nice for Windows, and lines up with
 * the FDO/PDO split, which doesn't exist in Linux.  Linux
 * fundamentally expects to scan an emulated PCI configuration
 * space.  So this message is sent here only to drive the state
 * machine on the host forward.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	int wslot;
	int ret;

	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ret = 0;

	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)&pkt->message;
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)&pkt->message;
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
				size_res, (unsigned long)pkt,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);
		if (ret)
			break;

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x",
				comp_pkt.completion_status);
			break;
		}

		hbus->wslot_res_allocated = wslot;
	}

	kfree(pkt);
	return ret;
}
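
/*
 * In short, hv_send_resources_allocated() performs one request/completion
 * exchange per child device (a sketch; the message type and size depend on
 * the negotiated protocol version):
 *
 *	for each wslot with a child device:
 *		send PCI_RESOURCES_ASSIGNED (pre-1.2) or
 *		     PCI_RESOURCES_ASSIGNED2 (1.2 and later)
 *		wait for the host's completion
 *		record progress in hbus->wslot_res_allocated
 */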

/**
 * hv_send_resources_released() - Report local resources released
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	int wslot;
	int ret;

	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;

		hbus->wslot_res_allocated = wslot - 1;
	}

	hbus->wslot_res_allocated = -1;

	return 0;
}

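/*
 * Note that hv_send_resources_released() walks downward from
 * hbus->wslot_res_allocated, which tracks the highest slot reported in
 * hv_send_resources_allocated(); the kdump retry path in hv_pci_enter_d0()
 * sets it to 255 so that the loop covers every possible slot.
 */
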
#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for this driver's passthrough PCI devices.
 */
#define HVPCI_DOM_INVALID 0

/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * Check if the requested PCI domain number is in use, and return another
 * free number if it is.
 *
 * @dom: Requested domain number
 *
 * return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}

/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}

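/*
 * A minimal usage sketch of the domain-number helpers, mirroring
 * hv_pci_probe() and hv_pci_remove():
 *
 *	u16 dom = hv_get_dom_num(dom_req);
 *
 *	if (dom == HVPCI_DOM_INVALID)
 *		return -EINVAL;
 *	...use 'dom' as the bridge's domain_nr...
 *	hv_put_dom_num(dom);
 */
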
/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev:	VMBus's tracking struct for this root PCI bus
 * @dev_id:	Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct pci_host_bridge *bridge;
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
	if (!bridge)
		return -ENOMEM;

	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;

	hbus->bridge = bridge;
	mutex_init(&hbus->state_lock);
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and other
	 * specs. Pull it from the instance ID, to get something usually
	 * unique. In the rare case of a collision, we will find another
	 * number that is not in use.
	 *
	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
	 * together with this guest driver can guarantee that (1) the only
	 * domain used by Gen1 VMs for something that looks like a physical
	 * PCI bus (which is actually emulated by the hypervisor) is domain 0,
	 * and (2) there will be no overlap between domains (after fixing
	 * possible collisions) in the same VM.
	 */
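	/*
	 * For example (hypothetical instance ID bytes): b[4] == 0x34 and
	 * b[5] == 0x12 yield dom_req == 0x1234.
	 */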
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%x or other numbers", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%x has collision, using 0x%x",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
	hbus->use_calls = false;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	mutex_unlock(&hbus->state_lock);
	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
release_state_lock:
	mutex_unlock(&hbus->state_lock);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct vmbus_channel *chan = hdev->channel;
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	u64 trans_id;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (chan->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on the stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs taken in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
				     sizeof(struct pci_message),
				     (unsigned long)&pkt.teardown_packet,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
		/*
		 * The completion packet on the stack becomes invalid after
		 * 'return'; remove the ID from the VMbus requestor if the
		 * identifier is still mapped to/associated with the packet.
		 *
		 * Cf. hv_pci_onchannelcallback().
		 */
		vmbus_request_addr_match(chan, trans_id,
					 (unsigned long)&pkt.teardown_packet);
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev:	VMBus's tracking struct for this root PCI bus
 */
static void hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), so it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
}

static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend().  When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can still be running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which hv_pci_suspend() may be in the process of
	 * closing, e.g. the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it flushes
	 * hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}
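
/*
 * The quiescing sequence above, in short (a sketch, where 'chan' stands for
 * hdev->channel):
 *
 *	tasklet_disable(&chan->callback_event);
 *	hbus->state = hv_pcibus_removing;	<- no new work items after this
 *	tasklet_enable(&chan->callback_event);
 *	flush_workqueue(hbus->wq);		<- drain already-queued work
 *	hv_pci_bus_exit(hdev, true);
 *	vmbus_close(chan);			<- nothing can touch the channel now
 */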

static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct irq_data *irq_data;
	struct msi_desc *entry;
	int ret = 0;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		return 0;

	msi_lock_descs(&pdev->dev);
	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data)) {
			ret = -EINVAL;
			break;
		}

		hv_compose_msi_msg(irq_data, &entry->msg);
	}
	msi_unlock_descs(&pdev->dev);

	return ret;
}

/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
 * Table entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}

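/*
 * hv_pci_resume() below re-runs an abbreviated form of the probe sequence;
 * in outline:
 *
 *	vmbus_open()
 *	hv_pci_protocol_negotiation()	(same version as before hibernation)
 *	hv_pci_query_relations()
 *	hv_pci_enter_d0()
 *	hv_send_resources_allocated()
 *	prepopulate_bars()
 *	hv_pci_restore_msi_state()
 */
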
static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto release_state_lock;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	mutex_unlock(&hbus->state_lock);
	return 0;

release_state_lock:
	mutex_unlock(&hbus->state_lock);
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name		= "hv_pci",
	.id_table	= hv_pci_id_table,
	.probe		= hv_pci_probe,
	.remove		= hv_pci_remove,
	.suspend	= hv_pci_suspend,
	.resume		= hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	int ret;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	ret = hv_pci_irqchip_init();
	if (ret)
		return ret;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize the PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");