// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM.  This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications.  Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique.  Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector.  This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities.  For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
#include <linux/of_irq.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff)

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};
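
/*
 * Illustrative sketch, not used by the driver: how the macros above compose
 * and crack a negotiated protocol version. The helper name is hypothetical.
 */
static inline bool hv_pci_version_macros_example(void)
{
	u32 v = PCI_MAKE_VERSION(1, 4);		/* 0x00010004 */

	return PCI_MAJOR_VERSION(v) == 1 && PCI_MINOR_VERSION(v) == 4;
}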

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - the highest goes
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for a 32-bit serial number as a string */
#define SLOT_NAME_SIZE 11

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2		= PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8 bits wide on Express, as interpreted through ARI,
 * which is all this driver does.  This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;
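
/*
 * Illustrative sketch, not used by the driver: how a Linux devfn maps onto
 * the Windows slot encoding above (the real converters, devfn_to_wslot()
 * and wslot_to_devfn(), appear later in this file). The helper name is
 * hypothetical.
 */
static inline bool win_slot_encoding_example(void)
{
	union win_slot_encoding wslot = { .slot = 0 };

	wslot.bits.dev = PCI_SLOT(PCI_DEVFN(0, 3));	/* 0 */
	wslot.bits.func = PCI_FUNC(PCI_DEVFN(0, 3));	/* 3 */

	/* dev occupies bits 0-4, func bits 5-7, so the result is 0x60 */
	return wslot.slot == 0x60;
}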

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};

struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;

/**
 * struct hv_msi_desc - MSI descriptor (protocol 1.1)
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 *	Everything is the same as in 'hv_msi_desc2' except that the size of
 *	the 'vector' field is larger to support bigger vector values, e.g.
 *	LPI vectors on ARM.
 */
struct hv_msi_desc3 {
	u32	vector;
	u8	delivery_mode;
	u8	reserved;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/**
 * struct tran_int_desc - interrupt descriptor as translated by the host
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;
};

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * protocol_version: The protocol version requested.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry.  This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;

	struct mutex state_lock;
	enum hv_pcibus_state state;

	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;	/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct irq_domain *irq_domain;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;
	bool use_calls; /* Use hypercalls to access mmio cfg space */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[] __counted_by(device_count);
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};

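/*
 * Illustrative sketch, not used by the driver: decoding a probed memory BAR
 * value (see the probed_bar[] comment above) into a size, per the standard
 * PCI BAR sizing technique. The helper name is hypothetical and only the
 * 32-bit case is shown.
 */
static inline u64 hv_pci_probed_bar_size_example(u32 probed)
{
	/* Mask off the low type/prefetch bits before inverting. */
	u32 masked = probed & (u32)PCI_BASE_ADDRESS_MEM_MASK;

	return (u64)~masked + 1;	/* e.g. 0xfff00000 -> SZ_1M */
}
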
struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE		APIC_DELIVERY_MODE_FIXED
#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_ACK

static int hv_pci_irqchip_init(void)
{
	return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

#define hv_msi_prepare		pci_msi_prepare

/**
 * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_irq_retarget_interrupt(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct hv_retarget_device_interrupt *params;
	struct tran_int_desc *int_desc;
	struct hv_pcibus_device *hbus;
	const struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	int_desc = data->chip_data;
	if (!int_desc) {
		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
			 __func__, data->irq);
		return;
	}

	local_irq_save(flags);

	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			   (hbus->hdev->dev_instance.b[4] << 16) |
			   (hbus->hdev->dev_instance.b[7] << 8) |
			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
			   PCI_FUNC(pdev->devfn);
	params->int_target.vector = hv_msi_get_int_vector(data);

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto out;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto out;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

out:
	local_irq_restore(flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the hv_do_hypercall() above
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity(), so later when the VM resumes,
	 * hv_pci_restore_msi_state() is able to correctly restore the
	 * interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);
}

static void hv_arch_irq_unmask(struct irq_data *data)
{
	if (hv_root_partition())
		/*
		 * In the case of a nested root partition, the nested
		 * hypervisor takes care of interrupt remapping, and thus
		 * the MAP_DEVICE_INTERRUPT hypercall is required instead of
		 * RETARGET_INTERRUPT.
		 */
		(void)hv_map_msi_interrupt(data, NULL);
	else
		hv_irq_retarget_interrupt(data);
}
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; the architected SPI range is [32, 1019], but
 * we leave a bit of room at the start to allow for SPIs to be specified
 * through ACPI, and we start at a power of two to satisfy the power-of-2
 * multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE		0
#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_EOI
#define hv_msi_prepare		NULL

struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex	map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 *			the bitmap.
 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 *			the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}

static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	fwspec.fwnode = domain->parent->fwnode;
	if (is_of_node(fwspec.fwnode)) {
		/* SPI lines for OF translations start at offset 32 */
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	}

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Pick the first present CPU as a temporary IRQ affinity that can be used
 * while composing the MSI (the message is obtained from the hypervisor).
 * GIC will eventually set the right affinity for the IRQ, and the 'unmask'
 * will retarget the interrupt to that CPU.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc	= hv_pci_vec_irq_domain_alloc,
	.free	= hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};

#ifdef CONFIG_OF

static struct irq_domain *hv_pci_of_irq_domain_parent(void)
{
	struct device_node *parent;
	struct irq_domain *domain;

	parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
	if (!parent)
		return NULL;
	domain = irq_find_host(parent);
	of_node_put(parent);

	return domain;
}

#endif

#ifdef CONFIG_ACPI

static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
{
	acpi_gsi_domain_disp_fn gsi_domain_disp_fn;

	gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
	if (!gsi_domain_disp_fn)
		return NULL;
	return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
				     DOMAIN_BUS_ANY);
}

#endif

static int hv_pci_irqchip_init(void)
{
	static struct hv_pci_chip_data *chip_data;
	struct fwnode_handle *fn = NULL;
	struct irq_domain *irq_domain_parent = NULL;
	int ret = -ENOMEM;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return ret;

	mutex_init(&chip_data->map_lock);
	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
	if (!fn)
		goto free_chip;

	/*
	 * Once enabled, the IRQ domain should not be removed, since there is
	 * no way to ensure that all the corresponding devices are also gone
	 * and that no interrupts will be generated.
	 */
#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		irq_domain_parent = hv_pci_acpi_irq_domain_parent();
#endif
#ifdef CONFIG_OF
	if (!irq_domain_parent)
		irq_domain_parent = hv_pci_of_irq_domain_parent();
#endif
	if (!irq_domain_parent) {
		WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
		ret = -EINVAL;
		goto free_chip;
	}

	hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
		HV_PCI_MSI_SPI_NR,
		fn, &hv_pci_domain_ops,
		chip_data);

	if (!hv_msi_gic_irq_domain) {
		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
		goto free_chip;
	}

	return 0;

free_chip:
	kfree(chip_data);
	if (fn)
		irq_domain_free_fwnode(fn);

	return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for the interrupts of PCI devices and are managed via GICD
 * registers, which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
						u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

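/*
 * Illustrative sketch, not driver code: the request/completion pattern used
 * throughout this file. A request embeds a struct pci_packet whose
 * completion_func fires when the host's response arrives, while the sender
 * blocks in wait_for_response(). The message type below is a placeholder;
 * see hv_read_config_block() later in this file for a real instance.
 *
 *	struct {
 *		struct pci_packet pkt;
 *		struct pci_child_message msg;
 *	} pkt = {};
 *	struct hv_pci_compl comp_pkt;
 *
 *	init_completion(&comp_pkt.host_event);
 *	pkt.pkt.completion_func = hv_pci_generic_compl;
 *	pkt.pkt.compl_ctxt = &comp_pkt;
 *	pkt.msg.message_type.type = PCI_QUERY_STOP;	// placeholder type
 *	pkt.msg.wslot.slot = wslot;
 *
 *	vmbus_sendpacket(hdev->channel, &pkt.msg, sizeof(pkt.msg),
 *			 (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
 *			 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *	wait_for_response(hdev, &comp_pkt.host_event);
 */
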
/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn:	The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot:	The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
	struct hv_mmio_read_input *in;
	struct hv_mmio_read_output *out;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument page.  Use it for
	 * both input and output.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
	in->gpa = gpa;
	in->size = size;

	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
	if (hv_result_success(ret)) {
		switch (size) {
		case 1:
			*val = *(u8 *)(out->data);
			break;
		case 2:
			*val = *(u16 *)(out->data);
			break;
		default:
			*val = *(u32 *)(out->data);
			break;
		}
	} else
		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
				ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
	struct hv_mmio_write_input *in;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument memory.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	in->gpa = gpa;
	in->size = size;
	switch (size) {
	case 1:
		*(u8 *)(in->data) = val;
		break;
	case 2:
		*(u16 *)(in->data) = val;
		break;
	default:
		*(u32 *)(in->data) = val;
		break;
	}

	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
	if (!hv_result_success(ret))
		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
				ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space.  Writing to the first page chooses
 * the PCI function being written or read.  Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */

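/*
 * Illustrative sketch of the mechanism described above (not driver code;
 * the real implementations below also handle the hypercall-based path, the
 * simulated registers, and locking):
 *
 *	writel(win_slot, cfg_addr);	// page 1: select the function
 *	mb();				// selection before access
 *	val = readl(cfg_addr + CFG_PAGE_OFFSET + where);
 *					// page 2: that function's cfg space
 */
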
/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
		/*
		 * Interrupt Line and Interrupt PIN are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {

		spin_lock_irqsave(&hbus->config_lock, flags);
		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
						hpdev->desc.win_slot.slot);
			hv_pci_read_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to be read. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before reading. */
			mb();
			/* Read from that function's config space. */
			switch (size) {
			case 1:
				*val = readb(addr);
				break;
			case 2:
				*val = readw(addr);
				break;
			default:
				*val = readl(addr);
				break;
			}
			/*
			 * Make sure the read was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	u32 val;
	u16 ret;
	unsigned long flags;

	spin_lock_irqsave(&hbus->config_lock, flags);

	if (hbus->use_calls) {
		phys_addr_t addr = hbus->mem_config->start +
					 CFG_PAGE_OFFSET + PCI_VENDOR_ID;

		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					hpdev->desc.win_slot.slot);
		hv_pci_read_mmio(dev, addr, 2, &val);
		ret = val;  /* Truncates to 16 bits */
	} else {
		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
					     PCI_VENDOR_ID;
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		ret = readw(addr);
		/*
		 * mb() is not required here, because the
		 * spin_unlock_irqrestore() is a barrier.
		 */
	}

	spin_unlock_irqrestore(&hbus->config_lock, flags);

	return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);

		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
						hpdev->desc.win_slot.slot);
			hv_pci_write_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to write. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before writing. */
			wmb();
			/* Write to that function's config space. */
			switch (size) {
			case 1:
				writeb(val, addr);
				break;
			case 2:
				writew(val, addr);
				break;
			default:
				writel(val, addr);
				break;
			}
			/*
			 * Make sure the write was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver.  These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional.  Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver.  The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one or
 * more of the first 64 blocks.  This invalidation is delivered via a callback
 * supplied to the VF driver by this driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */

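/*
 * Illustrative sketch, not part of this driver: how a VF driver might
 * consume the backchannel. This assumes the hyperv_read_cfg_blk()/
 * hyperv_write_cfg_blk() wrappers exported by drivers/pci/pci-hyperv-intf.c;
 * the block id and buffer size here are arbitrary.
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int bytes_returned;
 *	int ret;
 *
 *	ret = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 0,
 *				  &bytes_returned);
 *	if (!ret)
 *		ret = hyperv_write_cfg_blk(pdev, buf, bytes_returned, 0);
 */
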
struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)pkt.buf;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}

/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)pkt.buf;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}

/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
					void (*block_invalidate)(void *context,
								 u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}

/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
			     struct tran_int_desc *int_desc)
{
	struct pci_delete_interrupt *int_pkt;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_delete_interrupt)];
	} ctxt;

	if (!int_desc->vector_count) {
		kfree(int_desc);
		return;
	}
	memset(&ctxt, 0, sizeof(ctxt));
	int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
	int_pkt->message_type.type =
		PCI_DELETE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	int_pkt->int_desc = *int_desc;
	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
			 0, VM_PKT_DATA_INBAND, 0);
	kfree(int_desc);
}
1679 
1680 /**
1681  * hv_msi_free() - Free the MSI.
1682  * @domain:	The interrupt domain pointer
1683  * @irq:	Identifies the IRQ.
1684  *
1685  * The Hyper-V parent partition and hypervisor are tracking the
1686  * messages that are in use, keeping the interrupt redirection
1687  * table up to date.  This callback sends a message that frees
1688  * the IRT entry and related tracking nonsense.
1689  */
1690 static void hv_msi_free(struct irq_domain *domain, unsigned int irq)
1691 {
1692 	struct hv_pcibus_device *hbus;
1693 	struct hv_pci_dev *hpdev;
1694 	struct pci_dev *pdev;
1695 	struct tran_int_desc *int_desc;
1696 	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1697 	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1698 
1699 	pdev = msi_desc_to_pci_dev(msi);
1700 	hbus = domain->host_data;
1701 	int_desc = irq_data_get_irq_chip_data(irq_data);
1702 	if (!int_desc)
1703 		return;
1704 
1705 	irq_data->chip_data = NULL;
1706 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1707 	if (!hpdev) {
1708 		kfree(int_desc);
1709 		return;
1710 	}
1711 
1712 	hv_int_desc_free(hpdev, int_desc);
1713 	put_pcichild(hpdev);
1714 }
1715 
1716 static void hv_irq_mask(struct irq_data *data)
1717 {
1718 	if (data->parent_data->chip->irq_mask)
1719 		irq_chip_mask_parent(data);
1720 }
1721 
1722 static void hv_irq_unmask(struct irq_data *data)
1723 {
1724 	hv_arch_irq_unmask(data);
1725 
1726 	if (data->parent_data->chip->irq_unmask)
1727 		irq_chip_unmask_parent(data);
1728 }
1729 
1730 struct compose_comp_ctxt {
1731 	struct hv_pci_compl comp_pkt;
1732 	struct tran_int_desc int_desc;
1733 };
1734 
1735 static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1736 				 int resp_packet_size)
1737 {
1738 	struct compose_comp_ctxt *comp_pkt = context;
1739 	struct pci_create_int_response *int_resp =
1740 		(struct pci_create_int_response *)resp;
1741 
1742 	if (resp_packet_size < sizeof(*int_resp)) {
1743 		comp_pkt->comp_pkt.completion_status = -1;
1744 		goto out;
1745 	}
1746 	comp_pkt->comp_pkt.completion_status = resp->status;
1747 	comp_pkt->int_desc = int_resp->int_desc;
1748 out:
1749 	complete(&comp_pkt->comp_pkt.host_event);
1750 }
1751 
1752 static u32 hv_compose_msi_req_v1(
1753 	struct pci_create_interrupt *int_pkt,
1754 	u32 slot, u8 vector, u16 vector_count)
1755 {
1756 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1757 	int_pkt->wslot.slot = slot;
1758 	int_pkt->int_desc.vector = vector;
1759 	int_pkt->int_desc.vector_count = vector_count;
1760 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1761 
1762 	/*
1763 	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1764 	 * hv_irq_unmask().
1765 	 */
1766 	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1767 
1768 	return sizeof(*int_pkt);
1769 }
1770 
1771 /*
1772  * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
1773  * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
1774  * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
1775  * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
1776  * not irrelevant because Hyper-V chooses the physical CPU to handle the
1777  * interrupts based on the vCPU specified in message sent to the vPCI VSP in
1778  * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
1779  * but assigning too many vPCI device interrupts to the same pCPU can cause a
1780  * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
1781  * to spread out the pCPUs that it selects.
1782  *
1783  * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
1784  * to always return the same dummy vCPU, because a second call to
1785  * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
1786  * new pCPU for the interrupt. But for the multi-MSI case, the second call to
1787  * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
1788  * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
1789  * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
1790  * the same pCPU, even though the vCPUs will be spread out by later calls
1791  * to hv_irq_unmask(), but that is the best we can do now.
1792  *
1793  * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
1794  * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
1795  * enhancement is planned for a future version. With that enhancement, the
1796  * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
1797  * device will be spread across multiple pCPUs.
1798  */
1799 
1800 /*
1801  * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1802  * by subsequent retarget in hv_irq_unmask().
1803  */
1804 static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
1805 {
1806 	return cpumask_first_and(affinity, cpu_online_mask);
1807 }
1808 
1809 /*
1810  * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
1811  */
1812 static int hv_compose_multi_msi_req_get_cpu(void)
1813 {
1814 	static DEFINE_SPINLOCK(multi_msi_cpu_lock);
1815 
1816 	/* -1 means starting with CPU 0 */
1817 	static int cpu_next = -1;
1818 
1819 	unsigned long flags;
1820 	int cpu;
1821 
1822 	spin_lock_irqsave(&multi_msi_cpu_lock, flags);
1823 
1824 	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
1825 	cpu = cpu_next;
1826 
1827 	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
1828 
1829 	return cpu;
1830 }
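
/*
 * Illustrative behavior of the round-robin above: with CPUs 0-3 online,
 * successive calls return 0, 1, 2, 3, 0, ... because cpu_next starts at
 * -1 and cpumask_next_wrap() walks cpu_online_mask, wrapping at the end.
 * Distinct multi-MSI devices therefore get distinct dummy vCPUs, which
 * nudges Hyper-V toward distinct pCPUs.
 */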
1831 
1832 static u32 hv_compose_msi_req_v2(
1833 	struct pci_create_interrupt2 *int_pkt, int cpu,
1834 	u32 slot, u8 vector, u16 vector_count)
1835 {
1836 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1837 	int_pkt->wslot.slot = slot;
1838 	int_pkt->int_desc.vector = vector;
1839 	int_pkt->int_desc.vector_count = vector_count;
1840 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1841 	int_pkt->int_desc.processor_array[0] =
1842 		hv_cpu_number_to_vp_number(cpu);
1843 	int_pkt->int_desc.processor_count = 1;
1844 
1845 	return sizeof(*int_pkt);
1846 }
1847 
1848 static u32 hv_compose_msi_req_v3(
1849 	struct pci_create_interrupt3 *int_pkt, int cpu,
1850 	u32 slot, u32 vector, u16 vector_count)
1851 {
1852 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1853 	int_pkt->wslot.slot = slot;
1854 	int_pkt->int_desc.vector = vector;
1855 	int_pkt->int_desc.reserved = 0;
1856 	int_pkt->int_desc.vector_count = vector_count;
1857 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1858 	int_pkt->int_desc.processor_array[0] =
1859 		hv_cpu_number_to_vp_number(cpu);
1860 	int_pkt->int_desc.processor_count = 1;
1861 
1862 	return sizeof(*int_pkt);
1863 }
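
/*
 * The three request layouts above track the protocol version: v1 names no
 * target processor at all (only the dummy cpu_mask), v2 adds a
 * processor_array of VP numbers plus processor_count, and v3 additionally
 * widens 'vector' from u8 to u32 (hence the explicit u8 casts at the
 * v1/v2 call sites below).
 */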
1864 
1865 /**
1866  * hv_compose_msi_msg() - Supplies a valid MSI address/data
1867  * @data:	Everything about this MSI
1868  * @msg:	Buffer that is filled in by this function
1869  *
1870  * This function unpacks the IRQ looking for target CPU set, IDT
1871  * vector and mode and sends a message to the parent partition
1872  * asking for a mapping for that tuple in this partition.  The
1873  * response supplies a data value and address to which that data
1874  * should be written to trigger that interrupt.
1875  */
1876 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1877 {
1878 	struct hv_pcibus_device *hbus;
1879 	struct vmbus_channel *channel;
1880 	struct hv_pci_dev *hpdev;
1881 	struct pci_bus *pbus;
1882 	struct pci_dev *pdev;
1883 	const struct cpumask *dest;
1884 	struct compose_comp_ctxt comp;
1885 	struct tran_int_desc *int_desc;
1886 	struct msi_desc *msi_desc;
1887 	/*
1888 	 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
1889 	 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
1890 	 */
1891 	u16 vector_count;
1892 	u32 vector;
1893 	struct {
1894 		struct pci_packet pci_pkt;
1895 		union {
1896 			struct pci_create_interrupt v1;
1897 			struct pci_create_interrupt2 v2;
1898 			struct pci_create_interrupt3 v3;
1899 		} int_pkts;
1900 	} __packed ctxt;
1901 	bool multi_msi;
1902 	u64 trans_id;
1903 	u32 size;
1904 	int ret;
1905 	int cpu;
1906 
1907 	msi_desc  = irq_data_get_msi_desc(data);
1908 	multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
1909 		    msi_desc->nvec_used > 1;
1910 
1911 	/* Reuse the previous allocation */
1912 	if (data->chip_data && multi_msi) {
1913 		int_desc = data->chip_data;
1914 		msg->address_hi = int_desc->address >> 32;
1915 		msg->address_lo = int_desc->address & 0xffffffff;
1916 		msg->data = int_desc->data;
1917 		return;
1918 	}
1919 
1920 	pdev = msi_desc_to_pci_dev(msi_desc);
1921 	dest = irq_data_get_effective_affinity_mask(data);
1922 	pbus = pdev->bus;
1923 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1924 	channel = hbus->hdev->channel;
1925 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1926 	if (!hpdev)
1927 		goto return_null_message;
1928 
1929 	/* Free any previous message that might have already been composed. */
1930 	if (data->chip_data && !multi_msi) {
1931 		int_desc = data->chip_data;
1932 		data->chip_data = NULL;
1933 		hv_int_desc_free(hpdev, int_desc);
1934 	}
1935 
1936 	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1937 	if (!int_desc)
1938 		goto drop_reference;
1939 
1940 	if (multi_msi) {
1941 		/*
1942 		 * If this is not the first MSI of Multi MSI, we already have
1943 		 * a mapping.  Can exit early.
1944 		 */
1945 		if (msi_desc->irq != data->irq) {
1946 			data->chip_data = int_desc;
1947 			int_desc->address = msi_desc->msg.address_lo |
1948 					    (u64)msi_desc->msg.address_hi << 32;
1949 			int_desc->data = msi_desc->msg.data +
1950 					 (data->irq - msi_desc->irq);
1951 			msg->address_hi = msi_desc->msg.address_hi;
1952 			msg->address_lo = msi_desc->msg.address_lo;
1953 			msg->data = int_desc->data;
1954 			put_pcichild(hpdev);
1955 			return;
1956 		}
1957 		/*
1958 		 * The vector we select here is a dummy value.  The correct
1959 		 * value gets sent to the hypervisor in unmask().  This needs
1960 		 * to be aligned with the count, and also not zero.  Multi-msi
1961 		 * is powers of 2 up to 32, so 32 will always work here.
1962 		 */
1963 		vector = 32;
1964 		vector_count = msi_desc->nvec_used;
1965 		cpu = hv_compose_multi_msi_req_get_cpu();
1966 	} else {
1967 		vector = hv_msi_get_int_vector(data);
1968 		vector_count = 1;
1969 		cpu = hv_compose_msi_req_get_cpu(dest);
1970 	}
1971 
1972 	/*
1973 	 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
1974 	 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
1975 	 * for better readability.
1976 	 */
1977 	memset(&ctxt, 0, sizeof(ctxt));
1978 	init_completion(&comp.comp_pkt.host_event);
1979 	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1980 	ctxt.pci_pkt.compl_ctxt = &comp;
1981 
1982 	switch (hbus->protocol_version) {
1983 	case PCI_PROTOCOL_VERSION_1_1:
1984 		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1985 					hpdev->desc.win_slot.slot,
1986 					(u8)vector,
1987 					vector_count);
1988 		break;
1989 
1990 	case PCI_PROTOCOL_VERSION_1_2:
1991 	case PCI_PROTOCOL_VERSION_1_3:
1992 		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1993 					cpu,
1994 					hpdev->desc.win_slot.slot,
1995 					(u8)vector,
1996 					vector_count);
1997 		break;
1998 
1999 	case PCI_PROTOCOL_VERSION_1_4:
2000 		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
2001 					cpu,
2002 					hpdev->desc.win_slot.slot,
2003 					vector,
2004 					vector_count);
2005 		break;
2006 
2007 	default:
2008 		/* As we only negotiate protocol versions known to this driver,
2009 		 * this path should never be hit. However, this is not a hot
2010 		 * path, so we print a message to aid future updates.
2011 		 */
2012 		dev_err(&hbus->hdev->device,
2013 			"Unexpected vPCI protocol, update driver.\n");
2014 		goto free_int_desc;
2015 	}
2016 
2017 	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
2018 				     size, (unsigned long)&ctxt.pci_pkt,
2019 				     &trans_id, VM_PKT_DATA_INBAND,
2020 				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2021 	if (ret) {
2022 		dev_err(&hbus->hdev->device,
2023 			"Sending request for interrupt failed: %d\n",
2024 			ret);
2025 		goto free_int_desc;
2026 	}
2027 
2028 	/*
2029 	 * Prevents hv_pci_onchannelcallback() from running concurrently
2030 	 * in the tasklet.
2031 	 */
2032 	tasklet_disable_in_atomic(&channel->callback_event);
2033 
2034 	/*
2035 	 * Since this function is called with IRQ locks held, can't
2036 	 * do normal wait for completion; instead poll.
2037 	 */
2038 	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
2039 		unsigned long flags;
2040 
2041 		/* 0xFFFF means an invalid PCI VENDOR ID. */
2042 		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
2043 			dev_err_once(&hbus->hdev->device,
2044 				     "the device has gone\n");
2045 			goto enable_tasklet;
2046 		}
2047 
2048 		/*
2049 		 * Make sure that the ring buffer data structure doesn't get
2050 		 * freed while we dereference the ring buffer pointer.  Test
2051 		 * for the channel's onchannel_callback being NULL within a
2052 		 * sched_lock critical section.  See also the inline comments
2053 		 * in vmbus_reset_channel_cb().
2054 		 */
2055 		spin_lock_irqsave(&channel->sched_lock, flags);
2056 		if (unlikely(channel->onchannel_callback == NULL)) {
2057 			spin_unlock_irqrestore(&channel->sched_lock, flags);
2058 			goto enable_tasklet;
2059 		}
2060 		hv_pci_onchannelcallback(hbus);
2061 		spin_unlock_irqrestore(&channel->sched_lock, flags);
2062 
2063 		udelay(100);
2064 	}
2065 
2066 	tasklet_enable(&channel->callback_event);
2067 
2068 	if (comp.comp_pkt.completion_status < 0) {
2069 		dev_err(&hbus->hdev->device,
2070 			"Request for interrupt failed: 0x%x\n",
2071 			comp.comp_pkt.completion_status);
2072 		goto free_int_desc;
2073 	}
2074 
2075 	/*
2076 	 * Record the assignment so that this can be unwound later. Using
2077 	 * irq_set_chip_data() here would be appropriate, but the lock it takes
2078 	 * is already held.
2079 	 */
2080 	*int_desc = comp.int_desc;
2081 	data->chip_data = int_desc;
2082 
2083 	/* Pass up the result. */
2084 	msg->address_hi = comp.int_desc.address >> 32;
2085 	msg->address_lo = comp.int_desc.address & 0xffffffff;
2086 	msg->data = comp.int_desc.data;
2087 
2088 	put_pcichild(hpdev);
2089 	return;
2090 
2091 enable_tasklet:
2092 	tasklet_enable(&channel->callback_event);
2093 	/*
2094 	 * The completion packet on the stack becomes invalid after 'return';
2095 	 * remove the ID from the VMbus requestor if the identifier is still
2096 	 * mapped to/associated with the packet.  (The identifier could have
2097 	 * been 're-used', i.e., already removed and (re-)mapped.)
2098 	 *
2099 	 * Cf. hv_pci_onchannelcallback().
2100 	 */
2101 	vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
2102 free_int_desc:
2103 	kfree(int_desc);
2104 drop_reference:
2105 	put_pcichild(hpdev);
2106 return_null_message:
2107 	msg->address_hi = 0;
2108 	msg->address_lo = 0;
2109 	msg->data = 0;
2110 }
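
/*
 * Worked example of the multi-MSI reuse path above (illustrative): if a
 * function allocated nvec_used = 4 vectors and the first compose call
 * produced address A and data D, the calls for the remaining three IRQs
 * return A and D + 1, D + 2, D + 3 (offset data->irq - msi_desc->irq)
 * without sending another message to the vPCI VSP.
 */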
2111 
2112 static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
2113 				      struct irq_domain *real_parent, struct msi_domain_info *info)
2114 {
2115 	struct irq_chip *chip = info->chip;
2116 
2117 	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
2118 		return false;
2119 
2120 	info->ops->msi_prepare = hv_msi_prepare;
2121 
2122 	chip->irq_set_affinity = irq_chip_set_affinity_parent;
2123 
2124 	if (IS_ENABLED(CONFIG_X86))
2125 		chip->flags |= IRQCHIP_MOVE_DEFERRED;
2126 
2127 	return true;
2128 }
2129 
2130 #define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS		| \
2131 				    MSI_FLAG_USE_DEF_CHIP_OPS		| \
2132 				    MSI_FLAG_PCI_MSI_MASK_PARENT)
2133 #define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI		| \
2134 				     MSI_FLAG_PCI_MSIX			| \
2135 				     MSI_FLAG_PCI_MSIX_ALLOC_DYN	| \
2136 				     MSI_GENERIC_FLAGS_MASK)
2137 
2138 static const struct msi_parent_ops hv_pcie_msi_parent_ops = {
2139 	.required_flags		= HV_PCIE_MSI_FLAGS_REQUIRED,
2140 	.supported_flags	= HV_PCIE_MSI_FLAGS_SUPPORTED,
2141 	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
2142 	.chip_flags		= HV_MSI_CHIP_FLAGS,
2143 	.prefix			= "HV-",
2144 	.init_dev_msi_info	= hv_pcie_init_dev_msi_info,
2145 };
2146 
2147 /* HW Interrupt Chip Descriptor */
2148 static struct irq_chip hv_msi_irq_chip = {
2149 	.name			= "Hyper-V PCIe MSI",
2150 	.irq_compose_msi_msg	= hv_compose_msi_msg,
2151 	.irq_set_affinity	= irq_chip_set_affinity_parent,
2152 	.irq_ack		= irq_chip_ack_parent,
2153 	.irq_eoi		= irq_chip_eoi_parent,
2154 	.irq_mask		= hv_irq_mask,
2155 	.irq_unmask		= hv_irq_unmask,
2156 };
2157 
2158 static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
2159 			       void *arg)
2160 {
2161 	/*
2162 	 * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg()
2163 	 * should be moved here.
2164 	 */
2165 	int ret;
2166 
2167 	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
2168 	if (ret < 0)
2169 		return ret;
2170 
2171 	for (int i = 0; i < nr_irqs; i++) {
2172 		irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL);
2173 		if (IS_ENABLED(CONFIG_X86))
2174 			__irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
2175 	}
2176 
2177 	return 0;
2178 }
2179 
2180 static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
2181 {
2182 	for (int i = 0; i < nr_irqs; i++)
2183 		hv_msi_free(d, virq + i);
2184 
2185 	irq_domain_free_irqs_top(d, virq, nr_irqs);
2186 }
2187 
2188 static const struct irq_domain_ops hv_pcie_domain_ops = {
2189 	.alloc	= hv_pcie_domain_alloc,
2190 	.free	= hv_pcie_domain_free,
2191 };
2192 
2193 /**
2194  * hv_pcie_init_irq_domain() - Initialize IRQ domain
2195  * @hbus:	The root PCI bus
2196  *
2197  * This function creates an IRQ domain which will be used for
2198  * interrupts from devices that have been passed through.  These
2199  * devices only support MSI and MSI-X, not line-based interrupts
2200  * or simulations of line-based interrupts through PCIe's
2201  * fabric-layer messages.  Because interrupts are remapped, we
2202  * can support multi-message MSI here.
2203  *
2204  * Return: '0' on success and error value on failure
2205  */
2206 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
2207 {
2208 	struct irq_domain_info info = {
2209 		.fwnode		= hbus->fwnode,
2210 		.ops		= &hv_pcie_domain_ops,
2211 		.host_data	= hbus,
2212 		.parent		= hv_pci_get_root_domain(),
2213 	};
2214 
2215 	hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops);
2216 	if (!hbus->irq_domain) {
2217 		dev_err(&hbus->hdev->device,
2218 			"Failed to build an MSI IRQ domain\n");
2219 		return -ENODEV;
2220 	}
2221 
2222 	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
2223 
2224 	return 0;
2225 }
2226 
2227 /**
2228  * get_bar_size() - Get the address space consumed by a BAR
2229  * @bar_val:	Value that a BAR returned after -1 was written
2230  *              to it.
2231  *
2232  * This function returns the size of the BAR, rounded up to 1
2233  * page.  It has to be rounded up because the hypervisor's page
2234  * table entry that maps the BAR into the VM can't specify an
2235  * offset within a page.  The invariant is that the hypervisor
2236  * must place any BAR smaller than a page at the
2237  * beginning of a page.
2238  *
2239  * Return:	Size in bytes of the consumed MMIO space.
2240  */
2241 static u64 get_bar_size(u64 bar_val)
2242 {
2243 	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
2244 			PAGE_SIZE);
2245 }
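
/*
 * Worked example (illustrative): a 16KiB 32-bit memory BAR probes as
 * 0xffffc000 in its low word. After the caller forces the upper 32 bits
 * to ones, bar_val & PCI_BASE_ADDRESS_MEM_MASK is 0xffffffffffffc000, so
 *
 *	1 + ~0xffffffffffffc000ULL == 0x4000	(16KiB)
 *
 * which round_up() leaves unchanged on a 4KiB PAGE_SIZE system.
 */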
2246 
2247 /**
2248  * survey_child_resources() - Total all MMIO requirements
2249  * @hbus:	Root PCI bus, as understood by this driver
2250  */
2251 static void survey_child_resources(struct hv_pcibus_device *hbus)
2252 {
2253 	struct hv_pci_dev *hpdev;
2254 	resource_size_t bar_size = 0;
2255 	unsigned long flags;
2256 	struct completion *event;
2257 	u64 bar_val;
2258 	int i;
2259 
2260 	/* If nobody is waiting on the answer, don't compute it. */
2261 	event = xchg(&hbus->survey_event, NULL);
2262 	if (!event)
2263 		return;
2264 
2265 	/* If the answer has already been computed, go with it. */
2266 	if (hbus->low_mmio_space || hbus->high_mmio_space) {
2267 		complete(event);
2268 		return;
2269 	}
2270 
2271 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2272 
2273 	/*
2274 	 * Due to an interesting quirk of the PCI spec, all memory regions
2275 	 * for a child device are a power of 2 in size and aligned in memory,
2276 	 * so it's sufficient to just add them up without tracking alignment.
2277 	 */
2278 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2279 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2280 			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
2281 				dev_err(&hbus->hdev->device,
2282 					"There's an I/O BAR in this list!\n");
2283 
2284 			if (hpdev->probed_bar[i] != 0) {
2285 				/*
2286 				 * A probed BAR has all the upper bits set that
2287 				 * can be changed.
2288 				 */
2289 
2290 				bar_val = hpdev->probed_bar[i];
2291 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2292 					bar_val |=
2293 					((u64)hpdev->probed_bar[++i] << 32);
2294 				else
2295 					bar_val |= 0xffffffff00000000ULL;
2296 
2297 				bar_size = get_bar_size(bar_val);
2298 
2299 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2300 					hbus->high_mmio_space += bar_size;
2301 				else
2302 					hbus->low_mmio_space += bar_size;
2303 			}
2304 		}
2305 	}
2306 
2307 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2308 	complete(event);
2309 }
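
/*
 * Note on the loop above (illustrative): a 64-bit BAR occupies two
 * consecutive probed_bar[] slots. The low word carries
 * PCI_BASE_ADDRESS_MEM_TYPE_64, so the ++i consumes probed_bar[i + 1] as
 * the high word and the size is accounted to high_mmio_space; a 32-bit
 * BAR is padded with all-ones upper bits and accounted to low_mmio_space.
 */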
2310 
2311 /**
2312  * prepopulate_bars() - Fill in BARs with defaults
2313  * @hbus:	Root PCI bus, as understood by this driver
2314  *
2315  * The core PCI driver code seems much, much happier if the BARs
2316  * for a device have values upon first scan. So fill them in.
2317  * The algorithm below works down from large sizes to small,
2318  * attempting to pack the assignments optimally. The assumption,
2319  * enforced in other parts of the code, is that the beginning of
2320  * the memory-mapped I/O space will be aligned on the largest
2321  * BAR size.
2322  */
2323 static void prepopulate_bars(struct hv_pcibus_device *hbus)
2324 {
2325 	resource_size_t high_size = 0;
2326 	resource_size_t low_size = 0;
2327 	resource_size_t high_base = 0;
2328 	resource_size_t low_base = 0;
2329 	resource_size_t bar_size;
2330 	struct hv_pci_dev *hpdev;
2331 	unsigned long flags;
2332 	u64 bar_val;
2333 	u32 command;
2334 	bool high;
2335 	int i;
2336 
2337 	if (hbus->low_mmio_space) {
2338 		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2339 		low_base = hbus->low_mmio_res->start;
2340 	}
2341 
2342 	if (hbus->high_mmio_space) {
2343 		high_size = 1ULL <<
2344 			(63 - __builtin_clzll(hbus->high_mmio_space));
2345 		high_base = hbus->high_mmio_res->start;
2346 	}
2347 
2348 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2349 
2350 	/*
2351 	 * Clear the memory enable bit, in case it's already set. This occurs
2352 	 * in the suspend path of hibernation, where the device is suspended,
2353 	 * resumed and suspended again: see hibernation_snapshot() and
2354 	 * hibernation_platform_enter().
2355 	 *
2356 	 * If the memory enable bit is already set, Hyper-V silently ignores
2357 	 * the below BAR updates, and the related PCI device driver can not
2358 	 * work, because reading from the device register(s) always returns
2359 	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2360 	 */
2361 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2362 		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2363 		command &= ~PCI_COMMAND_MEMORY;
2364 		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2365 	}
2366 
2367 	/* Pick addresses for the BARs. */
2368 	do {
2369 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2370 			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2371 				bar_val = hpdev->probed_bar[i];
2372 				if (bar_val == 0)
2373 					continue;
2374 				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2375 				if (high) {
2376 					bar_val |=
2377 						((u64)hpdev->probed_bar[i + 1]
2378 						 << 32);
2379 				} else {
2380 					bar_val |= 0xffffffffULL << 32;
2381 				}
2382 				bar_size = get_bar_size(bar_val);
2383 				if (high) {
2384 					if (high_size != bar_size) {
2385 						i++;
2386 						continue;
2387 					}
2388 					_hv_pcifront_write_config(hpdev,
2389 						PCI_BASE_ADDRESS_0 + (4 * i),
2390 						4,
2391 						(u32)(high_base & 0xffffff00));
2392 					i++;
2393 					_hv_pcifront_write_config(hpdev,
2394 						PCI_BASE_ADDRESS_0 + (4 * i),
2395 						4, (u32)(high_base >> 32));
2396 					high_base += bar_size;
2397 				} else {
2398 					if (low_size != bar_size)
2399 						continue;
2400 					_hv_pcifront_write_config(hpdev,
2401 						PCI_BASE_ADDRESS_0 + (4 * i),
2402 						4,
2403 						(u32)(low_base & 0xffffff00));
2404 					low_base += bar_size;
2405 				}
2406 			}
2407 			if (high_size <= 1 && low_size <= 1) {
2408 				/*
2409 				 * No need to set the PCI_COMMAND_MEMORY bit as
2410 				 * the core PCI driver doesn't require the bit
2411 				 * to be pre-set. Actually here we intentionally
2412 				 * keep the bit off so that the PCI BAR probing
2413 				 * in the core PCI driver doesn't cause Hyper-V
2414 				 * to unnecessarily unmap/map the virtual BARs
2415 				 * from/to the physical BARs multiple times.
2416 				 * This reduces the VM boot time significantly
2417 				 * if the BAR sizes are huge.
2418 				 */
2419 				break;
2420 			}
2421 		}
2422 
2423 		high_size >>= 1;
2424 		low_size >>= 1;
2425 	} while (high_size || low_size);
2426 
2427 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2428 }
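
/*
 * Worked example of the packing loop above (illustrative): children with
 * low BARs of 64KiB, 16KiB and 4KiB give low_mmio_space = 84KiB, so
 * low_size starts at 64KiB (the largest power of two not exceeding the
 * total). The first pass places the 64KiB BAR at low_base, the 32KiB pass
 * places nothing, and the 16KiB and 4KiB passes place the rest. Because
 * every BAR is a power of two in size and the walk goes largest-first,
 * each assignment stays naturally aligned.
 */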
2429 
2430 /*
2431  * Assign entries in sysfs pci slot directory.
2432  *
2433  * Note that this function does not need to lock the children list
2434  * because it is called from pci_devices_present_work which
2435  * is serialized with hv_eject_device_work because they are on the
2436  * same ordered workqueue. Therefore hbus->children list will not change
2437  * even when pci_create_slot sleeps.
2438  */
2439 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2440 {
2441 	struct hv_pci_dev *hpdev;
2442 	char name[SLOT_NAME_SIZE];
2443 	int slot_nr;
2444 
2445 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2446 		if (hpdev->pci_slot)
2447 			continue;
2448 
2449 		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2450 		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2451 		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2452 					  name, NULL);
2453 		if (IS_ERR(hpdev->pci_slot)) {
2454 			pr_warn("pci_create_slot %s failed\n", name);
2455 			hpdev->pci_slot = NULL;
2456 		}
2457 	}
2458 }
2459 
2460 /*
2461  * Remove entries in sysfs pci slot directory.
2462  */
2463 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2464 {
2465 	struct hv_pci_dev *hpdev;
2466 
2467 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2468 		if (!hpdev->pci_slot)
2469 			continue;
2470 		pci_destroy_slot(hpdev->pci_slot);
2471 		hpdev->pci_slot = NULL;
2472 	}
2473 }
2474 
2475 /*
2476  * Set NUMA node for the devices on the bus
2477  */
2478 static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2479 {
2480 	struct pci_dev *dev;
2481 	struct pci_bus *bus = hbus->bridge->bus;
2482 	struct hv_pci_dev *hv_dev;
2483 
2484 	list_for_each_entry(dev, &bus->devices, bus_list) {
2485 		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2486 		if (!hv_dev)
2487 			continue;
2488 
2489 		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
2490 		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
2491 			/*
2492 			 * The kernel may boot with some NUMA nodes offline
2493 			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
2494 			 * "numa=off". In those cases, adjust the host provided
2495 			 * NUMA node to a valid NUMA node used by the kernel.
2496 			 */
2497 			set_dev_node(&dev->dev,
2498 				     numa_map_to_online_node(
2499 					     hv_dev->desc.virtual_numa_node));
2500 
2501 		put_pcichild(hv_dev);
2502 	}
2503 }
2504 
2505 /**
2506  * create_root_hv_pci_bus() - Expose a new root PCI bus
2507  * @hbus:	Root PCI bus, as understood by this driver
2508  *
2509  * Return: 0 on success, -errno on failure
2510  */
2511 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2512 {
2513 	int error;
2514 	struct pci_host_bridge *bridge = hbus->bridge;
2515 
2516 	bridge->dev.parent = &hbus->hdev->device;
2517 	bridge->sysdata = &hbus->sysdata;
2518 	bridge->ops = &hv_pcifront_ops;
2519 
2520 	error = pci_scan_root_bus_bridge(bridge);
2521 	if (error)
2522 		return error;
2523 
2524 	pci_lock_rescan_remove();
2525 	hv_pci_assign_numa_node(hbus);
2526 	pci_bus_assign_resources(bridge->bus);
2527 	hv_pci_assign_slots(hbus);
2528 	pci_bus_add_devices(bridge->bus);
2529 	pci_unlock_rescan_remove();
2530 	hbus->state = hv_pcibus_installed;
2531 	return 0;
2532 }
2533 
2534 struct q_res_req_compl {
2535 	struct completion host_event;
2536 	struct hv_pci_dev *hpdev;
2537 };
2538 
2539 /**
2540  * q_resource_requirements() - Query Resource Requirements
2541  * @context:		The completion context.
2542  * @resp:		The response that came from the host.
2543  * @resp_packet_size:	The size in bytes of resp.
2544  *
2545  * This function is invoked on completion of a Query Resource
2546  * Requirements packet.
2547  */
2548 static void q_resource_requirements(void *context, struct pci_response *resp,
2549 				    int resp_packet_size)
2550 {
2551 	struct q_res_req_compl *completion = context;
2552 	struct pci_q_res_req_response *q_res_req =
2553 		(struct pci_q_res_req_response *)resp;
2554 	s32 status;
2555 	int i;
2556 
2557 	status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
2558 	if (status < 0) {
2559 		dev_err(&completion->hpdev->hbus->hdev->device,
2560 			"query resource requirements failed: %x\n",
2561 			status);
2562 	} else {
2563 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2564 			completion->hpdev->probed_bar[i] =
2565 				q_res_req->probed_bar[i];
2566 		}
2567 	}
2568 
2569 	complete(&completion->host_event);
2570 }
2571 
2572 /**
2573  * new_pcichild_device() - Create a new child device
2574  * @hbus:	The internal struct tracking this root PCI bus.
2575  * @desc:	The information supplied so far from the host
2576  *              about the device.
2577  *
2578  * This function creates the tracking structure for a new child
2579  * device and kicks off the process of figuring out what it is.
2580  *
2581  * Return: Pointer to the new tracking struct
2582  */
2583 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2584 		struct hv_pcidev_description *desc)
2585 {
2586 	struct hv_pci_dev *hpdev;
2587 	struct pci_child_message *res_req;
2588 	struct q_res_req_compl comp_pkt;
2589 	struct {
2590 		struct pci_packet init_packet;
2591 		u8 buffer[sizeof(struct pci_child_message)];
2592 	} pkt;
2593 	unsigned long flags;
2594 	int ret;
2595 
2596 	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
2597 	if (!hpdev)
2598 		return NULL;
2599 
2600 	hpdev->hbus = hbus;
2601 
2602 	memset(&pkt, 0, sizeof(pkt));
2603 	init_completion(&comp_pkt.host_event);
2604 	comp_pkt.hpdev = hpdev;
2605 	pkt.init_packet.compl_ctxt = &comp_pkt;
2606 	pkt.init_packet.completion_func = q_resource_requirements;
2607 	res_req = (struct pci_child_message *)pkt.buffer;
2608 	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2609 	res_req->wslot.slot = desc->win_slot.slot;
2610 
2611 	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2612 			       sizeof(struct pci_child_message),
2613 			       (unsigned long)&pkt.init_packet,
2614 			       VM_PKT_DATA_INBAND,
2615 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2616 	if (ret)
2617 		goto error;
2618 
2619 	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2620 		goto error;
2621 
2622 	hpdev->desc = *desc;
2623 	refcount_set(&hpdev->refs, 1);
2624 	get_pcichild(hpdev);
2625 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2626 
2627 	list_add_tail(&hpdev->list_entry, &hbus->children);
2628 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2629 	return hpdev;
2630 
2631 error:
2632 	kfree(hpdev);
2633 	return NULL;
2634 }
2635 
2636 /**
2637  * get_pcichild_wslot() - Find device from slot
2638  * @hbus:	Root PCI bus, as understood by this driver
2639  * @wslot:	Location on the bus
2640  *
2641  * This function looks up a PCI device and returns the internal
2642  * representation of it.  It acquires a reference on it, so that
2643  * the device won't be deleted while somebody is using it.  The
2644  * caller is responsible for calling put_pcichild() to release
2645  * this reference.
2646  *
2647  * Return:	Internal representation of a PCI device
2648  */
2649 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2650 					     u32 wslot)
2651 {
2652 	unsigned long flags;
2653 	struct hv_pci_dev *iter, *hpdev = NULL;
2654 
2655 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2656 	list_for_each_entry(iter, &hbus->children, list_entry) {
2657 		if (iter->desc.win_slot.slot == wslot) {
2658 			hpdev = iter;
2659 			get_pcichild(hpdev);
2660 			break;
2661 		}
2662 	}
2663 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2664 
2665 	return hpdev;
2666 }
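
/*
 * Typical caller pattern for the lookup above (a sketch drawn from this
 * file's own call sites):
 *
 *	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
 *	if (!hpdev)
 *		return -ENODEV;
 *	// ... use hpdev ...
 *	put_pcichild(hpdev);
 */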
2667 
2668 /**
2669  * pci_devices_present_work() - Handle new list of child devices
2670  * @work:	Work struct embedded in struct hv_dr_work
2671  *
2672  * "Bus Relations" is the Windows term for "children of this
2673  * bus."  The terminology is preserved here for people trying to
2674  * debug the interaction between Hyper-V and Linux.  This
2675  * function is called when the parent partition reports a list
2676  * of functions that should be observed under this PCI Express
2677  * port (bus).
2678  *
2679  * This function updates the list, and must tolerate being
2680  * called multiple times with the same information.  The typical
2681  * number of child devices is one, with very atypical cases
2682  * involving three or four, so the algorithms used here can be
2683  * simple and inefficient.
2684  *
2685  * It must also treat the omission of a previously observed device as
2686  * notification that the device no longer exists.
2687  *
2688  * Note that this function is serialized with hv_eject_device_work(),
2689  * because both are pushed to the ordered workqueue hbus->wq.
2690  */
2691 static void pci_devices_present_work(struct work_struct *work)
2692 {
2693 	u32 child_no;
2694 	bool found;
2695 	struct hv_pcidev_description *new_desc;
2696 	struct hv_pci_dev *hpdev;
2697 	struct hv_pcibus_device *hbus;
2698 	struct list_head removed;
2699 	struct hv_dr_work *dr_wrk;
2700 	struct hv_dr_state *dr = NULL;
2701 	unsigned long flags;
2702 
2703 	dr_wrk = container_of(work, struct hv_dr_work, wrk);
2704 	hbus = dr_wrk->bus;
2705 	kfree(dr_wrk);
2706 
2707 	INIT_LIST_HEAD(&removed);
2708 
2709 	/* Pull this off the queue and process it if it was the last one. */
2710 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2711 	while (!list_empty(&hbus->dr_list)) {
2712 		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2713 				      list_entry);
2714 		list_del(&dr->list_entry);
2715 
2716 		/* Throw this away if the list still has stuff in it. */
2717 		if (!list_empty(&hbus->dr_list)) {
2718 			kfree(dr);
2719 			continue;
2720 		}
2721 	}
2722 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2723 
2724 	if (!dr)
2725 		return;
2726 
2727 	mutex_lock(&hbus->state_lock);
2728 
2729 	/* First, mark all existing children as reported missing. */
2730 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2731 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2732 		hpdev->reported_missing = true;
2733 	}
2734 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2735 
2736 	/* Next, add back any reported devices. */
2737 	for (child_no = 0; child_no < dr->device_count; child_no++) {
2738 		found = false;
2739 		new_desc = &dr->func[child_no];
2740 
2741 		spin_lock_irqsave(&hbus->device_list_lock, flags);
2742 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2743 			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2744 			    (hpdev->desc.v_id == new_desc->v_id) &&
2745 			    (hpdev->desc.d_id == new_desc->d_id) &&
2746 			    (hpdev->desc.ser == new_desc->ser)) {
2747 				hpdev->reported_missing = false;
2748 				found = true;
2749 			}
2750 		}
2751 		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2752 
2753 		if (!found) {
2754 			hpdev = new_pcichild_device(hbus, new_desc);
2755 			if (!hpdev)
2756 				dev_err(&hbus->hdev->device,
2757 					"couldn't record a child device.\n");
2758 		}
2759 	}
2760 
2761 	/* Move missing children to a list on the stack. */
2762 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2763 	do {
2764 		found = false;
2765 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2766 			if (hpdev->reported_missing) {
2767 				found = true;
2768 				put_pcichild(hpdev);
2769 				list_move_tail(&hpdev->list_entry, &removed);
2770 				break;
2771 			}
2772 		}
2773 	} while (found);
2774 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2775 
2776 	/* Delete everything that should no longer exist. */
2777 	while (!list_empty(&removed)) {
2778 		hpdev = list_first_entry(&removed, struct hv_pci_dev,
2779 					 list_entry);
2780 		list_del(&hpdev->list_entry);
2781 
2782 		if (hpdev->pci_slot)
2783 			pci_destroy_slot(hpdev->pci_slot);
2784 
2785 		put_pcichild(hpdev);
2786 	}
2787 
2788 	switch (hbus->state) {
2789 	case hv_pcibus_installed:
2790 		/*
2791 		 * Tell the core to rescan bus
2792 		 * because there may have been changes.
2793 		 */
2794 		pci_lock_rescan_remove();
2795 		pci_scan_child_bus(hbus->bridge->bus);
2796 		hv_pci_assign_numa_node(hbus);
2797 		hv_pci_assign_slots(hbus);
2798 		pci_unlock_rescan_remove();
2799 		break;
2800 
2801 	case hv_pcibus_init:
2802 	case hv_pcibus_probed:
2803 		survey_child_resources(hbus);
2804 		break;
2805 
2806 	default:
2807 		break;
2808 	}
2809 
2810 	mutex_unlock(&hbus->state_lock);
2811 
2812 	kfree(dr);
2813 }
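
/*
 * Illustrative run of the mark-and-sweep above: if the bus currently has
 * children {A, B} and the new "bus relations" list is {B, C}, then A is
 * marked reported_missing and moved to the removed list (and deleted),
 * B survives the re-scan untouched, and C gets a fresh tracking struct
 * via new_pcichild_device().
 */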
2814 
2815 /**
2816  * hv_pci_start_relations_work() - Queue work to start device discovery
2817  * @hbus:	Root PCI bus, as understood by this driver
2818  * @dr:		The list of children returned from host
2819  *
2820  * Return:  0 on success, -errno on failure
2821  */
2822 static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2823 				       struct hv_dr_state *dr)
2824 {
2825 	struct hv_dr_work *dr_wrk;
2826 	unsigned long flags;
2827 	bool pending_dr;
2828 
2829 	if (hbus->state == hv_pcibus_removing) {
2830 		dev_info(&hbus->hdev->device,
2831 			 "PCI VMBus BUS_RELATIONS: ignored\n");
2832 		return -ENOENT;
2833 	}
2834 
2835 	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2836 	if (!dr_wrk)
2837 		return -ENOMEM;
2838 
2839 	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2840 	dr_wrk->bus = hbus;
2841 
2842 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2843 	/*
2844 	 * If pending_dr is true, we have already queued a work,
2845 	 * which will see the new dr. Otherwise, we need to
2846 	 * queue a new work.
2847 	 */
2848 	pending_dr = !list_empty(&hbus->dr_list);
2849 	list_add_tail(&dr->list_entry, &hbus->dr_list);
2850 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2851 
2852 	if (pending_dr)
2853 		kfree(dr_wrk);
2854 	else
2855 		queue_work(hbus->wq, &dr_wrk->wrk);
2856 
2857 	return 0;
2858 }
2859 
2860 /**
2861  * hv_pci_devices_present() - Handle list of new children
2862  * @hbus:      Root PCI bus, as understood by this driver
2863  * @relations: Packet from host listing children
2864  *
2865  * Process a new list of devices on the bus. The list of devices is
2866  * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
2867  * whenever a new list of devices for this bus appears.
2868  */
2869 static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2870 				   struct pci_bus_relations *relations)
2871 {
2872 	struct hv_dr_state *dr;
2873 	int i;
2874 
2875 	dr = kzalloc(struct_size(dr, func, relations->device_count),
2876 		     GFP_NOWAIT);
2877 	if (!dr)
2878 		return;
2879 
2880 	dr->device_count = relations->device_count;
2881 	for (i = 0; i < dr->device_count; i++) {
2882 		dr->func[i].v_id = relations->func[i].v_id;
2883 		dr->func[i].d_id = relations->func[i].d_id;
2884 		dr->func[i].rev = relations->func[i].rev;
2885 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2886 		dr->func[i].subclass = relations->func[i].subclass;
2887 		dr->func[i].base_class = relations->func[i].base_class;
2888 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2889 		dr->func[i].win_slot = relations->func[i].win_slot;
2890 		dr->func[i].ser = relations->func[i].ser;
2891 	}
2892 
2893 	if (hv_pci_start_relations_work(hbus, dr))
2894 		kfree(dr);
2895 }
2896 
2897 /**
2898  * hv_pci_devices_present2() - Handle list of new children
2899  * @hbus:	Root PCI bus, as understood by this driver
2900  * @relations:	Packet from host listing children
2901  *
2902  * This function is the v2 version of hv_pci_devices_present()
2903  */
2904 static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2905 				    struct pci_bus_relations2 *relations)
2906 {
2907 	struct hv_dr_state *dr;
2908 	int i;
2909 
2910 	dr = kzalloc(struct_size(dr, func, relations->device_count),
2911 		     GFP_NOWAIT);
2912 	if (!dr)
2913 		return;
2914 
2915 	dr->device_count = relations->device_count;
2916 	for (i = 0; i < dr->device_count; i++) {
2917 		dr->func[i].v_id = relations->func[i].v_id;
2918 		dr->func[i].d_id = relations->func[i].d_id;
2919 		dr->func[i].rev = relations->func[i].rev;
2920 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2921 		dr->func[i].subclass = relations->func[i].subclass;
2922 		dr->func[i].base_class = relations->func[i].base_class;
2923 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2924 		dr->func[i].win_slot = relations->func[i].win_slot;
2925 		dr->func[i].ser = relations->func[i].ser;
2926 		dr->func[i].flags = relations->func[i].flags;
2927 		dr->func[i].virtual_numa_node =
2928 			relations->func[i].virtual_numa_node;
2929 	}
2930 
2931 	if (hv_pci_start_relations_work(hbus, dr))
2932 		kfree(dr);
2933 }
2934 
2935 /**
2936  * hv_eject_device_work() - Asynchronously handles ejection
2937  * @work:	Work struct embedded in internal device struct
2938  *
2939  * This function handles ejecting a device.  Windows will
2940  * attempt to gracefully eject a device, waiting 60 seconds to
2941  * hear back from the guest OS that this completed successfully.
2942  * If this timer expires, the device will be forcibly removed.
2943  */
2944 static void hv_eject_device_work(struct work_struct *work)
2945 {
2946 	struct pci_eject_response *ejct_pkt;
2947 	struct hv_pcibus_device *hbus;
2948 	struct hv_pci_dev *hpdev;
2949 	struct pci_dev *pdev;
2950 	unsigned long flags;
2951 	int wslot;
2952 	struct {
2953 		struct pci_packet pkt;
2954 		u8 buffer[sizeof(struct pci_eject_response)];
2955 	} ctxt;
2956 
2957 	hpdev = container_of(work, struct hv_pci_dev, wrk);
2958 	hbus = hpdev->hbus;
2959 
2960 	mutex_lock(&hbus->state_lock);
2961 
2962 	/*
2963 	 * Ejection can come before or after the PCI bus has been set up, so
2964 	 * attempt to find it and tear down the bus state, if it exists.  This
2965 	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2966 	 * because hbus->bridge->bus may not exist yet.
2967 	 */
2968 	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2969 	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2970 	if (pdev) {
2971 		pci_lock_rescan_remove();
2972 		pci_stop_and_remove_bus_device(pdev);
2973 		pci_dev_put(pdev);
2974 		pci_unlock_rescan_remove();
2975 	}
2976 
2977 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2978 	list_del(&hpdev->list_entry);
2979 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2980 
2981 	if (hpdev->pci_slot)
2982 		pci_destroy_slot(hpdev->pci_slot);
2983 
2984 	memset(&ctxt, 0, sizeof(ctxt));
2985 	ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
2986 	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2987 	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2988 	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2989 			 sizeof(*ejct_pkt), 0,
2990 			 VM_PKT_DATA_INBAND, 0);
2991 
2992 	/* For the get_pcichild() in hv_pci_eject_device() */
2993 	put_pcichild(hpdev);
2994 	/* For the two refs got in new_pcichild_device() */
2995 	put_pcichild(hpdev);
2996 	put_pcichild(hpdev);
2997 	/* hpdev has been freed. Do not use it any more. */
2998 
2999 	mutex_unlock(&hbus->state_lock);
3000 }
3001 
3002 /**
3003  * hv_pci_eject_device() - Handles device ejection
3004  * @hpdev:	Internal device tracking struct
3005  *
3006  * This function is invoked when an ejection packet arrives.  It
3007  * just schedules work so that we don't re-enter the packet
3008  * delivery code handling the ejection.
3009  */
3010 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
3011 {
3012 	struct hv_pcibus_device *hbus = hpdev->hbus;
3013 	struct hv_device *hdev = hbus->hdev;
3014 
3015 	if (hbus->state == hv_pcibus_removing) {
3016 		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
3017 		return;
3018 	}
3019 
3020 	get_pcichild(hpdev);
3021 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
3022 	queue_work(hbus->wq, &hpdev->wrk);
3023 }
3024 
3025 /**
3026  * hv_pci_onchannelcallback() - Handles incoming packets
3027  * @context:	Internal bus tracking struct
3028  *
3029  * This function is invoked whenever the host sends a packet to
3030  * this channel (which is private to this root PCI bus).
3031  */
3032 static void hv_pci_onchannelcallback(void *context)
3033 {
3034 	const int packet_size = 0x100;
3035 	int ret;
3036 	struct hv_pcibus_device *hbus = context;
3037 	struct vmbus_channel *chan = hbus->hdev->channel;
3038 	u32 bytes_recvd;
3039 	u64 req_id, req_addr;
3040 	struct vmpacket_descriptor *desc;
3041 	unsigned char *buffer;
3042 	int bufferlen = packet_size;
3043 	struct pci_packet *comp_packet;
3044 	struct pci_response *response;
3045 	struct pci_incoming_message *new_message;
3046 	struct pci_bus_relations *bus_rel;
3047 	struct pci_bus_relations2 *bus_rel2;
3048 	struct pci_dev_inval_block *inval;
3049 	struct pci_dev_incoming *dev_message;
3050 	struct hv_pci_dev *hpdev;
3051 	unsigned long flags;
3052 
3053 	buffer = kmalloc(bufferlen, GFP_ATOMIC);
3054 	if (!buffer)
3055 		return;
3056 
3057 	while (1) {
3058 		ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
3059 					   &bytes_recvd, &req_id);
3060 
3061 		if (ret == -ENOBUFS) {
3062 			kfree(buffer);
3063 			/* Handle large packet */
3064 			bufferlen = bytes_recvd;
3065 			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
3066 			if (!buffer)
3067 				return;
3068 			continue;
3069 		}
3070 
3071 		/* Zero length indicates there are no more packets. */
3072 		if (ret || !bytes_recvd)
3073 			break;
3074 
3075 		/*
3076 		 * All incoming packets must be at least as large as a
3077 		 * response.
3078 		 */
3079 		if (bytes_recvd <= sizeof(struct pci_response))
3080 			continue;
3081 		desc = (struct vmpacket_descriptor *)buffer;
3082 
3083 		switch (desc->type) {
3084 		case VM_PKT_COMP:
3085 
3086 			lock_requestor(chan, flags);
3087 			req_addr = __vmbus_request_addr_match(chan, req_id,
3088 							      VMBUS_RQST_ADDR_ANY);
3089 			if (req_addr == VMBUS_RQST_ERROR) {
3090 				unlock_requestor(chan, flags);
3091 				dev_err(&hbus->hdev->device,
3092 					"Invalid transaction ID %llx\n",
3093 					req_id);
3094 				break;
3095 			}
3096 			comp_packet = (struct pci_packet *)req_addr;
3097 			response = (struct pci_response *)buffer;
3098 			/*
3099 			 * Call ->completion_func() within the critical section to make
3100 			 * sure that the packet pointer is still valid during the call:
3101 			 * here 'valid' means that there's a task still waiting for the
3102 			 * completion, and that the packet data is still on the waiting
3103 			 * task's stack.  Cf. hv_compose_msi_msg().
3104 			 */
3105 			comp_packet->completion_func(comp_packet->compl_ctxt,
3106 						     response,
3107 						     bytes_recvd);
3108 			unlock_requestor(chan, flags);
3109 			break;
3110 
3111 		case VM_PKT_DATA_INBAND:
3112 
3113 			new_message = (struct pci_incoming_message *)buffer;
3114 			switch (new_message->message_type.type) {
3115 			case PCI_BUS_RELATIONS:
3116 
3117 				bus_rel = (struct pci_bus_relations *)buffer;
3118 				if (bytes_recvd < sizeof(*bus_rel) ||
3119 				    bytes_recvd <
3120 					struct_size(bus_rel, func,
3121 						    bus_rel->device_count)) {
3122 					dev_err(&hbus->hdev->device,
3123 						"bus relations too small\n");
3124 					break;
3125 				}
3126 
3127 				hv_pci_devices_present(hbus, bus_rel);
3128 				break;
3129 
3130 			case PCI_BUS_RELATIONS2:
3131 
3132 				bus_rel2 = (struct pci_bus_relations2 *)buffer;
3133 				if (bytes_recvd < sizeof(*bus_rel2) ||
3134 				    bytes_recvd <
3135 					struct_size(bus_rel2, func,
3136 						    bus_rel2->device_count)) {
3137 					dev_err(&hbus->hdev->device,
3138 						"bus relations v2 too small\n");
3139 					break;
3140 				}
3141 
3142 				hv_pci_devices_present2(hbus, bus_rel2);
3143 				break;
3144 
3145 			case PCI_EJECT:
3146 
3147 				dev_message = (struct pci_dev_incoming *)buffer;
3148 				if (bytes_recvd < sizeof(*dev_message)) {
3149 					dev_err(&hbus->hdev->device,
3150 						"eject message too small\n");
3151 					break;
3152 				}
3153 				hpdev = get_pcichild_wslot(hbus,
3154 						      dev_message->wslot.slot);
3155 				if (hpdev) {
3156 					hv_pci_eject_device(hpdev);
3157 					put_pcichild(hpdev);
3158 				}
3159 				break;
3160 
3161 			case PCI_INVALIDATE_BLOCK:
3162 
3163 				inval = (struct pci_dev_inval_block *)buffer;
3164 				if (bytes_recvd < sizeof(*inval)) {
3165 					dev_err(&hbus->hdev->device,
3166 						"invalidate message too small\n");
3167 					break;
3168 				}
3169 				hpdev = get_pcichild_wslot(hbus,
3170 							   inval->wslot.slot);
3171 				if (hpdev) {
3172 					if (hpdev->block_invalidate) {
3173 						hpdev->block_invalidate(
3174 						    hpdev->invalidate_context,
3175 						    inval->block_mask);
3176 					}
3177 					put_pcichild(hpdev);
3178 				}
3179 				break;
3180 
3181 			default:
3182 				dev_warn(&hbus->hdev->device,
3183 					"Unimplemented protocol message %x\n",
3184 					new_message->message_type.type);
3185 				break;
3186 			}
3187 			break;
3188 
3189 		default:
3190 			dev_err(&hbus->hdev->device,
3191 				"unhandled packet type %d, tid %llx len %d\n",
3192 				desc->type, req_id, bytes_recvd);
3193 			break;
3194 		}
3195 	}
3196 
3197 	kfree(buffer);
3198 }
3199 
3200 /**
3201  * hv_pci_protocol_negotiation() - Set up protocol
3202  * @hdev:		VMBus's tracking struct for this root PCI bus.
3203  * @version:		Array of supported channel protocol versions in
3204  *			the order of probing - highest go first.
3205  * @num_version:	Number of elements in the version array.
3206  *
3207  * This driver is intended to support running on Windows 10
3208  * (server) and later versions. It will not run on earlier
3209  * versions, as they assume that many of the operations which
3210  * Linux needs accomplished with a spinlock held were done via
3211  * asynchronous messaging via VMBus.  Windows 10 increases the
3212  * surface area of PCI emulation so that these actions can take
3213  * place by suspending a virtual processor for their duration.
3214  *
3215  * This function negotiates the channel protocol version,
3216  * failing if the host doesn't support the necessary protocol
3217  * level.
3218  */
3219 static int hv_pci_protocol_negotiation(struct hv_device *hdev,
3220 				       enum pci_protocol_version_t version[],
3221 				       int num_version)
3222 {
3223 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3224 	struct pci_version_request *version_req;
3225 	struct hv_pci_compl comp_pkt;
3226 	struct pci_packet *pkt;
3227 	int ret;
3228 	int i;
3229 
3230 	/*
3231 	 * Initiate the handshake with the host and negotiate
3232 	 * a version that the host can support. We start with the
3233 	 * highest version number and go down if the host cannot
3234 	 * support it.
3235 	 */
3236 	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
3237 	if (!pkt)
3238 		return -ENOMEM;
3239 
3240 	init_completion(&comp_pkt.host_event);
3241 	pkt->completion_func = hv_pci_generic_compl;
3242 	pkt->compl_ctxt = &comp_pkt;
3243 	version_req = (struct pci_version_request *)(pkt + 1);
3244 	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
3245 
3246 	for (i = 0; i < num_version; i++) {
3247 		version_req->protocol_version = version[i];
3248 		ret = vmbus_sendpacket(hdev->channel, version_req,
3249 				sizeof(struct pci_version_request),
3250 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
3251 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3252 		if (!ret)
3253 			ret = wait_for_response(hdev, &comp_pkt.host_event);
3254 
3255 		if (ret) {
3256 			dev_err(&hdev->device,
3257 				"PCI Pass-through VSP failed to request version: %d",
3258 				ret);
3259 			goto exit;
3260 		}
3261 
3262 		if (comp_pkt.completion_status >= 0) {
3263 			hbus->protocol_version = version[i];
3264 			dev_info(&hdev->device,
3265 				"PCI VMBus probing: Using version %#x\n",
3266 				hbus->protocol_version);
3267 			goto exit;
3268 		}
3269 
3270 		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
3271 			dev_err(&hdev->device,
3272 				"PCI Pass-through VSP failed version request: %#x",
3273 				comp_pkt.completion_status);
3274 			ret = -EPROTO;
3275 			goto exit;
3276 		}
3277 
3278 		reinit_completion(&comp_pkt.host_event);
3279 	}
3280 
3281 	dev_err(&hdev->device,
3282 		"PCI pass-through VSP failed to find supported version");
3283 	ret = -EPROTO;
3284 
3285 exit:
3286 	kfree(pkt);
3287 	return ret;
3288 }
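
/*
 * Illustrative sequence (a sketch, not a trace): probing highest-first
 * against a host that tops out at protocol 1.3, the 1.4 request completes
 * with STATUS_REVISION_MISMATCH, the completion is reinitialized, and the
 * 1.3 request completes with a non-negative status, leaving
 * hbus->protocol_version == PCI_PROTOCOL_VERSION_1_3.
 */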
3289 
3290 /**
3291  * hv_pci_free_bridge_windows() - Release memory regions for the
3292  * bus
3293  * @hbus:	Root PCI bus, as understood by this driver
3294  */
3295 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
3296 {
3297 	/*
3298 	 * Set the resources back to the way they looked when they
3299 	 * were allocated by setting IORESOURCE_BUSY again.
3300 	 */
3301 
3302 	if (hbus->low_mmio_space && hbus->low_mmio_res) {
3303 		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
3304 		vmbus_free_mmio(hbus->low_mmio_res->start,
3305 				resource_size(hbus->low_mmio_res));
3306 	}
3307 
3308 	if (hbus->high_mmio_space && hbus->high_mmio_res) {
3309 		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
3310 		vmbus_free_mmio(hbus->high_mmio_res->start,
3311 				resource_size(hbus->high_mmio_res));
3312 	}
3313 }
3314 
3315 /**
3316  * hv_pci_allocate_bridge_windows() - Allocate memory regions
3317  * for the bus
3318  * @hbus:	Root PCI bus, as understood by this driver
3319  *
3320  * This function calls vmbus_allocate_mmio(), which is itself a
3321  * bit of a compromise.  Ideally, we might change the pnp layer
3322  * in the kernel such that it comprehends either PCI devices
3323  * which are "grandchildren of ACPI," with some intermediate bus
3324  * node (in this case, VMBus) or change it such that it
3325  * understands VMBus.  The pnp layer, however, has been declared
3326  * deprecated, and not subject to change.
3327  *
3328  * The workaround, implemented here, is to ask VMBus to allocate
3329  * MMIO space for this bus.  VMBus itself knows which ranges are
3330  * appropriate by looking at its own ACPI objects.  Then, after
3331  * these ranges are claimed, they're modified to look like they
3332  * would have looked if the ACPI and pnp code had allocated
3333  * bridge windows.  These descriptors have to exist in this form
3334  * in order to satisfy the code which will get invoked when the
3335  * endpoint PCI function driver calls request_mem_region() or
3336  * request_mem_region_exclusive().
3337  *
3338  * Return: 0 on success, -errno on failure
3339  */
hv_pci_allocate_bridge_windows(struct hv_pcibus_device * hbus)3340 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
3341 {
3342 	resource_size_t align;
3343 	int ret;
3344 
3345 	if (hbus->low_mmio_space) {
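		/*
		 * Align to the largest power of two that fits in the
		 * requested size; e.g. (illustrative) a request for
		 * 0x48000000 bytes of low MMIO space is aligned to
		 * 0x40000000 (1 GiB).
		 */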
		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
					  (u64)(u32)0xffffffff,
					  hbus->low_mmio_space,
					  align, false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
				hbus->low_mmio_space);
			return ret;
		}

		/* Modify this resource to become a bridge window. */
		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
	}

	if (hbus->high_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
					  0x100000000, -1,
					  hbus->high_mmio_space, align,
					  false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
				hbus->high_mmio_space);
			goto release_low_mmio;
		}

		/* Modify this resource to become a bridge window. */
		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
	}

	return 0;

release_low_mmio:
	if (hbus->low_mmio_res) {
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	return ret;
}

/**
 * hv_allocate_config_window() - Find MMIO space for PCI Config
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * This function claims memory-mapped I/O space for accessing
 * configuration space for the functions on this bus.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
{
	int ret;

	/*
	 * Set up a region of MMIO space to use for accessing configuration
	 * space.
	 */
	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
	if (ret)
		return ret;

	/*
	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
	 * resource claims (those which cannot be overlapped) and the ranges
	 * which are valid for the children of this bus, which are intended
	 * to be overlapped by those children.  Set the flag on this claim
	 * meaning that this region can't be overlapped.
	 */

	hbus->mem_config->flags |= IORESOURCE_BUSY;

	return 0;
}

static void hv_free_config_window(struct hv_pcibus_device *hbus)
{
	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);

/**
 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_enter_d0(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_bus_d0_entry *d0_entry;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	bool retry = true;
	int ret;

enter_d0_retry:
	/*
	 * Tell the host that the bus is ready to use, and has moved into the
	 * powered-on state.  This includes telling the host which region
	 * of memory-mapped I/O space has been chosen for configuration space
	 * access.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
	d0_entry->mmio_base = hbus->mem_config->start;

	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (!ret)
		ret = wait_for_response(hdev, &comp_pkt.host_event);

	if (ret)
		goto exit;

	/*
	 * In certain cases (e.g. kdump), the PCI device of interest was
	 * not cleanly shut down and its resources are still held on the
	 * host side, so the host may return an invalid device status.
	 * We need to explicitly request that the host release the
	 * resources, then try to enter D0 again.
	 */
	if (comp_pkt.completion_status < 0 && retry) {
		retry = false;

		dev_err(&hdev->device, "Retrying D0 Entry\n");

		/*
		 * hv_pci_bus_exit() calls hv_send_resources_released()
		 * to free up the resources of its child devices.
		 * In the kdump kernel we need to set
		 * wslot_res_allocated to 255 so it scans all child
		 * devices to release resources allocated in the
		 * normal kernel before the panic happened.
		 */
		hbus->wslot_res_allocated = 255;

		ret = hv_pci_bus_exit(hdev, true);

		if (ret == 0) {
			kfree(pkt);
			goto enter_d0_retry;
		}
		dev_err(&hdev->device,
			"Retrying D0 failed with ret %d\n", ret);
	}

	if (comp_pkt.completion_status < 0) {
		dev_err(&hdev->device,
			"PCI Pass-through VSP failed D0 Entry with status %x\n",
			comp_pkt.completion_status);
		ret = -EPROTO;
		goto exit;
	}

	ret = 0;

exit:
	kfree(pkt);
	return ret;
}

/**
 * hv_pci_query_relations() - Ask host to send list of child
 * devices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_query_relations(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_message message;
	struct completion comp;
	int ret;

	/* Ask the host to send along the list of child devices */
	init_completion(&comp);
	if (cmpxchg(&hbus->survey_event, NULL, &comp))
		return -ENOTEMPTY;

	memset(&message, 0, sizeof(message));
	message.type = PCI_QUERY_BUS_RELATIONS;

	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
			       0, VM_PKT_DATA_INBAND, 0);
	if (!ret)
		ret = wait_for_response(hdev, &comp);

	/*
	 * In the case of fast device addition/removal, it's possible that
	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
	 * already got a PCI_BUS_RELATIONS* message from the host and the
	 * channel callback already scheduled a work to hbus->wq, which can be
	 * running pci_devices_present_work() -> survey_child_resources() ->
	 * complete(&hbus->survey_event), even after hv_pci_query_relations()
	 * exits and the stack variable 'comp' is no longer valid; as a result,
	 * a hang or a page fault may happen when the complete() calls
	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
	 * -ENODEV, there can't be any more work items scheduled to hbus->wq
	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
	 * channel->rescind = true.
	 */
	flush_workqueue(hbus->wq);

	return ret;
}

/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * The host OS is expecting to be sent a request as a message
 * which contains all the resources that the device will use.
 * The response contains those same resources, "translated",
 * which is to say, the values which should be used by the
 * hardware when it delivers an interrupt.  (MMIO resources are
 * used in local terms.)  This is nice for Windows, and lines up
 * with the FDO/PDO split, which doesn't exist in Linux.  Linux
 * is deeply expecting to scan an emulated PCI configuration
 * space.  So this message is sent here only to drive the state
 * machine on the host forward.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	int wslot;
	int ret;

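	/*
	 * Protocol 1.2 and newer use the PCI_RESOURCES_ASSIGNED2 message;
	 * older hosts only understand PCI_RESOURCES_ASSIGNED.
	 */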
	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ret = 0;

	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)(pkt + 1);
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)(pkt + 1);
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, pkt + 1,
				size_res, (unsigned long)pkt,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);
		if (ret)
			break;

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x",
				comp_pkt.completion_status);
			break;
		}

		hbus->wslot_res_allocated = wslot;
	}

	kfree(pkt);
	return ret;
}

/**
 * hv_send_resources_released() - Report local resources
 * released
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	int wslot;
	int ret;

	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;

		hbus->wslot_res_allocated = wslot - 1;
	}

	hbus->wslot_res_allocated = -1;

	return 0;
}

#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for passthrough PCI devices of this driver.
 */
#define HVPCI_DOM_INVALID 0

/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * @dom: Requested domain number
 *
 * Check if the requested PCI domain number is in use, and return another
 * number if it is.
 *
 * Return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}
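
/*
 * Illustrative example: if domain 0x2345 is free, hv_get_dom_num(0x2345)
 * claims and returns it; if it is already taken, the first clear bit in
 * hvpci_dom_map is claimed instead (bit 0 is pre-set at module init, so
 * HVPCI_DOM_INVALID is never handed out).
 */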

/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}

/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev:	VMBus's tracking struct for this root PCI bus
 * @dev_id:	Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct pci_host_bridge *bridge;
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
	if (!bridge)
		return -ENOMEM;

	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;

	hbus->bridge = bridge;
	mutex_init(&hbus->state_lock);
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and other
	 * specs. Pull it from the instance ID, to get something usually
	 * unique. In rare cases of collision, we will find out another number
	 * not in use.
	 *
	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
	 * together with this guest driver can guarantee that (1) The only
	 * domain used by Gen1 VMs for something that looks like a physical
	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
	 * (2) There will be no overlap between domains (after fixing possible
	 * collisions) in the same VM.
	 */
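	/*
	 * Illustrative example: an instance GUID whose bytes b[4] and b[5]
	 * are 0x34 and 0x12 yields dom_req 0x1234.
	 */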
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%x or other numbers", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%x has collision, using 0x%x",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
	hbus->use_calls = false;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	mutex_unlock(&hbus->state_lock);
	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
release_state_lock:
	mutex_unlock(&hbus->state_lock);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct vmbus_channel *chan = hdev->channel;
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct pci_message *msg;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	u64 trans_id;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (chan->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs got in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	msg = (struct pci_message *)pkt.buffer;
	msg->type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
				     (unsigned long)&pkt.teardown_packet,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
		/*
		 * The completion packet on the stack becomes invalid after
		 * 'return'; remove the ID from the VMbus requestor if the
		 * identifier is still mapped to/associated with the packet.
		 *
		 * Cf. hv_pci_onchannelcallback().
		 */
		vmbus_request_addr_match(chan, trans_id,
					 (unsigned long)&pkt.teardown_packet);
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev:	VMBus's tracking struct for this root PCI bus
 */
static void hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), so it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
}

static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend().  When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can still be running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which hv_pci_suspend() may be closing, e.g.
	 * the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it flushes
	 * hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}

static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct irq_data *irq_data;
	struct msi_desc *entry;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		return 0;

	guard(msi_descs_lock)(&pdev->dev);
	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data))
			return -EINVAL;
		hv_compose_msi_msg(irq_data, &entry->msg);
	}
	return 0;
}

/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
 * doesn't trap and emulate the MMIO accesses, hv_compose_msi_msg() must
 * be used here to ask Hyper-V to re-create the IOMMU Interrupt Remapping
 * Table entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}

static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto release_state_lock;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	mutex_unlock(&hbus->state_lock);
	return 0;

release_state_lock:
	mutex_unlock(&hbus->state_lock);
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name		= "hv_pci",
	.id_table	= hv_pci_id_table,
	.probe		= hv_pci_probe,
	.remove		= hv_pci_remove,
	.suspend	= hv_pci_suspend,
	.resume		= hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	int ret;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	if (hv_root_partition() && !hv_nested)
		return -ENODEV;

	ret = hv_pci_irqchip_init();
	if (ret)
		return ret;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");