// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/pci.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>

static int xen_mcfg_late(void);
#endif

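/*
 * Whether the hypervisor supports the segment-aware PHYSDEVOP_pci_device_*
 * hypercalls; cleared once PHYSDEVOP_pci_device_add returns -ENOSYS so that
 * the legacy PHYSDEVOP_manage_pci_* interface is used instead.
 */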
static bool __read_mostly pci_seg_supported = true;

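/*
 * Report a newly added PCI device to the hypervisor: segment, bus and devfn,
 * whether it is an SR-IOV virtual function or an ARI extended function, and,
 * when ACPI is available, the NUMA proximity domain obtained via _PXM.
 * Falls back to the legacy PHYSDEVOP_manage_pci_add{,_ext} hypercalls on
 * hypervisors without PCI segment support.
 */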
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve the MCFG areas in Xen on the first invocation only, since
	 * this can be called from inside acpi_init(), immediately after the
	 * MCFG table has been parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

		add->seg = pci_domain_nr(pci_dev->bus);
		add->bus = pci_dev->bus->number;
		add->devfn = pci_dev->devfn;

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device is not listed in the ACPI namespace at
			 * all.  Fall back to the ACPI handle of a parent PCI
			 * bus.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_virtfn	= 1,
			.physfn.bus	= physfn->bus->number,
			.physfn.devfn	= physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_extfn	= 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus	= pci_dev->bus->number,
			.devfn	= pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}

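/*
 * Tell the hypervisor that a PCI device is being removed, using
 * PHYSDEVOP_pci_device_remove or, on hypervisors without PCI segment
 * support, the legacy PHYSDEVOP_manage_pci_remove call (segment 0 only).
 */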
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}

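/*
 * PCI bus notifier: forward device add/remove events to the hypervisor.
 * Errors are only logged; the notifier returns NOTIFY_OK either way so
 * that device enumeration itself is not blocked.
 */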
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};
static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);

#ifdef CONFIG_PCI_MMCONFIG
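/*
 * Report the reservation state of every MMCONFIG (MCFG) region to the
 * hypervisor so that it can safely use extended configuration space
 * accesses.  Only meaningful in the initial domain.
 */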
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Report each MCFG region to the hypervisor so it can verify the reservation. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif

#ifdef CONFIG_XEN_DOM0
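/*
 * Bookkeeping of which domain a physical PCI device has been assigned to,
 * keyed by struct pci_dev and protected by dev_domain_list_spinlock.
 */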
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);

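/* Look up the owner entry for @dev; caller must hold dev_domain_list_spinlock. */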
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}

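/* Return the id of the domain owning @dev, or -ENODEV if none is registered. */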
int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);
	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);

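/*
 * Record @domain as the owner of @dev, typically done by a PCI backend such
 * as xen-pciback when the device is assigned to a guest.  Returns -EEXIST if
 * an owner is already registered and -ENODEV if the tracking structure
 * cannot be allocated.
 */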
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(*owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);

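/* Drop the owner record for @dev; returns -ENODEV if none was registered. */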
int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif
329