xref: /linux/drivers/vfio/pci/vfio_pci_core.c (revision 3bf83e47b497d2630d2dcb408ec14ad95050cead)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
4  *     Author: Alex Williamson <alex.williamson@redhat.com>
5  *
6  * Derived from original vfio:
7  * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
8  * Author: Tom Lyon, pugs@cisco.com
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/aperture.h>
14 #include <linux/device.h>
15 #include <linux/eventfd.h>
16 #include <linux/file.h>
17 #include <linux/interrupt.h>
18 #include <linux/iommu.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/notifier.h>
22 #include <linux/pci.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/types.h>
26 #include <linux/uaccess.h>
27 #include <linux/vgaarb.h>
28 #include <linux/nospec.h>
29 #include <linux/sched/mm.h>
30 #include <linux/iommufd.h>
31 #include <linux/pci-p2pdma.h>
32 #if IS_ENABLED(CONFIG_EEH)
33 #include <asm/eeh.h>
34 #endif
35 
36 #include "vfio_pci_priv.h"
37 
38 #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
39 #define DRIVER_DESC "core driver for VFIO based PCI devices"
40 
41 static bool nointxmask;
42 static bool disable_vga;
43 static bool disable_idle_d3;
44 
45 static void vfio_pci_eventfd_rcu_free(struct rcu_head *rcu)
46 {
47 	struct vfio_pci_eventfd *eventfd =
48 		container_of(rcu, struct vfio_pci_eventfd, rcu);
49 
50 	eventfd_ctx_put(eventfd->ctx);
51 	kfree(eventfd);
52 }
53 
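/*
 * Install a new eventfd context (or NULL) at *peventfd under igate; the old
 * entry, if any, is put and freed only after an RCU grace period so that
 * lockless readers never see a context being torn down.
 */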
54 int vfio_pci_eventfd_replace_locked(struct vfio_pci_core_device *vdev,
55 				    struct vfio_pci_eventfd __rcu **peventfd,
56 				    struct eventfd_ctx *ctx)
57 {
58 	struct vfio_pci_eventfd *new = NULL;
59 	struct vfio_pci_eventfd *old;
60 
61 	lockdep_assert_held(&vdev->igate);
62 
63 	if (ctx) {
64 		new = kzalloc_obj(*new, GFP_KERNEL_ACCOUNT);
65 		if (!new)
66 			return -ENOMEM;
67 
68 		new->ctx = ctx;
69 	}
70 
71 	old = rcu_replace_pointer(*peventfd, new,
72 				  lockdep_is_held(&vdev->igate));
73 	if (old)
74 		call_rcu(&old->rcu, vfio_pci_eventfd_rcu_free);
75 
76 	return 0;
77 }
78 
79 /* List of PFs that vfio_pci_core_sriov_configure() has been called on */
80 static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
81 static LIST_HEAD(vfio_pci_sriov_pfs);
82 
83 struct vfio_pci_dummy_resource {
84 	struct resource		resource;
85 	int			index;
86 	struct list_head	res_next;
87 };
88 
89 struct vfio_pci_vf_token {
90 	struct mutex		lock;
91 	uuid_t			uuid;
92 	int			users;
93 };
94 
95 static inline bool vfio_vga_disabled(void)
96 {
97 #ifdef CONFIG_VFIO_PCI_VGA
98 	return disable_vga;
99 #else
100 	return true;
101 #endif
102 }
103 
104 /*
105  * Our VGA arbiter participation is limited since we don't know anything
106  * about the device itself.  However, if the device is the only VGA device
107  * downstream of a bridge and VFIO VGA support is disabled, then we can
108  * safely return legacy VGA IO and memory as not decoded since the user
109  * has no way to get to it and routing can be disabled externally at the
110  * bridge.
111  */
112 static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
113 {
114 	struct pci_dev *tmp = NULL;
115 	unsigned char max_busnr;
116 	unsigned int decodes;
117 
118 	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
119 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
120 		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
121 
122 	max_busnr = pci_bus_max_busnr(pdev->bus);
123 	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
124 
125 	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
126 		if (tmp == pdev ||
127 		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
128 		    pci_is_root_bus(tmp->bus))
129 			continue;
130 
131 		if (tmp->bus->number >= pdev->bus->number &&
132 		    tmp->bus->number <= max_busnr) {
133 			pci_dev_put(tmp);
134 			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
135 			break;
136 		}
137 	}
138 
139 	return decodes;
140 }
141 
142 static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
143 {
144 	struct resource *res;
145 	int i;
146 	struct vfio_pci_dummy_resource *dummy_res;
147 
148 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
149 		int bar = i + PCI_STD_RESOURCES;
150 
151 		res = &vdev->pdev->resource[bar];
152 
153 		if (vdev->pdev->non_mappable_bars)
154 			goto no_mmap;
155 
156 		if (!(res->flags & IORESOURCE_MEM))
157 			goto no_mmap;
158 
159 		/*
160 		 * The PCI core shouldn't set up a resource with a
161 		 * type but zero size. But there may be bugs that
162 		 * cause us to do that.
163 		 */
164 		if (!resource_size(res))
165 			goto no_mmap;
166 
167 		if (resource_size(res) >= PAGE_SIZE) {
168 			vdev->bar_mmap_supported[bar] = true;
169 			continue;
170 		}
171 
172 		if (!(res->start & ~PAGE_MASK)) {
173 			/*
174 			 * Add a dummy resource to reserve the remainder
175 			 * of the exclusive page in case a hot-added
176 			 * device's BAR is assigned into it.
177 			 */
178 			dummy_res = kzalloc_obj(*dummy_res, GFP_KERNEL_ACCOUNT);
179 			if (dummy_res == NULL)
180 				goto no_mmap;
181 
182 			dummy_res->resource.name = "vfio sub-page reserved";
183 			dummy_res->resource.start = res->end + 1;
184 			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
185 			dummy_res->resource.flags = res->flags;
186 			if (request_resource(res->parent,
187 						&dummy_res->resource)) {
188 				kfree(dummy_res);
189 				goto no_mmap;
190 			}
191 			dummy_res->index = bar;
192 			list_add(&dummy_res->res_next,
193 					&vdev->dummy_resources_list);
194 			vdev->bar_mmap_supported[bar] = true;
195 			continue;
196 		}
197 		/*
198 		 * Here we don't handle the case when the BAR is not page
199 		 * aligned because we can't expect the BAR to be
200 		 * assigned to the same location within a page in the
201 		 * guest when we pass through the BAR. And it's hard to
202 		 * access this BAR in userspace because we have no way
203 		 * to get the BAR's location within a page.
204 		 */
205 no_mmap:
206 		vdev->bar_mmap_supported[bar] = false;
207 	}
208 }
209 
210 struct vfio_pci_group_info;
211 static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
212 static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
213 				      struct vfio_pci_group_info *groups,
214 				      struct iommufd_ctx *iommufd_ctx);
215 
216 /*
217  * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
218  * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
219  * If a device implements the former but not the latter we would typically
220  * expect broken_intx_masking to be set and require an exclusive interrupt.
221  * However since we do have control of the device's ability to assert INTx,
222  * we can instead pretend that the device does not implement INTx, virtualizing
223  * the pin register to report zero and maintaining DisINTx set on the host.
224  */
225 static bool vfio_pci_nointx(struct pci_dev *pdev)
226 {
227 	switch (pdev->vendor) {
228 	case PCI_VENDOR_ID_INTEL:
229 		switch (pdev->device) {
230 		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
231 		case 0x1572:
232 		case 0x1574:
233 		case 0x1580 ... 0x1581:
234 		case 0x1583 ... 0x158b:
235 		case 0x37d0 ... 0x37d2:
236 		/* X550 */
237 		case 0x1563:
238 			return true;
239 		default:
240 			return false;
241 		}
242 	}
243 
244 	return false;
245 }
246 
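/*
 * Note whether the device reports NoSoftRst-; such devices perform an
 * internal reset on the D3hot->D0 transition, so their state must be saved
 * and restored by the wrapper below.
 */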
247 static void vfio_pci_probe_power_state(struct vfio_pci_core_device *vdev)
248 {
249 	struct pci_dev *pdev = vdev->pdev;
250 	u16 pmcsr;
251 
252 	if (!pdev->pm_cap)
253 		return;
254 
255 	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
256 
257 	vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
258 }
259 
260 /*
261  * pci_set_power_state() wrapper handling devices which perform a soft reset on
262  * D3->D0 transition.  Save state prior to D0/1/2->D3, stash it on the vdev,
263  * restore when returned to D0.  Saved separately from pci_saved_state for use
264  * by PM capability emulation and separately from pci_dev internal saved state
265  * to avoid it being overwritten and consumed around other resets.
266  */
267 int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t state)
268 {
269 	struct pci_dev *pdev = vdev->pdev;
270 	bool needs_restore = false, needs_save = false;
271 	int ret;
272 
273 	/* Prevent changing power state for PFs with VFs enabled */
274 	if (pci_num_vf(pdev) && state > PCI_D0)
275 		return -EBUSY;
276 
277 	if (vdev->needs_pm_restore) {
278 		if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
279 			pci_save_state(pdev);
280 			needs_save = true;
281 		}
282 
283 		if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
284 			needs_restore = true;
285 	}
286 
287 	ret = pci_set_power_state(pdev, state);
288 
289 	if (!ret) {
290 		/* D3 might be unsupported via quirk, skip unless in D3 */
291 		if (needs_save && pdev->current_state >= PCI_D3hot) {
292 			/*
293 			 * The current PCI state will be saved locally in
294 			 * 'pm_save' during the D3hot transition. When the
295 			 * device state is changed to D0 again with the current
296 			 * function, then pci_store_saved_state() will restore
297 			 * the state and will free the memory pointed by
298 			 * 'pm_save'. There are a few cases where the PCI power
299 			 * state can be changed to D0 without the involvement
300 			 * of the driver. For these cases, free the earlier
301 			 * allocated memory first before overwriting 'pm_save'
302 			 * to prevent the memory leak.
303 			 */
304 			kfree(vdev->pm_save);
305 			vdev->pm_save = pci_store_saved_state(pdev);
306 		} else if (needs_restore) {
307 			pci_load_and_free_saved_state(pdev, &vdev->pm_save);
308 			pci_restore_state(pdev);
309 		}
310 	}
311 
312 	return ret;
313 }
314 
315 static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev,
316 				     struct eventfd_ctx *efdctx)
317 {
318 	/*
319 	 * The vdev power related flags are protected with 'memory_lock'
320 	 * semaphore.
321 	 */
322 	vfio_pci_zap_and_down_write_memory_lock(vdev);
323 	vfio_pci_dma_buf_move(vdev, true);
324 
325 	if (vdev->pm_runtime_engaged) {
326 		up_write(&vdev->memory_lock);
327 		return -EINVAL;
328 	}
329 
330 	vdev->pm_runtime_engaged = true;
331 	vdev->pm_wake_eventfd_ctx = efdctx;
332 	pm_runtime_put_noidle(&vdev->pdev->dev);
333 	up_write(&vdev->memory_lock);
334 
335 	return 0;
336 }
337 
338 static int vfio_pci_core_pm_entry(struct vfio_pci_core_device *vdev, u32 flags,
339 				  void __user *arg, size_t argsz)
340 {
341 	int ret;
342 
343 	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
344 	if (ret != 1)
345 		return ret;
346 
347 	/*
348 	 * Inside vfio_pci_runtime_pm_entry(), only the runtime PM usage count
349 	 * will be decremented. The pm_runtime_put() will be invoked again
350 	 * while returning from the ioctl and then the device can go into
351 	 * runtime suspended state.
352 	 */
353 	return vfio_pci_runtime_pm_entry(vdev, NULL);
354 }
355 
356 static int vfio_pci_core_pm_entry_with_wakeup(
357 	struct vfio_pci_core_device *vdev, u32 flags,
358 	struct vfio_device_low_power_entry_with_wakeup __user *arg,
359 	size_t argsz)
360 {
361 	struct vfio_device_low_power_entry_with_wakeup entry;
362 	struct eventfd_ctx *efdctx;
363 	int ret;
364 
365 	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
366 				 sizeof(entry));
367 	if (ret != 1)
368 		return ret;
369 
370 	if (copy_from_user(&entry, arg, sizeof(entry)))
371 		return -EFAULT;
372 
373 	if (entry.wakeup_eventfd < 0)
374 		return -EINVAL;
375 
376 	efdctx = eventfd_ctx_fdget(entry.wakeup_eventfd);
377 	if (IS_ERR(efdctx))
378 		return PTR_ERR(efdctx);
379 
380 	ret = vfio_pci_runtime_pm_entry(vdev, efdctx);
381 	if (ret)
382 		eventfd_ctx_put(efdctx);
383 
384 	return ret;
385 }
386 
387 static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
388 {
389 	if (vdev->pm_runtime_engaged) {
390 		vdev->pm_runtime_engaged = false;
391 		pm_runtime_get_noresume(&vdev->pdev->dev);
392 
393 		if (vdev->pm_wake_eventfd_ctx) {
394 			eventfd_ctx_put(vdev->pm_wake_eventfd_ctx);
395 			vdev->pm_wake_eventfd_ctx = NULL;
396 		}
397 	}
398 }
399 
400 static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
401 {
402 	/*
403 	 * The vdev power related flags are protected with 'memory_lock'
404 	 * semaphore.
405 	 */
406 	down_write(&vdev->memory_lock);
407 	__vfio_pci_runtime_pm_exit(vdev);
408 	if (__vfio_pci_memory_enabled(vdev))
409 		vfio_pci_dma_buf_move(vdev, false);
410 	up_write(&vdev->memory_lock);
411 }
412 
413 static int vfio_pci_core_pm_exit(struct vfio_pci_core_device *vdev, u32 flags,
414 				 void __user *arg, size_t argsz)
415 {
416 	int ret;
417 
418 	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
419 	if (ret != 1)
420 		return ret;
421 
422 	/*
423 	 * The device is always in the active state here due to pm wrappers
424 	 * around ioctls. If the device had entered a low power state and
425 	 * pm_wake_eventfd_ctx is valid, vfio_pci_core_runtime_resume() has
426 	 * already signaled the eventfd and exited low power mode itself.
427 	 * pm_runtime_engaged protects the redundant call here.
428 	 */
429 	vfio_pci_runtime_pm_exit(vdev);
430 	return 0;
431 }
432 
433 #ifdef CONFIG_PM
434 static int vfio_pci_core_runtime_suspend(struct device *dev)
435 {
436 	struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
437 
438 	down_write(&vdev->memory_lock);
439 	/*
440 	 * The user can move the device into D3hot state before invoking
441 	 * power management IOCTL. Move the device into D0 state here and then
442 	 * the pci-driver core runtime PM suspend function will move the device
443 	 * into the low power state. Also, for the devices which have
444 	 * NoSoftRst-, it will help in restoring the original state
445 	 * (saved locally in 'vdev->pm_save').
446 	 */
447 	vfio_pci_set_power_state(vdev, PCI_D0);
448 	up_write(&vdev->memory_lock);
449 
450 	/*
451 	 * If INTx is enabled, then mask INTx before going into the runtime
452 	 * suspended state and unmask the same in the runtime resume.
453 	 * If INTx has already been masked by the user, then
454 	 * vfio_pci_intx_mask() will return false and in that case, INTx
455 	 * should not be unmasked in the runtime resume.
456 	 */
457 	vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
458 				vfio_pci_intx_mask(vdev));
459 
460 	return 0;
461 }
462 
463 static int vfio_pci_core_runtime_resume(struct device *dev)
464 {
465 	struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
466 
467 	/*
468 	 * Resuming with a pm_wake_eventfd_ctx set signals the eventfd and
469 	 * exits low power mode.
470 	 */
471 	down_write(&vdev->memory_lock);
472 	if (vdev->pm_wake_eventfd_ctx) {
473 		eventfd_signal(vdev->pm_wake_eventfd_ctx);
474 		__vfio_pci_runtime_pm_exit(vdev);
475 	}
476 	up_write(&vdev->memory_lock);
477 
478 	if (vdev->pm_intx_masked)
479 		vfio_pci_intx_unmask(vdev);
480 
481 	return 0;
482 }
483 #endif /* CONFIG_PM */
484 
485 /*
486  * Eager-request BAR resources, and iomap them.  Soft failures are
487  * allowed, and consumers must check the barmap before use in order to
488  * keep user-visible behaviour compatible with the previous on-demand
489  * allocation method.
490  */
491 static void vfio_pci_core_map_bars(struct vfio_pci_core_device *vdev)
492 {
493 	struct pci_dev *pdev = vdev->pdev;
494 	int i;
495 
496 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
497 		int bar = i + PCI_STD_RESOURCES;
498 
499 		vdev->barmap[bar] = IOMEM_ERR_PTR(-ENODEV);
500 
501 		if (!pci_resource_len(pdev, i))
502 			continue;
503 
504 		if (pci_request_selected_regions(pdev, 1 << bar, "vfio")) {
505 			pci_dbg(pdev, "Failed to reserve region %d\n", bar);
506 			vdev->barmap[bar] = IOMEM_ERR_PTR(-EBUSY);
507 			continue;
508 		}
509 
510 		vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
511 		if (!vdev->barmap[bar]) {
512 			pci_dbg(pdev, "Failed to iomap region %d\n", bar);
513 			pci_release_selected_regions(pdev, 1 << bar);
514 			vdev->barmap[bar] = IOMEM_ERR_PTR(-ENOMEM);
515 		}
516 	}
517 }
518 
519 /*
520  * The pci-driver core runtime PM routines always save the device state
521  * before going into suspended state. If the device is going into low power
522  * state only with runtime PM ops, then no explicit handling is needed
523  * for the devices which have NoSoftRst-.
524  */
525 static const struct dev_pm_ops vfio_pci_core_pm_ops = {
526 	SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend,
527 			   vfio_pci_core_runtime_resume,
528 			   NULL)
529 };
530 
531 int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
532 {
533 	struct pci_dev *pdev = vdev->pdev;
534 	int ret;
535 	u16 cmd;
536 	u8 msix_pos;
537 
538 	if (!disable_idle_d3) {
539 		ret = pm_runtime_resume_and_get(&pdev->dev);
540 		if (ret < 0)
541 			return ret;
542 	}
543 
544 	/* Don't allow our initial saved state to include busmaster */
545 	pci_clear_master(pdev);
546 
547 	ret = pci_enable_device(pdev);
548 	if (ret)
549 		goto out_power;
550 
551 	/* If reset fails because of the device lock, fail this path entirely */
552 	ret = pci_try_reset_function(pdev);
553 	if (ret == -EAGAIN)
554 		goto out_disable_device;
555 
556 	vdev->reset_works = !ret;
557 	pci_save_state(pdev);
558 	vdev->pci_saved_state = pci_store_saved_state(pdev);
559 	if (!vdev->pci_saved_state)
560 		pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
561 
562 	if (likely(!nointxmask)) {
563 		if (vfio_pci_nointx(pdev)) {
564 			pci_info(pdev, "Masking broken INTx support\n");
565 			vdev->nointx = true;
566 			pci_intx(pdev, 0);
567 		} else
568 			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
569 	}
570 
571 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
572 	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
573 		cmd &= ~PCI_COMMAND_INTX_DISABLE;
574 		pci_write_config_word(pdev, PCI_COMMAND, cmd);
575 	}
576 
577 	ret = vfio_pci_zdev_open_device(vdev);
578 	if (ret)
579 		goto out_free_state;
580 
581 	ret = vfio_config_init(vdev);
582 	if (ret)
583 		goto out_free_zdev;
584 
585 	msix_pos = pdev->msix_cap;
586 	if (msix_pos) {
587 		u16 flags;
588 		u32 table;
589 
590 		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
591 		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
592 
593 		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
594 		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
595 		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
596 		vdev->has_dyn_msix = pci_msix_can_alloc_dyn(pdev);
597 	} else {
598 		vdev->msix_bar = 0xFF;
599 		vdev->has_dyn_msix = false;
600 	}
601 
602 	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
603 		vdev->has_vga = true;
604 
605 	vfio_pci_core_map_bars(vdev);
606 
607 	return 0;
608 
609 out_free_zdev:
610 	vfio_pci_zdev_close_device(vdev);
611 out_free_state:
612 	kfree(vdev->pci_saved_state);
613 	vdev->pci_saved_state = NULL;
614 out_disable_device:
615 	pci_disable_device(pdev);
616 out_power:
617 	if (!disable_idle_d3)
618 		pm_runtime_put(&pdev->dev);
619 	return ret;
620 }
621 EXPORT_SYMBOL_GPL(vfio_pci_core_enable);
622 
623 void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
624 {
625 	struct pci_dev *bridge;
626 	struct pci_dev *pdev = vdev->pdev;
627 	struct vfio_pci_dummy_resource *dummy_res, *tmp;
628 	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
629 	int i, bar;
630 
631 	/* For needs_reset */
632 	lockdep_assert_held(&vdev->vdev.dev_set->lock);
633 
634 	/*
635 	 * This function can be invoked while the power state is non-D0.
636 	 * This non-D0 power state can be with or without runtime PM.
637 	 * vfio_pci_runtime_pm_exit() will internally increment the usage
638 	 * count corresponding to pm_runtime_put() called during low power
639 	 * feature entry and then pm_runtime_resume() will wake up the device,
640 	 * if the device has already gone into the suspended state. Otherwise,
641 	 * the vfio_pci_set_power_state() will change the device power state
642 	 * to D0.
643 	 */
644 	vfio_pci_runtime_pm_exit(vdev);
645 	pm_runtime_resume(&pdev->dev);
646 
647 	/*
648 	 * This function calls __pci_reset_function_locked() which internally
649 	 * can use pci_pm_reset() for the function reset. pci_pm_reset() will
650 	 * fail if the power state is non-D0. Also, for the devices which
651 	 * have NoSoftRst-, the reset function can cause the PCI config space
652 	 * reset without restoring the original state (saved locally in
653 	 * 'vdev->pm_save').
654 	 */
655 	vfio_pci_set_power_state(vdev, PCI_D0);
656 
657 	/* Stop the device from further DMA */
658 	pci_clear_master(pdev);
659 
660 	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
661 				VFIO_IRQ_SET_ACTION_TRIGGER,
662 				vdev->irq_type, 0, 0, NULL);
663 
664 	/* Device closed, don't need mutex here */
665 	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
666 				 &vdev->ioeventfds_list, next) {
667 		vfio_virqfd_disable(&ioeventfd->virqfd);
668 		list_del(&ioeventfd->next);
669 		kfree(ioeventfd);
670 	}
671 	vdev->ioeventfds_nr = 0;
672 
673 	vdev->virq_disabled = false;
674 
675 	for (i = 0; i < vdev->num_regions; i++)
676 		vdev->region[i].ops->release(vdev, &vdev->region[i]);
677 
678 	vdev->num_regions = 0;
679 	kfree(vdev->region);
680 	vdev->region = NULL; /* don't krealloc a freed pointer */
681 
682 	vfio_config_free(vdev);
683 
684 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
685 		bar = i + PCI_STD_RESOURCES;
686 		if (IS_ERR_OR_NULL(vdev->barmap[bar]))
687 			continue;
688 		pci_iounmap(pdev, vdev->barmap[bar]);
689 		pci_release_selected_regions(pdev, 1 << bar);
690 		vdev->barmap[bar] = NULL;
691 	}
692 
693 	list_for_each_entry_safe(dummy_res, tmp,
694 				 &vdev->dummy_resources_list, res_next) {
695 		list_del(&dummy_res->res_next);
696 		release_resource(&dummy_res->resource);
697 		kfree(dummy_res);
698 	}
699 
700 	vdev->needs_reset = true;
701 
702 	vfio_pci_zdev_close_device(vdev);
703 
704 	/*
705 	 * If we have saved state, restore it.  If we can reset the device,
706 	 * even better.  Resetting with current state seems better than
707 	 * nothing, but saving and restoring current state without reset
708 	 * is just busy work.
709 	 */
710 	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
711 		pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
712 
713 		if (!vdev->reset_works)
714 			goto out;
715 
716 		pci_save_state(pdev);
717 	}
718 
719 	/*
720 	 * Disable INTx and MSI, presumably to avoid spurious interrupts
721 	 * during reset.  Stolen from pci_reset_function()
722 	 */
723 	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
724 
725 	/*
726 	 * Try to get the locks ourselves to prevent a deadlock. The
727 	 * success of this is dependent on being able to lock the device,
728 	 * which is not always possible.
729 	 * We cannot use the "try" reset interface here, which will
730 	 * overwrite the previously restored configuration information.
731 	 */
732 	if (vdev->reset_works) {
733 		bridge = pci_upstream_bridge(pdev);
734 		if (bridge && !pci_dev_trylock(bridge))
735 			goto out_restore_state;
736 		if (pci_dev_trylock(pdev)) {
737 			if (!__pci_reset_function_locked(pdev))
738 				vdev->needs_reset = false;
739 			pci_dev_unlock(pdev);
740 		}
741 		if (bridge)
742 			pci_dev_unlock(bridge);
743 	}
744 
745 out_restore_state:
746 	pci_restore_state(pdev);
747 out:
748 	pci_disable_device(pdev);
749 
750 	vfio_pci_dev_set_try_reset(vdev->vdev.dev_set);
751 
752 	/* Put the pm-runtime usage counter acquired during enable */
753 	if (!disable_idle_d3)
754 		pm_runtime_put(&pdev->dev);
755 }
756 EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
757 
758 void vfio_pci_core_close_device(struct vfio_device *core_vdev)
759 {
760 	struct vfio_pci_core_device *vdev =
761 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
762 
763 	if (vdev->sriov_pf_core_dev) {
764 		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
765 		WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
766 		vdev->sriov_pf_core_dev->vf_token->users--;
767 		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
768 	}
769 #if IS_ENABLED(CONFIG_EEH)
770 	eeh_dev_release(vdev->pdev);
771 #endif
772 	vfio_pci_dma_buf_cleanup(vdev);
773 
774 	vfio_pci_core_disable(vdev);
775 
776 	mutex_lock(&vdev->igate);
777 	vfio_pci_eventfd_replace_locked(vdev, &vdev->err_trigger, NULL);
778 	vfio_pci_eventfd_replace_locked(vdev, &vdev->req_trigger, NULL);
779 	mutex_unlock(&vdev->igate);
780 }
781 EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);
782 
783 void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
784 {
785 	vfio_pci_probe_mmaps(vdev);
786 #if IS_ENABLED(CONFIG_EEH)
787 	eeh_dev_open(vdev->pdev);
788 #endif
789 
790 	if (vdev->sriov_pf_core_dev) {
791 		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
792 		vdev->sriov_pf_core_dev->vf_token->users++;
793 		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
794 	}
795 }
796 EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
797 
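/* Number of interrupts the device exposes for the given VFIO IRQ index. */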
798 static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
799 {
800 	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
801 		return vdev->vconfig[PCI_INTERRUPT_PIN] ? 1 : 0;
802 	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
803 		u8 pos;
804 		u16 flags;
805 
806 		pos = vdev->pdev->msi_cap;
807 		if (pos) {
808 			pci_read_config_word(vdev->pdev,
809 					     pos + PCI_MSI_FLAGS, &flags);
810 			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
811 		}
812 	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
813 		u8 pos;
814 		u16 flags;
815 
816 		pos = vdev->pdev->msix_cap;
817 		if (pos) {
818 			pci_read_config_word(vdev->pdev,
819 					     pos + PCI_MSIX_FLAGS, &flags);
820 
821 			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
822 		}
823 	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
824 		if (pci_is_pcie(vdev->pdev))
825 			return 1;
826 	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
827 		return 1;
828 	}
829 
830 	return 0;
831 }
832 
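/* Bus-walk callback: count the devices affected by a slot or bus reset. */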
833 static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
834 {
835 	(*(int *)data)++;
836 	return 0;
837 }
838 
839 struct vfio_pci_fill_info {
840 	struct vfio_device *vdev;
841 	struct vfio_pci_dependent_device *devices;
842 	int nr_devices;
843 	u32 count;
844 	u32 flags;
845 };
846 
847 static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
848 {
849 	struct vfio_pci_dependent_device *info;
850 	struct vfio_pci_fill_info *fill = data;
851 
852 	/* The topology changed since we counted devices */
853 	if (fill->count >= fill->nr_devices)
854 		return -EAGAIN;
855 
856 	info = &fill->devices[fill->count++];
857 	info->segment = pci_domain_nr(pdev->bus);
858 	info->bus = pdev->bus->number;
859 	info->devfn = pdev->devfn;
860 
861 	if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) {
862 		struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev);
863 		struct vfio_device_set *dev_set = fill->vdev->dev_set;
864 		struct vfio_device *vdev;
865 
866 		/*
867 		 * hot-reset requires all affected devices be represented in
868 		 * the dev_set.
869 		 */
870 		vdev = vfio_find_device_in_devset(dev_set, &pdev->dev);
871 		if (!vdev) {
872 			info->devid = VFIO_PCI_DEVID_NOT_OWNED;
873 		} else {
874 			int id = vfio_iommufd_get_dev_id(vdev, iommufd);
875 
876 			if (id > 0)
877 				info->devid = id;
878 			else if (id == -ENOENT)
879 				info->devid = VFIO_PCI_DEVID_OWNED;
880 			else
881 				info->devid = VFIO_PCI_DEVID_NOT_OWNED;
882 		}
883 		/* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */
884 		if (info->devid == VFIO_PCI_DEVID_NOT_OWNED)
885 			fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
886 	} else {
887 		struct iommu_group *iommu_group;
888 
889 		iommu_group = iommu_group_get(&pdev->dev);
890 		if (!iommu_group)
891 			return -EPERM; /* Cannot reset non-isolated devices */
892 
893 		info->group_id = iommu_group_id(iommu_group);
894 		iommu_group_put(iommu_group);
895 	}
896 
897 	return 0;
898 }
899 
900 struct vfio_pci_group_info {
901 	int count;
902 	struct file **files;
903 };
904 
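/* Walk upstream from pdev to determine whether it sits below the given slot. */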
905 static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
906 {
907 	for (; pdev; pdev = pdev->bus->self)
908 		if (pdev->bus == slot->bus)
909 			return (pdev->slot == slot);
910 	return false;
911 }
912 
913 struct vfio_pci_walk_info {
914 	int (*fn)(struct pci_dev *pdev, void *data);
915 	void *data;
916 	struct pci_dev *pdev;
917 	bool slot;
918 	int ret;
919 };
920 
921 static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
922 {
923 	struct vfio_pci_walk_info *walk = data;
924 
925 	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
926 		walk->ret = walk->fn(pdev, walk->data);
927 
928 	return walk->ret;
929 }
930 
931 static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
932 					 int (*fn)(struct pci_dev *,
933 						   void *data), void *data,
934 					 bool slot)
935 {
936 	struct vfio_pci_walk_info walk = {
937 		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
938 	};
939 
940 	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
941 
942 	return walk.ret;
943 }
944 
945 static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
946 			      struct vfio_info_cap *caps)
947 {
948 	struct vfio_info_cap_header header = {
949 		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
950 		.version = 1
951 	};
952 
953 	return vfio_info_add_capability(caps, &header, sizeof(header));
954 }
955 
956 int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
957 				      unsigned int type, unsigned int subtype,
958 				      const struct vfio_pci_regops *ops,
959 				      size_t size, u32 flags, void *data)
960 {
961 	struct vfio_pci_region *region;
962 
963 	region = krealloc(vdev->region,
964 			  (vdev->num_regions + 1) * sizeof(*region),
965 			  GFP_KERNEL_ACCOUNT);
966 	if (!region)
967 		return -ENOMEM;
968 
969 	vdev->region = region;
970 	vdev->region[vdev->num_regions].type = type;
971 	vdev->region[vdev->num_regions].subtype = subtype;
972 	vdev->region[vdev->num_regions].ops = ops;
973 	vdev->region[vdev->num_regions].size = size;
974 	vdev->region[vdev->num_regions].flags = flags;
975 	vdev->region[vdev->num_regions].data = data;
976 
977 	vdev->num_regions++;
978 
979 	return 0;
980 }
981 EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region);
982 
983 static int vfio_pci_info_atomic_cap(struct vfio_pci_core_device *vdev,
984 				    struct vfio_info_cap *caps)
985 {
986 	struct vfio_device_info_cap_pci_atomic_comp cap = {
987 		.header.id = VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP,
988 		.header.version = 1
989 	};
990 	struct pci_dev *pdev = pci_physfn(vdev->pdev);
991 	u32 devcap2;
992 
993 	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &devcap2);
994 
995 	if ((devcap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
996 	    !pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32))
997 		cap.flags |= VFIO_PCI_ATOMIC_COMP32;
998 
999 	if ((devcap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
1000 	    !pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64))
1001 		cap.flags |= VFIO_PCI_ATOMIC_COMP64;
1002 
1003 	if ((devcap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP128) &&
1004 	    !pci_enable_atomic_ops_to_root(pdev,
1005 					   PCI_EXP_DEVCAP2_ATOMIC_COMP128))
1006 		cap.flags |= VFIO_PCI_ATOMIC_COMP128;
1007 
1008 	if (!cap.flags)
1009 		return -ENODEV;
1010 
1011 	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
1012 }
1013 
1014 static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
1015 				   struct vfio_device_info __user *arg)
1016 {
1017 	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
1018 	struct vfio_device_info info = {};
1019 	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
1020 	int ret;
1021 
1022 	if (copy_from_user(&info, arg, minsz))
1023 		return -EFAULT;
1024 
1025 	if (info.argsz < minsz)
1026 		return -EINVAL;
1027 
1028 	minsz = min_t(size_t, info.argsz, sizeof(info));
1029 
1030 	info.flags = VFIO_DEVICE_FLAGS_PCI;
1031 
1032 	if (vdev->reset_works)
1033 		info.flags |= VFIO_DEVICE_FLAGS_RESET;
1034 
1035 	info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
1036 	info.num_irqs = VFIO_PCI_NUM_IRQS;
1037 
1038 	ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
1039 	if (ret && ret != -ENODEV) {
1040 		pci_warn(vdev->pdev,
1041 			 "Failed to setup zPCI info capabilities\n");
1042 		return ret;
1043 	}
1044 
1045 	ret = vfio_pci_info_atomic_cap(vdev, &caps);
1046 	if (ret && ret != -ENODEV) {
1047 		pci_warn(vdev->pdev,
1048 			 "Failed to setup AtomicOps info capability\n");
1049 		return ret;
1050 	}
1051 
1052 	if (caps.size) {
1053 		info.flags |= VFIO_DEVICE_FLAGS_CAPS;
1054 		if (info.argsz < sizeof(info) + caps.size) {
1055 			info.argsz = sizeof(info) + caps.size;
1056 		} else {
1057 			vfio_info_cap_shift(&caps, sizeof(info));
1058 			if (copy_to_user(arg + 1, caps.buf, caps.size)) {
1059 				kfree(caps.buf);
1060 				return -EFAULT;
1061 			}
1062 			info.cap_offset = sizeof(*arg);
1063 		}
1064 
1065 		kfree(caps.buf);
1066 	}
1067 
1068 	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
1069 }
1070 
1071 int vfio_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
1072 				   struct vfio_region_info *info,
1073 				   struct vfio_info_cap *caps)
1074 {
1075 	struct vfio_pci_core_device *vdev =
1076 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1077 	struct pci_dev *pdev = vdev->pdev;
1078 	int i, ret;
1079 
1080 	switch (info->index) {
1081 	case VFIO_PCI_CONFIG_REGION_INDEX:
1082 		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
1083 		info->size = pdev->cfg_size;
1084 		info->flags = VFIO_REGION_INFO_FLAG_READ |
1085 			      VFIO_REGION_INFO_FLAG_WRITE;
1086 		break;
1087 	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1088 		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
1089 		info->size = pci_resource_len(pdev, info->index);
1090 		if (!info->size) {
1091 			info->flags = 0;
1092 			break;
1093 		}
1094 
1095 		info->flags = VFIO_REGION_INFO_FLAG_READ |
1096 			      VFIO_REGION_INFO_FLAG_WRITE;
1097 		if (vdev->bar_mmap_supported[info->index]) {
1098 			info->flags |= VFIO_REGION_INFO_FLAG_MMAP;
1099 			if (info->index == vdev->msix_bar) {
1100 				ret = msix_mmappable_cap(vdev, caps);
1101 				if (ret)
1102 					return ret;
1103 			}
1104 		}
1105 
1106 		break;
1107 	case VFIO_PCI_ROM_REGION_INDEX: {
1108 		void __iomem *io;
1109 		size_t size;
1110 		u16 cmd;
1111 
1112 		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
1113 		info->flags = 0;
1114 		info->size = 0;
1115 
1116 		if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
1117 			/*
1118 			 * Check ROM content is valid. Need to enable memory
1119 			 * decode for ROM access in pci_map_rom().
1120 			 */
1121 			cmd = vfio_pci_memory_lock_and_enable(vdev);
1122 			io = pci_map_rom(pdev, &size);
1123 			if (io) {
1124 				info->flags = VFIO_REGION_INFO_FLAG_READ;
1125 				/* Report the BAR size, not the ROM size. */
1126 				info->size = pci_resource_len(pdev,
1127 							      PCI_ROM_RESOURCE);
1128 				pci_unmap_rom(pdev, io);
1129 			}
1130 			vfio_pci_memory_unlock_and_restore(vdev, cmd);
1131 		} else if (pdev->rom && pdev->romlen) {
1132 			info->flags = VFIO_REGION_INFO_FLAG_READ;
1133 			/* Report BAR size as power of two. */
1134 			info->size = roundup_pow_of_two(pdev->romlen);
1135 		}
1136 
1137 		break;
1138 	}
1139 	case VFIO_PCI_VGA_REGION_INDEX:
1140 		if (!vdev->has_vga)
1141 			return -EINVAL;
1142 
1143 		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
1144 		info->size = 0xc0000;
1145 		info->flags = VFIO_REGION_INFO_FLAG_READ |
1146 			      VFIO_REGION_INFO_FLAG_WRITE;
1147 
1148 		break;
1149 	default: {
1150 		struct vfio_region_info_cap_type cap_type = {
1151 			.header.id = VFIO_REGION_INFO_CAP_TYPE,
1152 			.header.version = 1
1153 		};
1154 
1155 		if (info->index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1156 			return -EINVAL;
1157 		info->index = array_index_nospec(
1158 			info->index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);
1159 
1160 		i = info->index - VFIO_PCI_NUM_REGIONS;
1161 
1162 		info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
1163 		info->size = vdev->region[i].size;
1164 		info->flags = vdev->region[i].flags;
1165 
1166 		cap_type.type = vdev->region[i].type;
1167 		cap_type.subtype = vdev->region[i].subtype;
1168 
1169 		ret = vfio_info_add_capability(caps, &cap_type.header,
1170 					       sizeof(cap_type));
1171 		if (ret)
1172 			return ret;
1173 
1174 		if (vdev->region[i].ops->add_capability) {
1175 			ret = vdev->region[i].ops->add_capability(
1176 				vdev, &vdev->region[i], caps);
1177 			if (ret)
1178 				return ret;
1179 		}
1180 	}
1181 	}
1182 	return 0;
1183 }
1184 EXPORT_SYMBOL_GPL(vfio_pci_ioctl_get_region_info);
1185 
1186 static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
1187 				       struct vfio_irq_info __user *arg)
1188 {
1189 	unsigned long minsz = offsetofend(struct vfio_irq_info, count);
1190 	struct vfio_irq_info info;
1191 
1192 	if (copy_from_user(&info, arg, minsz))
1193 		return -EFAULT;
1194 
1195 	if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1196 		return -EINVAL;
1197 
1198 	switch (info.index) {
1199 	case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
1200 	case VFIO_PCI_REQ_IRQ_INDEX:
1201 		break;
1202 	case VFIO_PCI_ERR_IRQ_INDEX:
1203 		if (pci_is_pcie(vdev->pdev))
1204 			break;
1205 		fallthrough;
1206 	default:
1207 		return -EINVAL;
1208 	}
1209 
1210 	info.flags = VFIO_IRQ_INFO_EVENTFD;
1211 
1212 	info.count = vfio_pci_get_irq_count(vdev, info.index);
1213 
1214 	if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1215 		info.flags |=
1216 			(VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
1217 	else if (info.index != VFIO_PCI_MSIX_IRQ_INDEX || !vdev->has_dyn_msix)
1218 		info.flags |= VFIO_IRQ_INFO_NORESIZE;
1219 
1220 	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
1221 }
1222 
1223 static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
1224 				   struct vfio_irq_set __user *arg)
1225 {
1226 	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
1227 	struct vfio_irq_set hdr;
1228 	u8 *data = NULL;
1229 	int max, ret = 0;
1230 	size_t data_size = 0;
1231 
1232 	if (copy_from_user(&hdr, arg, minsz))
1233 		return -EFAULT;
1234 
1235 	max = vfio_pci_get_irq_count(vdev, hdr.index);
1236 
1237 	ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
1238 						 &data_size);
1239 	if (ret)
1240 		return ret;
1241 
1242 	if (data_size) {
1243 		data = memdup_user(&arg->data, data_size);
1244 		if (IS_ERR(data))
1245 			return PTR_ERR(data);
1246 	}
1247 
1248 	mutex_lock(&vdev->igate);
1249 
1250 	ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
1251 				      hdr.count, data);
1252 
1253 	mutex_unlock(&vdev->igate);
1254 	kfree(data);
1255 
1256 	return ret;
1257 }
1258 
1259 static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
1260 				void __user *arg)
1261 {
1262 	int ret;
1263 
1264 	if (!vdev->reset_works)
1265 		return -EINVAL;
1266 
1267 	vfio_pci_zap_and_down_write_memory_lock(vdev);
1268 
1269 	/*
1270 	 * This function can be invoked while the power state is non-D0. If
1271 	 * pci_try_reset_function() has been called while the power state is
1272 	 * non-D0, then pci_try_reset_function() will internally set the power
1273 	 * state to D0 without vfio driver involvement. For the devices which
1274 	 * have NoSoftRst-, the reset function can cause the PCI config space
1275 	 * reset without restoring the original state (saved locally in
1276 	 * 'vdev->pm_save').
1277 	 */
1278 	vfio_pci_set_power_state(vdev, PCI_D0);
1279 
1280 	vfio_pci_dma_buf_move(vdev, true);
1281 	ret = pci_try_reset_function(vdev->pdev);
1282 	if (__vfio_pci_memory_enabled(vdev))
1283 		vfio_pci_dma_buf_move(vdev, false);
1284 	up_write(&vdev->memory_lock);
1285 
1286 	return ret;
1287 }
1288 
1289 static int vfio_pci_ioctl_get_pci_hot_reset_info(
1290 	struct vfio_pci_core_device *vdev,
1291 	struct vfio_pci_hot_reset_info __user *arg)
1292 {
1293 	unsigned long minsz =
1294 		offsetofend(struct vfio_pci_hot_reset_info, count);
1295 	struct vfio_pci_dependent_device *devices = NULL;
1296 	struct vfio_pci_hot_reset_info hdr;
1297 	struct vfio_pci_fill_info fill = {};
1298 	bool slot = false;
1299 	int ret, count = 0;
1300 
1301 	if (copy_from_user(&hdr, arg, minsz))
1302 		return -EFAULT;
1303 
1304 	if (hdr.argsz < minsz)
1305 		return -EINVAL;
1306 
1307 	hdr.flags = 0;
1308 
1309 	/* Can we do a slot or bus reset or neither? */
1310 	if (!pci_probe_reset_slot(vdev->pdev->slot))
1311 		slot = true;
1312 	else if (pci_probe_reset_bus(vdev->pdev->bus))
1313 		return -ENODEV;
1314 
1315 	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
1316 					    &count, slot);
1317 	if (ret)
1318 		return ret;
1319 
1320 	if (WARN_ON(!count)) /* Should always be at least one */
1321 		return -ERANGE;
1322 
1323 	if (count > (hdr.argsz - sizeof(hdr)) / sizeof(*devices)) {
1324 		hdr.count = count;
1325 		ret = -ENOSPC;
1326 		goto header;
1327 	}
1328 
1329 	devices = kzalloc_objs(*devices, count);
1330 	if (!devices)
1331 		return -ENOMEM;
1332 
1333 	fill.devices = devices;
1334 	fill.nr_devices = count;
1335 	fill.vdev = &vdev->vdev;
1336 
1337 	if (vfio_device_cdev_opened(&vdev->vdev))
1338 		fill.flags |= VFIO_PCI_HOT_RESET_FLAG_DEV_ID |
1339 			     VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
1340 
1341 	mutex_lock(&vdev->vdev.dev_set->lock);
1342 	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
1343 					    &fill, slot);
1344 	mutex_unlock(&vdev->vdev.dev_set->lock);
1345 	if (ret)
1346 		goto out;
1347 
1348 	if (copy_to_user(arg->devices, devices,
1349 			 sizeof(*devices) * fill.count)) {
1350 		ret = -EFAULT;
1351 		goto out;
1352 	}
1353 
1354 	hdr.count = fill.count;
1355 	hdr.flags = fill.flags;
1356 
1357 header:
1358 	if (copy_to_user(arg, &hdr, minsz))
1359 		ret = -EFAULT;
1360 out:
1361 	kfree(devices);
1362 	return ret;
1363 }
1364 
1365 static int
1366 vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
1367 				    u32 array_count, bool slot,
1368 				    struct vfio_pci_hot_reset __user *arg)
1369 {
1370 	int32_t *group_fds;
1371 	struct file **files;
1372 	struct vfio_pci_group_info info;
1373 	int file_idx, count = 0, ret = 0;
1374 
1375 	/*
1376 	 * We can't let userspace give us an arbitrarily large buffer to copy,
1377 	 * so verify how many we think there could be.  Note groups can have
1378 	 * multiple devices so one group per device is the max.
1379 	 */
1380 	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
1381 					    &count, slot);
1382 	if (ret)
1383 		return ret;
1384 
1385 	if (array_count > count)
1386 		return -EINVAL;
1387 
1388 	group_fds = kzalloc_objs(*group_fds, array_count);
1389 	files = kzalloc_objs(*files, array_count);
1390 	if (!group_fds || !files) {
1391 		kfree(group_fds);
1392 		kfree(files);
1393 		return -ENOMEM;
1394 	}
1395 
1396 	if (copy_from_user(group_fds, arg->group_fds,
1397 			   array_count * sizeof(*group_fds))) {
1398 		kfree(group_fds);
1399 		kfree(files);
1400 		return -EFAULT;
1401 	}
1402 
1403 	/*
1404 	 * Get the group file for each fd to ensure the group is held across
1405 	 * the reset
1406 	 */
1407 	for (file_idx = 0; file_idx < array_count; file_idx++) {
1408 		struct file *file = fget(group_fds[file_idx]);
1409 
1410 		if (!file) {
1411 			ret = -EBADF;
1412 			break;
1413 		}
1414 
1415 		/* Ensure the FD is a vfio group FD.*/
1416 		if (!vfio_file_is_group(file)) {
1417 			fput(file);
1418 			ret = -EINVAL;
1419 			break;
1420 		}
1421 
1422 		files[file_idx] = file;
1423 	}
1424 
1425 	kfree(group_fds);
1426 
1427 	/* release reference to groups on error */
1428 	if (ret)
1429 		goto hot_reset_release;
1430 
1431 	info.count = array_count;
1432 	info.files = files;
1433 
1434 	ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info, NULL);
1435 
1436 hot_reset_release:
1437 	for (file_idx--; file_idx >= 0; file_idx--)
1438 		fput(files[file_idx]);
1439 
1440 	kfree(files);
1441 	return ret;
1442 }
1443 
1444 static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
1445 					struct vfio_pci_hot_reset __user *arg)
1446 {
1447 	unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
1448 	struct vfio_pci_hot_reset hdr;
1449 	bool slot = false;
1450 
1451 	if (copy_from_user(&hdr, arg, minsz))
1452 		return -EFAULT;
1453 
1454 	if (hdr.argsz < minsz || hdr.flags)
1455 		return -EINVAL;
1456 
1457 	/* zero-length array is only for cdev opened devices */
1458 	if (!!hdr.count == vfio_device_cdev_opened(&vdev->vdev))
1459 		return -EINVAL;
1460 
1461 	/* Can we do a slot or bus reset or neither? */
1462 	if (!pci_probe_reset_slot(vdev->pdev->slot))
1463 		slot = true;
1464 	else if (pci_probe_reset_bus(vdev->pdev->bus))
1465 		return -ENODEV;
1466 
1467 	if (hdr.count)
1468 		return vfio_pci_ioctl_pci_hot_reset_groups(vdev, hdr.count, slot, arg);
1469 
1470 	return vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, NULL,
1471 					  vfio_iommufd_device_ictx(&vdev->vdev));
1472 }
1473 
1474 static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
1475 				    struct vfio_device_ioeventfd __user *arg)
1476 {
1477 	unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
1478 	struct vfio_device_ioeventfd ioeventfd;
1479 	int count;
1480 
1481 	if (copy_from_user(&ioeventfd, arg, minsz))
1482 		return -EFAULT;
1483 
1484 	if (ioeventfd.argsz < minsz)
1485 		return -EINVAL;
1486 
1487 	if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
1488 		return -EINVAL;
1489 
1490 	count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
1491 
1492 	if (hweight8(count) != 1 || ioeventfd.fd < -1)
1493 		return -EINVAL;
1494 
1495 	return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count,
1496 				  ioeventfd.fd);
1497 }
1498 
1499 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
1500 			 unsigned long arg)
1501 {
1502 	struct vfio_pci_core_device *vdev =
1503 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1504 	void __user *uarg = (void __user *)arg;
1505 
1506 	switch (cmd) {
1507 	case VFIO_DEVICE_GET_INFO:
1508 		return vfio_pci_ioctl_get_info(vdev, uarg);
1509 	case VFIO_DEVICE_GET_IRQ_INFO:
1510 		return vfio_pci_ioctl_get_irq_info(vdev, uarg);
1511 	case VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
1512 		return vfio_pci_ioctl_get_pci_hot_reset_info(vdev, uarg);
1513 	case VFIO_DEVICE_IOEVENTFD:
1514 		return vfio_pci_ioctl_ioeventfd(vdev, uarg);
1515 	case VFIO_DEVICE_PCI_HOT_RESET:
1516 		return vfio_pci_ioctl_pci_hot_reset(vdev, uarg);
1517 	case VFIO_DEVICE_RESET:
1518 		return vfio_pci_ioctl_reset(vdev, uarg);
1519 	case VFIO_DEVICE_SET_IRQS:
1520 		return vfio_pci_ioctl_set_irqs(vdev, uarg);
1521 	default:
1522 		return -ENOTTY;
1523 	}
1524 }
1525 EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
1526 
1527 static int vfio_pci_core_feature_token(struct vfio_pci_core_device *vdev,
1528 				       u32 flags, uuid_t __user *arg,
1529 				       size_t argsz)
1530 {
1531 	uuid_t uuid;
1532 	int ret;
1533 
1534 	if (!vdev->vf_token)
1535 		return -ENOTTY;
1536 	/*
1537 	 * We do not support GET of the VF Token UUID as this could
1538 	 * expose the token of the previous device user.
1539 	 */
1540 	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
1541 				 sizeof(uuid));
1542 	if (ret != 1)
1543 		return ret;
1544 
1545 	if (copy_from_user(&uuid, arg, sizeof(uuid)))
1546 		return -EFAULT;
1547 
1548 	mutex_lock(&vdev->vf_token->lock);
1549 	uuid_copy(&vdev->vf_token->uuid, &uuid);
1550 	mutex_unlock(&vdev->vf_token->lock);
1551 	return 0;
1552 }
1553 
1554 int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
1555 				void __user *arg, size_t argsz)
1556 {
1557 	struct vfio_pci_core_device *vdev =
1558 		container_of(device, struct vfio_pci_core_device, vdev);
1559 
1560 	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
1561 	case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY:
1562 		return vfio_pci_core_pm_entry(vdev, flags, arg, argsz);
1563 	case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP:
1564 		return vfio_pci_core_pm_entry_with_wakeup(vdev, flags,
1565 							  arg, argsz);
1566 	case VFIO_DEVICE_FEATURE_LOW_POWER_EXIT:
1567 		return vfio_pci_core_pm_exit(vdev, flags, arg, argsz);
1568 	case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
1569 		return vfio_pci_core_feature_token(vdev, flags, arg, argsz);
1570 	case VFIO_DEVICE_FEATURE_DMA_BUF:
1571 		return vfio_pci_core_feature_dma_buf(vdev, flags, arg, argsz);
1572 	default:
1573 		return -ENOTTY;
1574 	}
1575 }
1576 EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature);
1577 
1578 static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
1579 			   size_t count, loff_t *ppos, bool iswrite)
1580 {
1581 	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
1582 	int ret;
1583 
1584 	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1585 		return -EINVAL;
1586 
1587 	ret = pm_runtime_resume_and_get(&vdev->pdev->dev);
1588 	if (ret) {
1589 		pci_info_ratelimited(vdev->pdev, "runtime resume failed %d\n",
1590 				     ret);
1591 		return -EIO;
1592 	}
1593 
1594 	switch (index) {
1595 	case VFIO_PCI_CONFIG_REGION_INDEX:
1596 		ret = vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
1597 		break;
1598 
1599 	case VFIO_PCI_ROM_REGION_INDEX:
1600 		if (iswrite)
1601 			ret = -EINVAL;
1602 		else
1603 			ret = vfio_pci_bar_rw(vdev, buf, count, ppos, false);
1604 		break;
1605 
1606 	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1607 		ret = vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
1608 		break;
1609 
1610 	case VFIO_PCI_VGA_REGION_INDEX:
1611 		ret = vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
1612 		break;
1613 
1614 	default:
1615 		index -= VFIO_PCI_NUM_REGIONS;
1616 		ret = vdev->region[index].ops->rw(vdev, buf,
1617 						   count, ppos, iswrite);
1618 		break;
1619 	}
1620 
1621 	pm_runtime_put(&vdev->pdev->dev);
1622 	return ret;
1623 }
1624 
1625 ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
1626 		size_t count, loff_t *ppos)
1627 {
1628 	struct vfio_pci_core_device *vdev =
1629 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1630 
1631 	if (!count)
1632 		return 0;
1633 
1634 	return vfio_pci_rw(vdev, buf, count, ppos, false);
1635 }
1636 EXPORT_SYMBOL_GPL(vfio_pci_core_read);
1637 
1638 ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
1639 		size_t count, loff_t *ppos)
1640 {
1641 	struct vfio_pci_core_device *vdev =
1642 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1643 
1644 	if (!count)
1645 		return 0;
1646 
1647 	return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
1648 }
1649 EXPORT_SYMBOL_GPL(vfio_pci_core_write);
1650 
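/*
 * Zap all user mmaps of the BAR regions so that subsequent accesses fault
 * and re-validate that device memory is still enabled.
 */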
1651 static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
1652 {
1653 	struct vfio_device *core_vdev = &vdev->vdev;
1654 	loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX);
1655 	loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX);
1656 	loff_t len = end - start;
1657 
1658 	unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true);
1659 }
1660 
1661 void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
1662 {
1663 	down_write(&vdev->memory_lock);
1664 	vfio_pci_zap_bars(vdev);
1665 }
1666 
1667 u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
1668 {
1669 	u16 cmd;
1670 
1671 	down_write(&vdev->memory_lock);
1672 	pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
1673 	if (!(cmd & PCI_COMMAND_MEMORY))
1674 		pci_write_config_word(vdev->pdev, PCI_COMMAND,
1675 				      cmd | PCI_COMMAND_MEMORY);
1676 
1677 	return cmd;
1678 }
1679 
1680 void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 cmd)
1681 {
1682 	pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
1683 	up_write(&vdev->memory_lock);
1684 }
1685 
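/* Translate a BAR mmap vma into the host PFN of its first page. */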
1686 static unsigned long vma_to_pfn(struct vm_area_struct *vma)
1687 {
1688 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
1689 	int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1690 	u64 pgoff;
1691 
1692 	pgoff = vma->vm_pgoff &
1693 		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1694 
1695 	return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
1696 }
1697 
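/*
 * Insert a PFN mapping of the given order for a faulting access.  The caller
 * must hold memory_lock for read; the fault is rejected with SIGBUS while the
 * device is runtime suspended or memory decode is disabled, and unsupported
 * orders fall back to smaller mappings.
 */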
1698 vm_fault_t vfio_pci_vmf_insert_pfn(struct vfio_pci_core_device *vdev,
1699 				   struct vm_fault *vmf,
1700 				   unsigned long pfn,
1701 				   unsigned int order)
1702 {
1703 	lockdep_assert_held_read(&vdev->memory_lock);
1704 
1705 	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
1706 		return VM_FAULT_SIGBUS;
1707 
1708 	if (!order)
1709 		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
1710 
1711 	if (IS_ENABLED(CONFIG_ARCH_SUPPORTS_PMD_PFNMAP) && order == PMD_ORDER)
1712 		return vmf_insert_pfn_pmd(vmf, pfn, false);
1713 
1714 	if (IS_ENABLED(CONFIG_ARCH_SUPPORTS_PUD_PFNMAP) && order == PUD_ORDER)
1715 		return vmf_insert_pfn_pud(vmf, pfn, false);
1716 
1717 	return VM_FAULT_FALLBACK;
1718 }
1719 EXPORT_SYMBOL_GPL(vfio_pci_vmf_insert_pfn);
1720 
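/*
 * Fault handler for BAR mmaps: derive the order-aligned address and PFN of
 * the faulting access and, if the alignment allows a mapping of this order,
 * insert it while holding memory_lock for read; otherwise report
 * VM_FAULT_FALLBACK so a smaller order is tried.
 */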
1721 static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
1722 					   unsigned int order)
1723 {
1724 	struct vm_area_struct *vma = vmf->vma;
1725 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
1726 	unsigned long addr = vmf->address & ~((PAGE_SIZE << order) - 1);
1727 	unsigned long pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
1728 	unsigned long pfn = vma_to_pfn(vma) + pgoff;
1729 	vm_fault_t ret = VM_FAULT_FALLBACK;
1730 
1731 	if (is_aligned_for_order(vma, addr, pfn, order)) {
1732 		scoped_guard(rwsem_read, &vdev->memory_lock)
1733 			ret = vfio_pci_vmf_insert_pfn(vdev, vmf, pfn, order);
1734 	}
1735 
1736 	dev_dbg_ratelimited(&vdev->pdev->dev,
1737 			   "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
1738 			    __func__, order,
1739 			    vma->vm_pgoff >>
1740 				(VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
1741 			    pgoff, (unsigned int)ret);
1742 
1743 	return ret;
1744 }
1745 
1746 static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
1747 {
1748 	return vfio_pci_mmap_huge_fault(vmf, 0);
1749 }
1750 
1751 static const struct vm_operations_struct vfio_pci_mmap_ops = {
1752 	.fault = vfio_pci_mmap_page_fault,
1753 #ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
1754 	.huge_fault = vfio_pci_mmap_huge_fault,
1755 #endif
1756 };
1757 
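/*
 * Validate the requested region and set up a PFNMAP VMA backed by the fault
 * handlers above.  BAR mappings are not populated here, so the device's
 * memory-enable state can be enforced at fault time.
 */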
1758 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
1759 {
1760 	struct vfio_pci_core_device *vdev =
1761 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1762 	struct pci_dev *pdev = vdev->pdev;
1763 	unsigned int index;
1764 	u64 phys_len, req_len, pgoff, req_start;
1765 	int ret;
1766 
1767 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1768 
1769 	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1770 		return -EINVAL;
1771 	if (vma->vm_end < vma->vm_start)
1772 		return -EINVAL;
1773 	if ((vma->vm_flags & VM_SHARED) == 0)
1774 		return -EINVAL;
1775 	if (index >= VFIO_PCI_NUM_REGIONS) {
1776 		int regnum = index - VFIO_PCI_NUM_REGIONS;
1777 		struct vfio_pci_region *region = vdev->region + regnum;
1778 
1779 		if (region->ops && region->ops->mmap &&
1780 		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
1781 			return region->ops->mmap(vdev, region, vma);
1782 		return -EINVAL;
1783 	}
1784 	if (index >= VFIO_PCI_ROM_REGION_INDEX)
1785 		return -EINVAL;
1786 	if (!vdev->bar_mmap_supported[index])
1787 		return -EINVAL;
1788 
1789 	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
1790 	req_len = vma->vm_end - vma->vm_start;
1791 	pgoff = vma->vm_pgoff &
1792 		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1793 	req_start = pgoff << PAGE_SHIFT;
1794 
1795 	if (req_start + req_len > phys_len)
1796 		return -EINVAL;
1797 
1798 	/*
1799 	 * Even though we don't make use of the barmap for the mmap,
1800 	 * we need to request the region and the barmap tracks that.
1801 	 */
1802 	ret = vfio_pci_core_setup_barmap(vdev, index);
1803 	if (ret)
1804 		return ret;
1805 
1806 	vma->vm_private_data = vdev;
1807 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1808 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1809 
1810 	/*
1811 	 * Set vm_flags now, they should not be changed in the fault handler.
1812 	 * We want the same flags and page protection (decrypted above) as
1813 	 * io_remap_pfn_range() would set.
1814 	 *
1815 	 * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64,
1816 	 * allowing KVM stage 2 device mapping attributes to use Normal-NC
1817 	 * rather than DEVICE_nGnRE, which allows guest mappings
1818 	 * supporting write-combining attributes (WC). ARM does not
1819 	 * architecturally guarantee this is safe, and indeed some MMIO
1820 	 * regions like the GICv2 VCPU interface can trigger uncontained
1821 	 * faults if Normal-NC is used.
1822 	 *
1823 	 * To safely use VFIO in KVM the platform must guarantee full
1824 	 * safety in the guest where no action taken against a MMIO
1825 	 * mapping can trigger an uncontained failure. The assumption is
1826 	 * that most VFIO PCI platforms support this for both mapping types,
1827 	 * at least in common flows, based on some expectations of how
1828 	 * PCI IP is integrated. Hence VM_ALLOW_ANY_UNCACHED is set in
1829 	 * the VMA flags.
1830 	 */
1831 	vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED | VM_IO | VM_PFNMAP |
1832 			VM_DONTEXPAND | VM_DONTDUMP);
1833 	vma->vm_ops = &vfio_pci_mmap_ops;
1834 
1835 	return 0;
1836 }
1837 EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);
1838 
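/*
 * Relay a request for the user to release the device via the req_trigger
 * eventfd, logging periodically while the request remains unanswered.  If no
 * eventfd is registered, warn on the first request that progress is blocked
 * until the user releases the device.
 */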
1839 void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
1840 {
1841 	struct vfio_pci_core_device *vdev =
1842 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1843 	struct pci_dev *pdev = vdev->pdev;
1844 	struct vfio_pci_eventfd *eventfd;
1845 
1846 	rcu_read_lock();
1847 	eventfd = rcu_dereference(vdev->req_trigger);
1848 	if (eventfd) {
1849 		if (!(count % 10))
1850 			pci_notice_ratelimited(pdev,
1851 				"Relaying device request to user (#%u)\n",
1852 				count);
1853 		eventfd_signal(eventfd->ctx);
1854 	} else if (count == 0) {
1855 		pci_warn(pdev,
1856 			"No device request channel registered, blocked until released by user\n");
1857 	}
1858 	rcu_read_unlock();
1859 }
1860 EXPORT_SYMBOL_GPL(vfio_pci_core_request);
1861 
1862 int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
1863 				   const uuid_t *uuid)
1864 
1865 {
1866 	struct vfio_pci_core_device *vdev =
1867 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1868 
1869 	/*
1870 	 * There's always some degree of trust or collaboration between SR-IOV
1871 	 * PF and VFs, even if just that the PF hosts the SR-IOV capability and
1872 	 * can disrupt VFs with a reset, but often the PF has more explicit
1873 	 * access to deny service to the VF or access data passed through the
1874 	 * VF.  We therefore require an opt-in via a shared VF token (UUID) to
1875 	 * represent this trust.  This both prevents a VF driver from assuming
1876 	 * the PF driver is a trusted, in-kernel driver, and prevents the PF
1877 	 * driver from being replaced with a rogue driver unknown to in-use
1878 	 * VF drivers.
1879 	 *
1880 	 * Therefore when presented with a VF, if the PF is a vfio device and
1881 	 * it is bound to the vfio-pci driver, the user needs to provide a VF
1882 	 * token to access the device, in the form of appending a vf_token to
1883 	 * the device name, for example:
1884 	 *
1885 	 * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
1886 	 *
1887 	 * When presented with a PF which has VFs in use, the user must also
1888 	 * provide the current VF token to prove collaboration with existing
1889 	 * VF users.  If VFs are not in use, the VF token provided for the PF
1890 	 * device will act to set the VF token.
1891 	 *
1892 	 * If the VF token is provided but unused, an error is generated.
1893 	 */
1894 	if (vdev->pdev->is_virtfn) {
1895 		struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
1896 		bool match;
1897 
1898 		if (!pf_vdev) {
1899 			if (!uuid)
1900 				return 0; /* PF is not vfio-pci, no VF token */
1901 
1902 			pci_info_ratelimited(vdev->pdev,
1903 				"VF token incorrectly provided, PF not bound to vfio-pci\n");
1904 			return -EINVAL;
1905 		}
1906 
1907 		if (!uuid) {
1908 			pci_info_ratelimited(vdev->pdev,
1909 				"VF token required to access device\n");
1910 			return -EACCES;
1911 		}
1912 
1913 		mutex_lock(&pf_vdev->vf_token->lock);
1914 		match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
1915 		mutex_unlock(&pf_vdev->vf_token->lock);
1916 
1917 		if (!match) {
1918 			pci_info_ratelimited(vdev->pdev,
1919 				"Incorrect VF token provided for device\n");
1920 			return -EACCES;
1921 		}
1922 	} else if (vdev->vf_token) {
1923 		mutex_lock(&vdev->vf_token->lock);
1924 		if (vdev->vf_token->users) {
1925 			if (!uuid) {
1926 				mutex_unlock(&vdev->vf_token->lock);
1927 				pci_info_ratelimited(vdev->pdev,
1928 					"VF token required to access device\n");
1929 				return -EACCES;
1930 			}
1931 
1932 			if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
1933 				mutex_unlock(&vdev->vf_token->lock);
1934 				pci_info_ratelimited(vdev->pdev,
1935 					"Incorrect VF token provided for device\n");
1936 				return -EACCES;
1937 			}
1938 		} else if (uuid) {
1939 			uuid_copy(&vdev->vf_token->uuid, uuid);
1940 		}
1941 
1942 		mutex_unlock(&vdev->vf_token->lock);
1943 	} else if (uuid) {
1944 		pci_info_ratelimited(vdev->pdev,
1945 			"VF token incorrectly provided, not a PF or VF\n");
1946 		return -EINVAL;
1947 	}
1948 
1949 	return 0;
1950 }
1951 EXPORT_SYMBOL_GPL(vfio_pci_core_match_token_uuid);
1952 
1953 #define VF_TOKEN_ARG "vf_token="
1954 
1955 int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
1956 {
1957 	struct vfio_pci_core_device *vdev =
1958 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
1959 	bool vf_token = false;
1960 	uuid_t uuid;
1961 	int ret;
1962 
1963 	if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
1964 		return 0; /* No match */
1965 
1966 	if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
1967 		buf += strlen(pci_name(vdev->pdev));
1968 
1969 		if (*buf != ' ')
1970 			return 0; /* No match: non-whitespace after name */
1971 
1972 		while (*buf) {
1973 			if (*buf == ' ') {
1974 				buf++;
1975 				continue;
1976 			}
1977 
1978 			if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
1979 						  strlen(VF_TOKEN_ARG))) {
1980 				buf += strlen(VF_TOKEN_ARG);
1981 
1982 				if (strlen(buf) < UUID_STRING_LEN)
1983 					return -EINVAL;
1984 
1985 				ret = uuid_parse(buf, &uuid);
1986 				if (ret)
1987 					return ret;
1988 
1989 				vf_token = true;
1990 				buf += UUID_STRING_LEN;
1991 			} else {
1992 				/* Unknown/duplicate option */
1993 				return -EINVAL;
1994 			}
1995 		}
1996 	}
1997 
1998 	ret = core_vdev->ops->match_token_uuid(core_vdev,
1999 					       vf_token ? &uuid : NULL);
2000 	if (ret)
2001 		return ret;
2002 
2003 	return 1; /* Match */
2004 }
2005 EXPORT_SYMBOL_GPL(vfio_pci_core_match);
2006 
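/*
 * PCI bus notifier registered while this device is an SR-IOV PF: set a
 * driver_override on newly added VFs so they bind to the same vfio driver as
 * the PF, and warn if a VF nonetheless binds to a different driver.
 */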
2007 static int vfio_pci_bus_notifier(struct notifier_block *nb,
2008 				 unsigned long action, void *data)
2009 {
2010 	struct vfio_pci_core_device *vdev = container_of(nb,
2011 						    struct vfio_pci_core_device, nb);
2012 	struct device *dev = data;
2013 	struct pci_dev *pdev = to_pci_dev(dev);
2014 	struct pci_dev *physfn = pci_physfn(pdev);
2015 
2016 	if (action == BUS_NOTIFY_ADD_DEVICE &&
2017 	    pdev->is_virtfn && physfn == vdev->pdev) {
2018 		pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
2019 			 pci_name(pdev));
2020 		WARN_ON(device_set_driver_override(&pdev->dev,
2021 						   vdev->vdev.ops->name));
2022 	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
2023 		   pdev->is_virtfn && physfn == vdev->pdev) {
2024 		struct pci_driver *drv = pci_dev_driver(pdev);
2025 
2026 		if (drv && drv != pci_dev_driver(vdev->pdev))
2027 			pci_warn(vdev->pdev,
2028 				 "VF %s bound to driver %s while PF bound to driver %s\n",
2029 				 pci_name(pdev), drv->name,
2030 				 pci_dev_driver(vdev->pdev)->name);
2031 	}
2032 
2033 	return 0;
2034 }
2035 
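/*
 * SR-IOV setup at init time.  For a VF, find and record its vfio-owned PF
 * (if any) so VF token checks can reach it.  For a PF, allocate the VF token
 * and register the bus notifier that captures newly created VFs.
 */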
2036 static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
2037 {
2038 	struct pci_dev *pdev = vdev->pdev;
2039 	struct vfio_pci_core_device *cur;
2040 	struct pci_dev *physfn;
2041 	int ret;
2042 
2043 	if (pdev->is_virtfn) {
2044 		/*
2045 		 * If this VF was created by our vfio_pci_core_sriov_configure()
2046 		 * then we can find the PF vfio_pci_core_device now, and due to
2047 		 * the locking in pci_disable_sriov() it cannot change until
2048 		 * this VF device driver is removed.
2049 		 */
2050 		physfn = pci_physfn(vdev->pdev);
2051 		mutex_lock(&vfio_pci_sriov_pfs_mutex);
2052 		list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
2053 			if (cur->pdev == physfn) {
2054 				vdev->sriov_pf_core_dev = cur;
2055 				break;
2056 			}
2057 		}
2058 		mutex_unlock(&vfio_pci_sriov_pfs_mutex);
2059 		return 0;
2060 	}
2061 
2062 	/* Not a SRIOV PF */
2063 	if (!pdev->is_physfn)
2064 		return 0;
2065 
2066 	vdev->vf_token = kzalloc_obj(*vdev->vf_token, GFP_KERNEL);
2067 	if (!vdev->vf_token)
2068 		return -ENOMEM;
2069 
2070 	mutex_init(&vdev->vf_token->lock);
2071 	uuid_gen(&vdev->vf_token->uuid);
2072 
2073 	vdev->nb.notifier_call = vfio_pci_bus_notifier;
2074 	ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
2075 	if (ret) {
2076 		kfree(vdev->vf_token);
2077 		return ret;
2078 	}
2079 	return 0;
2080 }
2081 
2082 static void vfio_pci_vf_uninit(struct vfio_pci_core_device *vdev)
2083 {
2084 	if (!vdev->vf_token)
2085 		return;
2086 
2087 	bus_unregister_notifier(&pci_bus_type, &vdev->nb);
2088 	WARN_ON(vdev->vf_token->users);
2089 	mutex_destroy(&vdev->vf_token->lock);
2090 	kfree(vdev->vf_token);
2091 }
2092 
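/*
 * For VGA-class devices, remove conflicting aperture (firmware framebuffer)
 * drivers and register the device with the VGA arbiter, using
 * vfio_pci_set_decode() to report which legacy resources it decodes.
 */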
2093 static int vfio_pci_vga_init(struct vfio_pci_core_device *vdev)
2094 {
2095 	struct pci_dev *pdev = vdev->pdev;
2096 	int ret;
2097 
2098 	if (!vfio_pci_is_vga(pdev))
2099 		return 0;
2100 
2101 	ret = aperture_remove_conflicting_pci_devices(pdev, vdev->vdev.ops->name);
2102 	if (ret)
2103 		return ret;
2104 
2105 	ret = vga_client_register(pdev, vfio_pci_set_decode);
2106 	if (ret)
2107 		return ret;
2108 	vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false));
2109 	return 0;
2110 }
2111 
2112 static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
2113 {
2114 	struct pci_dev *pdev = vdev->pdev;
2115 
2116 	if (!vfio_pci_is_vga(pdev))
2117 		return;
2118 	vga_client_unregister(pdev);
2119 	vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
2120 					      VGA_RSRC_LEGACY_IO |
2121 					      VGA_RSRC_LEGACY_MEM);
2122 }
2123 
2124 int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
2125 {
2126 	struct vfio_pci_core_device *vdev =
2127 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
2128 	int ret;
2129 
2130 	vdev->pdev = to_pci_dev(core_vdev->dev);
2131 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
2132 	mutex_init(&vdev->igate);
2133 	spin_lock_init(&vdev->irqlock);
2134 	mutex_init(&vdev->ioeventfds_lock);
2135 	INIT_LIST_HEAD(&vdev->dummy_resources_list);
2136 	INIT_LIST_HEAD(&vdev->ioeventfds_list);
2137 	INIT_LIST_HEAD(&vdev->sriov_pfs_item);
2138 	ret = pcim_p2pdma_init(vdev->pdev);
2139 	if (ret && ret != -EOPNOTSUPP)
2140 		return ret;
2141 	INIT_LIST_HEAD(&vdev->dmabufs);
2142 	init_rwsem(&vdev->memory_lock);
2143 	xa_init(&vdev->ctx);
2144 
2145 	return 0;
2146 }
2147 EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev);
2148 
2149 void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
2150 {
2151 	struct vfio_pci_core_device *vdev =
2152 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
2153 
2154 	mutex_destroy(&vdev->igate);
2155 	mutex_destroy(&vdev->ioeventfds_lock);
2156 	kfree(vdev->region);
2157 	kfree(vdev->pm_save);
2158 }
2159 EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev);
2160 
2161 int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
2162 {
2163 	struct pci_dev *pdev = vdev->pdev;
2164 	struct device *dev = &pdev->dev;
2165 	int ret;
2166 
2167 	/* Drivers must set the vfio_pci_core_device to their drvdata */
2168 	if (WARN_ON(vdev != dev_get_drvdata(dev)))
2169 		return -EINVAL;
2170 
2171 	/* Drivers must set a name.  Required for sequestering SR-IOV VFs */
2172 	if (WARN_ON(!vdev->vdev.ops->name))
2173 		return -EINVAL;
2174 
2175 	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
2176 		return -EINVAL;
2177 
2178 	if (vdev->vdev.mig_ops) {
2179 		if (!(vdev->vdev.mig_ops->migration_get_state &&
2180 		      vdev->vdev.mig_ops->migration_set_state &&
2181 		      vdev->vdev.mig_ops->migration_get_data_size) ||
2182 		    !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
2183 			return -EINVAL;
2184 	}
2185 
2186 	if (vdev->vdev.log_ops && !(vdev->vdev.log_ops->log_start &&
2187 	    vdev->vdev.log_ops->log_stop &&
2188 	    vdev->vdev.log_ops->log_read_and_clear))
2189 		return -EINVAL;
2190 
2191 	/*
2192 	 * Prevent binding to PFs with VFs enabled, the VFs might be in use
2193 	 * by the host or other users.  We cannot capture the VFs if they
2194 	 * already exist, nor can we track VF users.  Disabling SR-IOV here
2195 	 * would initiate removing the VFs, which would unbind the driver,
2196 	 * which is prone to blocking if that VF is also in use by vfio-pci.
2197 	 * Just reject these PFs and let the user sort it out.
2198 	 */
2199 	if (pci_num_vf(pdev)) {
2200 		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
2201 		return -EBUSY;
2202 	}
2203 
2204 	if (pci_is_root_bus(pdev->bus) || pdev->is_virtfn) {
2205 		ret = vfio_assign_device_set(&vdev->vdev, vdev);
2206 	} else if (!pci_probe_reset_slot(pdev->slot)) {
2207 		ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
2208 	} else {
2209 		/*
2210 		 * If there is no slot reset support for this device, the whole
2211 		 * bus needs to be grouped together to support bus-wide resets.
2212 		 */
2213 		ret = vfio_assign_device_set(&vdev->vdev, pdev->bus);
2214 	}
2215 
2216 	if (ret)
2217 		return ret;
2218 	ret = vfio_pci_vf_init(vdev);
2219 	if (ret)
2220 		return ret;
2221 	ret = vfio_pci_vga_init(vdev);
2222 	if (ret)
2223 		goto out_vf;
2224 
2225 	vfio_pci_probe_power_state(vdev);
2226 
2227 	/*
2228 	 * pci-core sets the device power state to an unknown value at
2229 	 * bootup and after being removed from a driver.  The only
2230 	 * transition it allows from this unknown state is to D0, which
2231 	 * typically happens when a driver calls pci_enable_device().
2232 	 * We're not ready to enable the device yet, but we do want to
2233 	 * be able to get to D3.  Therefore first do a D0 transition
2234 	 * before enabling runtime PM.
2235 	 */
2236 	vfio_pci_set_power_state(vdev, PCI_D0);
2237 
2238 	dev->driver->pm = &vfio_pci_core_pm_ops;
2239 	pm_runtime_allow(dev);
2240 	if (!disable_idle_d3)
2241 		pm_runtime_put(dev);
2242 
2243 	ret = vfio_register_group_dev(&vdev->vdev);
2244 	if (ret)
2245 		goto out_power;
2246 	return 0;
2247 
2248 out_power:
2249 	if (!disable_idle_d3)
2250 		pm_runtime_get_noresume(dev);
2251 
2252 	pm_runtime_forbid(dev);
2253 out_vf:
2254 	vfio_pci_vf_uninit(vdev);
2255 	return ret;
2256 }
2257 EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
2258 
2259 void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
2260 {
2261 	vfio_pci_core_sriov_configure(vdev, 0);
2262 
2263 	vfio_unregister_group_dev(&vdev->vdev);
2264 
2265 	vfio_pci_vf_uninit(vdev);
2266 	vfio_pci_vga_uninit(vdev);
2267 
2268 	if (!disable_idle_d3)
2269 		pm_runtime_get_noresume(&vdev->pdev->dev);
2270 
2271 	pm_runtime_forbid(&vdev->pdev->dev);
2272 }
2273 EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
2274 
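/*
 * AER error_detected callback: notify userspace through the err_trigger
 * eventfd, if one is registered, and report the error as potentially
 * recoverable.
 */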
2275 pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
2276 						pci_channel_state_t state)
2277 {
2278 	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
2279 	struct vfio_pci_eventfd *eventfd;
2280 
2281 	rcu_read_lock();
2282 	eventfd = rcu_dereference(vdev->err_trigger);
2283 	if (eventfd)
2284 		eventfd_signal(eventfd->ctx);
2285 	rcu_read_unlock();
2286 
2287 	return PCI_ERS_RESULT_CAN_RECOVER;
2288 }
2289 EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
2290 
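/*
 * sriov_configure driver callback.  Enabling VFs adds the PF to
 * vfio_pci_sriov_pfs (so VFs created here can find their PF) and wakes the
 * device to D0 before pci_enable_sriov(); nr_virtfn == 0 disables SR-IOV and
 * drops the runtime PM reference taken at enable time.
 */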
2291 int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
2292 				  int nr_virtfn)
2293 {
2294 	struct pci_dev *pdev = vdev->pdev;
2295 	int ret = 0;
2296 
2297 	device_lock_assert(&pdev->dev);
2298 
2299 	if (nr_virtfn) {
2300 		mutex_lock(&vfio_pci_sriov_pfs_mutex);
2301 		/*
2302 		 * The thread that adds the vdev to the list is the only thread
2303 		 * that gets to call pci_enable_sriov() and we will only allow
2304 		 * it to be called once without going through
2305 		 * pci_disable_sriov()
2306 		 */
2307 		if (!list_empty(&vdev->sriov_pfs_item)) {
2308 			ret = -EINVAL;
2309 			goto out_unlock;
2310 		}
2311 		list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
2312 		mutex_unlock(&vfio_pci_sriov_pfs_mutex);
2313 
2314 		/*
2315 		 * The PF power state should always be higher than the VF power
2316 		 * state. The PF can be in low power state either with runtime
2317 		 * power management (when there is no user) or PCI_PM_CTRL
2318 		 * register write by the user. If PF is in the low power state,
2319 		 * then change the power state to D0 first before enabling
2320 		 * SR-IOV. Also, this function can be called at any time, and
2321 		 * userspace PCI_PM_CTRL write can race against this code path,
2322 		 * so protect the same with 'memory_lock'.
2323 		 */
2324 		ret = pm_runtime_resume_and_get(&pdev->dev);
2325 		if (ret)
2326 			goto out_del;
2327 
2328 		down_write(&vdev->memory_lock);
2329 		vfio_pci_set_power_state(vdev, PCI_D0);
2330 		ret = pci_enable_sriov(pdev, nr_virtfn);
2331 		up_write(&vdev->memory_lock);
2332 		if (ret) {
2333 			pm_runtime_put(&pdev->dev);
2334 			goto out_del;
2335 		}
2336 		return nr_virtfn;
2337 	}
2338 
2339 	if (pci_num_vf(pdev)) {
2340 		pci_disable_sriov(pdev);
2341 		pm_runtime_put(&pdev->dev);
2342 	}
2343 
2344 out_del:
2345 	mutex_lock(&vfio_pci_sriov_pfs_mutex);
2346 	list_del_init(&vdev->sriov_pfs_item);
2347 out_unlock:
2348 	mutex_unlock(&vfio_pci_sriov_pfs_mutex);
2349 	return ret;
2350 }
2351 EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
2352 
2353 const struct pci_error_handlers vfio_pci_core_err_handlers = {
2354 	.error_detected = vfio_pci_core_aer_err_detected,
2355 };
2356 EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
2357 
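/* Return true if @vdev appears in any of the user-supplied group files. */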
2358 static bool vfio_dev_in_groups(struct vfio_device *vdev,
2359 			       struct vfio_pci_group_info *groups)
2360 {
2361 	unsigned int i;
2362 
2363 	if (!groups)
2364 		return false;
2365 
2366 	for (i = 0; i < groups->count; i++)
2367 		if (vfio_file_has_dev(groups->files[i], vdev))
2368 			return true;
2369 	return false;
2370 }
2371 
2372 static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
2373 {
2374 	struct vfio_device_set *dev_set = data;
2375 
2376 	return vfio_find_device_in_devset(dev_set, &pdev->dev) ? 0 : -ENODEV;
2377 }
2378 
2379 /*
2380  * vfio-core considers a group to be viable and will create a vfio_device even
2381  * if some devices are bound to drivers like pci-stub or pcieport. Here we
2382  * require all PCI devices to be inside our dev_set since that ensures they stay
2383  * put and that every driver controlling the device can co-ordinate with the
2384  * device reset.
2385  *
2386  * Returns the pci_dev to pass to pci_reset_bus() if every PCI device to be
2387  * reset is inside the dev_set, and pci_reset_bus() can succeed. NULL otherwise.
2388  */
2389 static struct pci_dev *
2390 vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
2391 {
2392 	struct pci_dev *pdev;
2393 
2394 	lockdep_assert_held(&dev_set->lock);
2395 
2396 	/*
2397 	 * By definition all PCI devices in the dev_set share the same PCI
2398 	 * reset, so any pci_dev will have the same outcomes for
2399 	 * pci_probe_reset_*() and pci_reset_bus().
2400 	 */
2401 	pdev = list_first_entry(&dev_set->device_list,
2402 				struct vfio_pci_core_device,
2403 				vdev.dev_set_list)->pdev;
2404 
2405 	/* pci_reset_bus() is supported */
2406 	if (pci_probe_reset_slot(pdev->slot) && pci_probe_reset_bus(pdev->bus))
2407 		return NULL;
2408 
2409 	if (vfio_pci_for_each_slot_or_bus(pdev, vfio_pci_is_device_in_set,
2410 					  dev_set,
2411 					  !pci_probe_reset_slot(pdev->slot)))
2412 		return NULL;
2413 	return pdev;
2414 }
2415 
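/*
 * Resume and pin every device in the dev_set, unwinding the references
 * already taken if any device fails to resume.
 */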
2416 static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
2417 {
2418 	struct vfio_pci_core_device *cur;
2419 	int ret;
2420 
2421 	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
2422 		ret = pm_runtime_resume_and_get(&cur->pdev->dev);
2423 		if (ret)
2424 			goto unwind;
2425 	}
2426 
2427 	return 0;
2428 
2429 unwind:
2430 	list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
2431 					     vdev.dev_set_list)
2432 		pm_runtime_put(&cur->pdev->dev);
2433 
2434 	return ret;
2435 }
2436 
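/*
 * User-requested hot reset of the whole dev_set: verify the user owns every
 * affected device, pin each device's power state, lock and zap its BAR
 * mappings, then issue the bus/slot reset and unwind.
 */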
2437 static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
2438 				      struct vfio_pci_group_info *groups,
2439 				      struct iommufd_ctx *iommufd_ctx)
2440 {
2441 	struct vfio_pci_core_device *vdev;
2442 	struct pci_dev *pdev;
2443 	int ret;
2444 
2445 	mutex_lock(&dev_set->lock);
2446 
2447 	pdev = vfio_pci_dev_set_resettable(dev_set);
2448 	if (!pdev) {
2449 		ret = -EINVAL;
2450 		goto err_unlock;
2451 	}
2452 
2453 	/*
2454 	 * Some of the devices in the dev_set can be in the runtime suspended
2455 	 * state. Increment the usage count for all the devices in the dev_set
2456 	 * before reset and decrement the same after reset.
2457 	 */
2458 	ret = vfio_pci_dev_set_pm_runtime_get(dev_set);
2459 	if (ret)
2460 		goto err_unlock;
2461 
2462 	list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
2463 		bool owned;
2464 
2465 		/*
2466 		 * Test whether all the affected devices can be reset by the
2467 		 * user.
2468 		 *
2469 		 * If called from a group opened device and the user provides
2470 		 * a set of groups, all the devices in the dev_set should be
2471 		 * contained by the set of groups provided by the user.
2472 		 *
2473 		 * If called from a cdev opened device and the user provides
2474 		 * a zero-length array, all the devices in the dev_set must
2475 		 * be bound to the same iommufd_ctx as the input iommufd_ctx.
2476 		 * If there is any device that has not been bound to any
2477 		 * iommufd_ctx yet, check if its iommu_group has any device
2478 		 * bound to the input iommufd_ctx.  Such devices can be
2479 		 * considered owned by the input iommufd_ctx as the device
2480 		 * cannot be owned by another iommufd_ctx when its iommu_group
2481 		 * is owned.
2482 		 *
2483 		 * Otherwise, reset is not allowed.
2484 		 */
2485 		if (iommufd_ctx) {
2486 			int devid = vfio_iommufd_get_dev_id(&vdev->vdev,
2487 							    iommufd_ctx);
2488 
2489 			owned = (devid > 0 || devid == -ENOENT);
2490 		} else {
2491 			owned = vfio_dev_in_groups(&vdev->vdev, groups);
2492 		}
2493 
2494 		if (!owned) {
2495 			ret = -EINVAL;
2496 			break;
2497 		}
2498 
2499 		/*
2500 		 * Take the memory write lock for each device and zap BAR
2501 		 * mappings to prevent the user accessing the device while in
2502 		 * reset.  Locking multiple devices is prone to deadlock,
2503 		 * runaway and unwind if we hit contention.
2504 		 */
2505 		if (!down_write_trylock(&vdev->memory_lock)) {
2506 			ret = -EBUSY;
2507 			break;
2508 		}
2509 
2510 		vfio_pci_dma_buf_move(vdev, true);
2511 		vfio_pci_zap_bars(vdev);
2512 	}
2513 
2514 	if (!list_entry_is_head(vdev,
2515 				&dev_set->device_list, vdev.dev_set_list)) {
2516 		vdev = list_prev_entry(vdev, vdev.dev_set_list);
2517 		goto err_undo;
2518 	}
2519 
2520 	/*
2521 	 * The pci_reset_bus() will reset all the devices in the bus.
2522 	 * The power state can be non-D0 for some of the devices in the bus.
2523 	 * For these devices, the pci_reset_bus() will internally set
2524 	 * the power state to D0 without vfio driver involvement.
2525 	 * For the devices which have NoSoftRst-, the reset function can
2526 	 * cause the PCI config space reset without restoring the original
2527 	 * state (saved locally in 'vdev->pm_save').
2528 	 */
2529 	list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
2530 		vfio_pci_set_power_state(vdev, PCI_D0);
2531 
2532 	ret = pci_reset_bus(pdev);
2533 
2534 	vdev = list_last_entry(&dev_set->device_list,
2535 			       struct vfio_pci_core_device, vdev.dev_set_list);
2536 
2537 err_undo:
2538 	list_for_each_entry_from_reverse(vdev, &dev_set->device_list,
2539 					 vdev.dev_set_list) {
2540 		if (vdev->vdev.open_count && __vfio_pci_memory_enabled(vdev))
2541 			vfio_pci_dma_buf_move(vdev, false);
2542 		up_write(&vdev->memory_lock);
2543 	}
2544 
2545 	list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
2546 		pm_runtime_put(&vdev->pdev->dev);
2547 
2548 err_unlock:
2549 	mutex_unlock(&dev_set->lock);
2550 	return ret;
2551 }
2552 
2553 static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
2554 {
2555 	struct vfio_pci_core_device *cur;
2556 	bool needs_reset = false;
2557 
2558 	/* No other VFIO device in the set can be open. */
2559 	if (vfio_device_set_open_count(dev_set) > 1)
2560 		return false;
2561 
2562 	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
2563 		needs_reset |= cur->needs_reset;
2564 	return needs_reset;
2565 }
2566 
2567 /*
2568  * If a bus or slot reset is available for the provided dev_set and:
2569  *  - All of the devices affected by that bus or slot reset are unused
2570  *  - At least one of the affected devices is marked dirty via
2571  *    needs_reset (such as by lack of FLR support)
2572  * Then attempt to perform that bus or slot reset.
2573  */
2574 static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
2575 {
2576 	struct vfio_pci_core_device *cur;
2577 	struct pci_dev *pdev;
2578 	bool reset_done = false;
2579 
2580 	if (!vfio_pci_dev_set_needs_reset(dev_set))
2581 		return;
2582 
2583 	pdev = vfio_pci_dev_set_resettable(dev_set);
2584 	if (!pdev)
2585 		return;
2586 
2587 	/*
2588 	 * Some of the devices in the bus can be in the runtime suspended
2589 	 * state. Increment the usage count for all the devices in the dev_set
2590 	 * before reset and decrement the same after reset.
2591 	 */
2592 	if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set))
2593 		return;
2594 
2595 	if (!pci_reset_bus(pdev))
2596 		reset_done = true;
2597 
2598 	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
2599 		if (reset_done)
2600 			cur->needs_reset = false;
2601 
2602 		if (!disable_idle_d3)
2603 			pm_runtime_put(&cur->pdev->dev);
2604 	}
2605 }
2606 
2607 void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
2608 			      bool is_disable_idle_d3)
2609 {
2610 	nointxmask = is_nointxmask;
2611 	disable_vga = is_disable_vga;
2612 	disable_idle_d3 = is_disable_idle_d3;
2613 }
2614 EXPORT_SYMBOL_GPL(vfio_pci_core_set_params);
2615 
2616 static void vfio_pci_core_cleanup(void)
2617 {
2618 	vfio_pci_uninit_perm_bits();
2619 }
2620 
2621 static int __init vfio_pci_core_init(void)
2622 {
2623 	/* Allocate shared config space permission data used by all devices */
2624 	return vfio_pci_init_perm_bits();
2625 }
2626 
2627 module_init(vfio_pci_core_init);
2628 module_exit(vfio_pci_core_cleanup);
2629 
2630 MODULE_LICENSE("GPL v2");
2631 MODULE_AUTHOR(DRIVER_AUTHOR);
2632 MODULE_DESCRIPTION(DRIVER_DESC);
2633