xref: /linux/drivers/pci/iov.c (revision 0bd0a41a5120f78685a132834865b0a631b9026a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCI Express I/O Virtualization (IOV) support
4  *   Single Root IOV 1.0
5  *   Address Translation Service 1.0
6  *
7  * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/log2.h>
13 #include <linux/pci.h>
14 #include <linux/sizes.h>
15 #include <linux/slab.h>
16 #include <linux/export.h>
17 #include <linux/string.h>
18 #include <linux/delay.h>
19 #include <asm/div64.h>
20 #include "pci.h"
21 
22 #define VIRTFN_ID_LEN	17	/* "virtfn%u\0" for 2^32 - 1 */
23 
24 int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
25 {
26 	if (!dev->is_physfn)
27 		return -EINVAL;
28 	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
29 				    dev->sriov->stride * vf_id) >> 8);
30 }
31 
32 int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
33 {
34 	if (!dev->is_physfn)
35 		return -EINVAL;
36 	return (dev->devfn + dev->sriov->offset +
37 		dev->sriov->stride * vf_id) & 0xff;
38 }
39 EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);
40 
41 int pci_iov_vf_id(struct pci_dev *dev)
42 {
43 	struct pci_dev *pf;
44 
45 	if (!dev->is_virtfn)
46 		return -EINVAL;
47 
48 	pf = pci_physfn(dev);
49 	return (pci_dev_id(dev) - (pci_dev_id(pf) + pf->sriov->offset)) /
50 	       pf->sriov->stride;
51 }
52 EXPORT_SYMBOL_GPL(pci_iov_vf_id);
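
/*
 * Worked example for the three helpers above (illustrative values only,
 * not taken from any real device): a PF at 03:00.0 whose SR-IOV capability
 * reports First VF Offset 1 and VF Stride 1 places VF 0 at 03:00.1 and
 * VF 7 at 03:01.0, because devfn + offset + stride * 7 = 8; the high bits
 * (8 >> 8 == 0) add no extra bus and the low byte 0x08 is the devfn for
 * device 1, function 0.  pci_iov_vf_id() simply inverts that arithmetic:
 * vf_id = (pci_dev_id(VF) - (pci_dev_id(PF) + offset)) / stride.
 */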
53 
54 /**
55  * pci_iov_get_pf_drvdata - Return the drvdata of a PF
56  * @dev: VF pci_dev
57  * @pf_driver: Device driver required to own the PF
58  *
59  * This must be called from a context that ensures that a VF driver is attached.
60  * The value returned is invalid once the VF driver completes its remove()
61  * callback.
62  *
63  * Locking is achieved by the driver core. A VF driver cannot be probed until
64  * pci_enable_sriov() is called and pci_disable_sriov() does not return until
65  * all VF drivers have completed their remove().
66  *
67  * The PF driver must call pci_disable_sriov() before it begins to destroy the
68  * drvdata.
69  */
70 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver)
71 {
72 	struct pci_dev *pf_dev;
73 
74 	if (!dev->is_virtfn)
75 		return ERR_PTR(-EINVAL);
76 	pf_dev = dev->physfn;
77 	if (pf_dev->driver != pf_driver)
78 		return ERR_PTR(-EINVAL);
79 	return pci_get_drvdata(pf_dev);
80 }
81 EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata);
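
/*
 * Minimal usage sketch for pci_iov_get_pf_drvdata(); the driver and state
 * structure names below are hypothetical, only the helper itself and the
 * ERR_PTR() convention come from the code above:
 *
 *	static int my_vf_probe(struct pci_dev *vf, const struct pci_device_id *id)
 *	{
 *		struct my_pf_state *pf_state;
 *
 *		pf_state = pci_iov_get_pf_drvdata(vf, &my_pf_driver);
 *		if (IS_ERR(pf_state))
 *			return PTR_ERR(pf_state);
 *		...
 *	}
 *
 * As the kernel-doc explains, the returned pointer is only valid while the
 * VF driver stays bound.
 */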
82 
83 /*
84  * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
85  * change when NumVFs changes.
86  *
87  * Update iov->offset and iov->stride when NumVFs is written.
88  */
89 static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
90 {
91 	struct pci_sriov *iov = dev->sriov;
92 
93 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
94 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
95 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
96 }
97 
98 /*
99  * The PF consumes one bus number.  NumVFs, First VF Offset, and VF Stride
100  * determine how many additional bus numbers will be consumed by VFs.
101  *
102  * Iterate over all valid NumVFs, validate offset and stride, and calculate
103  * the maximum number of bus numbers that could ever be required.
104  */
105 static int compute_max_vf_buses(struct pci_dev *dev)
106 {
107 	struct pci_sriov *iov = dev->sriov;
108 	int nr_virtfn, busnr, rc = 0;
109 
110 	for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
111 		pci_iov_set_numvfs(dev, nr_virtfn);
112 		if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
113 			rc = -EIO;
114 			goto out;
115 		}
116 
117 		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
118 		if (busnr > iov->max_VF_buses)
119 			iov->max_VF_buses = busnr;
120 	}
121 
122 out:
123 	pci_iov_set_numvfs(dev, 0);
124 	return rc;
125 }
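
/*
 * Illustration (made-up numbers): a PF at devfn 0x00 with First VF Offset
 * 0x80 and VF Stride 1 keeps all VFs on its own bus until NumVFs exceeds
 * 128; from VF 128 onwards devfn + offset + stride * vf_id crosses 0x100,
 * so pci_iov_virtfn_bus() returns one bus past the PF's and
 * compute_max_vf_buses() records that extra bus in max_VF_buses.
 */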
126 
127 static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
128 {
129 	struct pci_bus *child;
130 
131 	if (bus->number == busnr)
132 		return bus;
133 
134 	child = pci_find_bus(pci_domain_nr(bus), busnr);
135 	if (child)
136 		return child;
137 
138 	child = pci_add_new_bus(bus, NULL, busnr);
139 	if (!child)
140 		return NULL;
141 
142 	pci_bus_insert_busn_res(child, busnr, busnr);
143 
144 	return child;
145 }
146 
147 static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
148 {
149 	if (physbus != virtbus && list_empty(&virtbus->devices))
150 		pci_remove_bus(virtbus);
151 }
152 
153 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
154 {
155 	if (!dev->is_physfn)
156 		return 0;
157 
158 	return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)];
159 }
160 
161 void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
162 			       resource_size_t size)
163 {
164 	if (!pci_resource_is_iov(resno)) {
165 		pci_warn(dev, "%s is not an IOV resource\n",
166 			 pci_resource_name(dev, resno));
167 		return;
168 	}
169 
170 	dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)] = size;
171 }
172 
173 bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
174 {
175 	u16 cmd;
176 
177 	pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd);
178 
179 	return cmd & PCI_SRIOV_CTRL_MSE;
180 }
181 
182 static void pci_read_vf_config_common(struct pci_dev *virtfn)
183 {
184 	struct pci_dev *physfn = virtfn->physfn;
185 
186 	/*
187 	 * Some config registers are the same across all associated VFs.
188 	 * Read them once from VF0 so we can skip reading them from the
189 	 * other VFs.
190 	 *
191 	 * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
192 	 * have the same Revision ID and Subsystem ID, but we assume they
193 	 * do.
194 	 */
195 	pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
196 			      &physfn->sriov->class);
197 	pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
198 			     &physfn->sriov->hdr_type);
199 	pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
200 			     &physfn->sriov->subsystem_vendor);
201 	pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
202 			     &physfn->sriov->subsystem_device);
203 }
204 
205 int pci_iov_sysfs_link(struct pci_dev *dev,
206 		struct pci_dev *virtfn, int id)
207 {
208 	char buf[VIRTFN_ID_LEN];
209 	int rc;
210 
211 	sprintf(buf, "virtfn%u", id);
212 	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
213 	if (rc)
214 		goto failed;
215 	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
216 	if (rc)
217 		goto failed1;
218 
219 	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
220 
221 	return 0;
222 
223 failed1:
224 	sysfs_remove_link(&dev->dev.kobj, buf);
225 failed:
226 	return rc;
227 }
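
/*
 * The two links created above form the usual SR-IOV sysfs topology, e.g.
 * (paths shown for illustration only):
 *
 *	/sys/bus/pci/devices/<PF>/virtfn0  -> ../<VF 0>
 *	/sys/bus/pci/devices/<VF 0>/physfn -> ../<PF>
 */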
228 
229 #ifdef CONFIG_PCI_MSI
230 static ssize_t sriov_vf_total_msix_show(struct device *dev,
231 					struct device_attribute *attr,
232 					char *buf)
233 {
234 	struct pci_dev *pdev = to_pci_dev(dev);
235 	u32 vf_total_msix = 0;
236 
237 	device_lock(dev);
238 	if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
239 		goto unlock;
240 
241 	vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
242 unlock:
243 	device_unlock(dev);
244 	return sysfs_emit(buf, "%u\n", vf_total_msix);
245 }
246 static DEVICE_ATTR_RO(sriov_vf_total_msix);
247 
248 static ssize_t sriov_vf_msix_count_store(struct device *dev,
249 					 struct device_attribute *attr,
250 					 const char *buf, size_t count)
251 {
252 	struct pci_dev *vf_dev = to_pci_dev(dev);
253 	struct pci_dev *pdev = pci_physfn(vf_dev);
254 	int val, ret = 0;
255 
256 	if (kstrtoint(buf, 0, &val) < 0)
257 		return -EINVAL;
258 
259 	if (val < 0)
260 		return -EINVAL;
261 
262 	device_lock(&pdev->dev);
263 	if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
264 		ret = -EOPNOTSUPP;
265 		goto err_pdev;
266 	}
267 
268 	device_lock(&vf_dev->dev);
269 	if (vf_dev->driver) {
270 		/*
271 		 * A driver is already attached to this VF and has configured
272 		 * itself based on the current MSI-X vector count. Changing
273 		 * the vector size could mess up the driver, so block it.
274 		 */
275 		ret = -EBUSY;
276 		goto err_dev;
277 	}
278 
279 	ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);
280 
281 err_dev:
282 	device_unlock(&vf_dev->dev);
283 err_pdev:
284 	device_unlock(&pdev->dev);
285 	return ret ? : count;
286 }
287 static DEVICE_ATTR_WO(sriov_vf_msix_count);
288 #endif
289 
290 static struct attribute *sriov_vf_dev_attrs[] = {
291 #ifdef CONFIG_PCI_MSI
292 	&dev_attr_sriov_vf_msix_count.attr,
293 #endif
294 	NULL,
295 };
296 
297 static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj,
298 					  struct attribute *a, int n)
299 {
300 	struct device *dev = kobj_to_dev(kobj);
301 	struct pci_dev *pdev = to_pci_dev(dev);
302 
303 	if (!pdev->is_virtfn)
304 		return 0;
305 
306 	return a->mode;
307 }
308 
309 const struct attribute_group sriov_vf_dev_attr_group = {
310 	.attrs = sriov_vf_dev_attrs,
311 	.is_visible = sriov_vf_attrs_are_visible,
312 };
313 
314 static struct pci_dev *pci_iov_scan_device(struct pci_dev *dev, int id,
315 					   struct pci_bus *bus)
316 {
317 	struct pci_sriov *iov = dev->sriov;
318 	struct pci_dev *virtfn;
319 	int rc;
320 
321 	virtfn = pci_alloc_dev(bus);
322 	if (!virtfn)
323 		return ERR_PTR(-ENOMEM);
324 
325 	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
326 	virtfn->vendor = dev->vendor;
327 	virtfn->device = iov->vf_device;
328 	virtfn->is_virtfn = 1;
329 	virtfn->physfn = pci_dev_get(dev);
330 	virtfn->no_command_memory = 1;
331 
332 	if (id == 0)
333 		pci_read_vf_config_common(virtfn);
334 
335 	rc = pci_setup_device(virtfn);
336 	if (rc) {
337 		pci_dev_put(dev);
338 		pci_bus_put(virtfn->bus);
339 		kfree(virtfn);
340 		return ERR_PTR(rc);
341 	}
342 
343 	return virtfn;
344 }
345 
346 int pci_iov_add_virtfn(struct pci_dev *dev, int id)
347 {
348 	struct pci_bus *bus;
349 	struct pci_dev *virtfn;
350 	struct resource *res;
351 	int rc, i;
352 	u64 size;
353 
354 	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
355 	if (!bus) {
356 		rc = -ENOMEM;
357 		goto failed;
358 	}
359 
360 	virtfn = pci_iov_scan_device(dev, id, bus);
361 	if (IS_ERR(virtfn)) {
362 		rc = PTR_ERR(virtfn);
363 		goto failed0;
364 	}
365 
366 	virtfn->dev.parent = dev->dev.parent;
367 	virtfn->multifunction = 0;
368 
369 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
370 		int idx = pci_resource_num_from_vf_bar(i);
371 
372 		res = &dev->resource[idx];
373 		if (!res->parent)
374 			continue;
375 		virtfn->resource[i].name = pci_name(virtfn);
376 		virtfn->resource[i].flags = res->flags;
377 		size = pci_iov_resource_size(dev, idx);
378 		resource_set_range(&virtfn->resource[i],
379 				   res->start + size * id, size);
380 		rc = request_resource(res, &virtfn->resource[i]);
381 		BUG_ON(rc);
382 	}
383 
384 	pci_device_add(virtfn, virtfn->bus);
385 	rc = pci_iov_sysfs_link(dev, virtfn, id);
386 	if (rc)
387 		goto failed1;
388 
389 	pci_bus_add_device(virtfn);
390 
391 	return 0;
392 
393 failed1:
394 	pci_stop_and_remove_bus_device(virtfn);
395 	pci_dev_put(dev);
396 failed0:
397 	virtfn_remove_bus(dev->bus, bus);
398 failed:
399 
400 	return rc;
401 }
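
/*
 * VF BAR placement sketch for the loop above: every VF's BAR i is carved
 * out of the PF's VF BAR i resource at res->start + size * id, so the VFs
 * are laid out back to back.  For example (illustrative size), if VF BAR 0
 * is 16 KB, VF 3's BAR 0 occupies [res->start + 0xc000, res->start + 0xffff].
 */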
402 
403 void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
404 {
405 	char buf[VIRTFN_ID_LEN];
406 	struct pci_dev *virtfn;
407 
408 	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
409 					     pci_iov_virtfn_bus(dev, id),
410 					     pci_iov_virtfn_devfn(dev, id));
411 	if (!virtfn)
412 		return;
413 
414 	sprintf(buf, "virtfn%u", id);
415 	sysfs_remove_link(&dev->dev.kobj, buf);
416 	/*
417 	 * pci_stop_dev() could have been called for this virtfn already,
418 	 * so the directory for the virtfn may have been removed before.
419 	 * Double check to avoid spurious sysfs warnings.
420 	 */
421 	if (virtfn->dev.kobj.sd)
422 		sysfs_remove_link(&virtfn->dev.kobj, "physfn");
423 
424 	pci_stop_and_remove_bus_device(virtfn);
425 	virtfn_remove_bus(dev->bus, virtfn->bus);
426 
427 	/* balance pci_get_domain_bus_and_slot() */
428 	pci_dev_put(virtfn);
429 	pci_dev_put(dev);
430 }
431 
432 static ssize_t sriov_totalvfs_show(struct device *dev,
433 				   struct device_attribute *attr,
434 				   char *buf)
435 {
436 	struct pci_dev *pdev = to_pci_dev(dev);
437 
438 	return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
439 }
440 
441 static ssize_t sriov_numvfs_show(struct device *dev,
442 				 struct device_attribute *attr,
443 				 char *buf)
444 {
445 	struct pci_dev *pdev = to_pci_dev(dev);
446 	u16 num_vfs;
447 
448 	/* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
449 	device_lock(&pdev->dev);
450 	num_vfs = pdev->sriov->num_VFs;
451 	device_unlock(&pdev->dev);
452 
453 	return sysfs_emit(buf, "%u\n", num_vfs);
454 }
455 
456 /*
457  * num_vfs > 0; number of VFs to enable
458  * num_vfs = 0; disable all VFs
459  *
460  * Note: SRIOV spec does not allow partial VF
461  *	 disable, so it's all or none.
462  */
463 static ssize_t sriov_numvfs_store(struct device *dev,
464 				  struct device_attribute *attr,
465 				  const char *buf, size_t count)
466 {
467 	struct pci_dev *pdev = to_pci_dev(dev);
468 	int ret = 0;
469 	u16 num_vfs;
470 
471 	if (kstrtou16(buf, 0, &num_vfs) < 0)
472 		return -EINVAL;
473 
474 	if (num_vfs > pci_sriov_get_totalvfs(pdev))
475 		return -ERANGE;
476 
477 	device_lock(&pdev->dev);
478 
479 	if (num_vfs == pdev->sriov->num_VFs)
480 		goto exit;
481 
482 	/* is PF driver loaded */
483 	if (!pdev->driver) {
484 		pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
485 		ret = -ENOENT;
486 		goto exit;
487 	}
488 
489 	/* is PF driver loaded w/callback */
490 	if (!pdev->driver->sriov_configure) {
491 		pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
492 		ret = -ENOENT;
493 		goto exit;
494 	}
495 
496 	if (num_vfs == 0) {
497 		/* disable VFs */
498 		ret = pdev->driver->sriov_configure(pdev, 0);
499 		goto exit;
500 	}
501 
502 	/* enable VFs */
503 	if (pdev->sriov->num_VFs) {
504 		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
505 			 pdev->sriov->num_VFs, num_vfs);
506 		ret = -EBUSY;
507 		goto exit;
508 	}
509 
510 	ret = pdev->driver->sriov_configure(pdev, num_vfs);
511 	if (ret < 0)
512 		goto exit;
513 
514 	if (ret != num_vfs)
515 		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
516 			 num_vfs, ret);
517 
518 exit:
519 	device_unlock(&pdev->dev);
520 
521 	if (ret < 0)
522 		return ret;
523 
524 	return count;
525 }
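
/*
 * Usage note: this store method backs the sriov_numvfs sysfs attribute, so
 * (with an illustrative PF address) a request such as
 *
 *	echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *
 * ends up in pdev->driver->sriov_configure(pdev, 4), and writing 0 disables
 * all VFs again.
 */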
526 
527 static ssize_t sriov_offset_show(struct device *dev,
528 				 struct device_attribute *attr,
529 				 char *buf)
530 {
531 	struct pci_dev *pdev = to_pci_dev(dev);
532 
533 	return sysfs_emit(buf, "%u\n", pdev->sriov->offset);
534 }
535 
536 static ssize_t sriov_stride_show(struct device *dev,
537 				 struct device_attribute *attr,
538 				 char *buf)
539 {
540 	struct pci_dev *pdev = to_pci_dev(dev);
541 
542 	return sysfs_emit(buf, "%u\n", pdev->sriov->stride);
543 }
544 
545 static ssize_t sriov_vf_device_show(struct device *dev,
546 				    struct device_attribute *attr,
547 				    char *buf)
548 {
549 	struct pci_dev *pdev = to_pci_dev(dev);
550 
551 	return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device);
552 }
553 
554 static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
555 					    struct device_attribute *attr,
556 					    char *buf)
557 {
558 	struct pci_dev *pdev = to_pci_dev(dev);
559 
560 	return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe);
561 }
562 
563 static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
564 					     struct device_attribute *attr,
565 					     const char *buf, size_t count)
566 {
567 	struct pci_dev *pdev = to_pci_dev(dev);
568 	bool drivers_autoprobe;
569 
570 	if (kstrtobool(buf, &drivers_autoprobe) < 0)
571 		return -EINVAL;
572 
573 	pdev->sriov->drivers_autoprobe = drivers_autoprobe;
574 
575 	return count;
576 }
577 
578 static DEVICE_ATTR_RO(sriov_totalvfs);
579 static DEVICE_ATTR_RW(sriov_numvfs);
580 static DEVICE_ATTR_RO(sriov_offset);
581 static DEVICE_ATTR_RO(sriov_stride);
582 static DEVICE_ATTR_RO(sriov_vf_device);
583 static DEVICE_ATTR_RW(sriov_drivers_autoprobe);
584 
585 static struct attribute *sriov_pf_dev_attrs[] = {
586 	&dev_attr_sriov_totalvfs.attr,
587 	&dev_attr_sriov_numvfs.attr,
588 	&dev_attr_sriov_offset.attr,
589 	&dev_attr_sriov_stride.attr,
590 	&dev_attr_sriov_vf_device.attr,
591 	&dev_attr_sriov_drivers_autoprobe.attr,
592 #ifdef CONFIG_PCI_MSI
593 	&dev_attr_sriov_vf_total_msix.attr,
594 #endif
595 	NULL,
596 };
597 
598 static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj,
599 					  struct attribute *a, int n)
600 {
601 	struct device *dev = kobj_to_dev(kobj);
602 
603 	if (!dev_is_pf(dev))
604 		return 0;
605 
606 	return a->mode;
607 }
608 
609 const struct attribute_group sriov_pf_dev_attr_group = {
610 	.attrs = sriov_pf_dev_attrs,
611 	.is_visible = sriov_pf_attrs_are_visible,
612 };
613 
614 int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
615 {
616 	return 0;
617 }
618 
619 int __weak pcibios_sriov_disable(struct pci_dev *pdev)
620 {
621 	return 0;
622 }
623 
624 static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
625 {
626 	unsigned int i;
627 	int rc;
628 
629 	if (dev->no_vf_scan)
630 		return 0;
631 
632 	for (i = 0; i < num_vfs; i++) {
633 		rc = pci_iov_add_virtfn(dev, i);
634 		if (rc)
635 			goto failed;
636 	}
637 	return 0;
638 failed:
639 	while (i--)
640 		pci_iov_remove_virtfn(dev, i);
641 
642 	return rc;
643 }
644 
645 static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
646 {
647 	int rc;
648 	int i;
649 	int nres;
650 	u16 initial;
651 	struct resource *res;
652 	struct pci_dev *pdev;
653 	struct pci_sriov *iov = dev->sriov;
654 	int bars = 0;
655 	int bus;
656 
657 	if (!nr_virtfn)
658 		return 0;
659 
660 	if (iov->num_VFs)
661 		return -EINVAL;
662 
663 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
664 	if (initial > iov->total_VFs ||
665 	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
666 		return -EIO;
667 
668 	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
669 	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
670 		return -EINVAL;
671 
672 	nres = 0;
673 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
674 		int idx = pci_resource_num_from_vf_bar(i);
675 		resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx);
676 
677 		bars |= (1 << idx);
678 		res = &dev->resource[idx];
679 		if (vf_bar_sz * nr_virtfn > resource_size(res))
680 			continue;
681 		if (res->parent)
682 			nres++;
683 	}
684 	if (nres != iov->nres) {
685 		pci_err(dev, "not enough MMIO resources for SR-IOV\n");
686 		return -ENOMEM;
687 	}
688 
689 	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
690 	if (bus > dev->bus->busn_res.end) {
691 		pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
692 			nr_virtfn, bus, &dev->bus->busn_res);
693 		return -ENOMEM;
694 	}
695 
696 	if (pci_enable_resources(dev, bars)) {
697 		pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
698 		return -ENOMEM;
699 	}
700 
701 	if (iov->link != dev->devfn) {
702 		pdev = pci_get_slot(dev->bus, iov->link);
703 		if (!pdev)
704 			return -ENODEV;
705 
706 		if (!pdev->is_physfn) {
707 			pci_dev_put(pdev);
708 			return -ENOSYS;
709 		}
710 
711 		rc = sysfs_create_link(&dev->dev.kobj,
712 					&pdev->dev.kobj, "dep_link");
713 		pci_dev_put(pdev);
714 		if (rc)
715 			return rc;
716 	}
717 
718 	iov->initial_VFs = initial;
719 	if (nr_virtfn < initial)
720 		initial = nr_virtfn;
721 
722 	rc = pcibios_sriov_enable(dev, initial);
723 	if (rc) {
724 		pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
725 		goto err_pcibios;
726 	}
727 
728 	pci_iov_set_numvfs(dev, nr_virtfn);
729 	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
730 	pci_cfg_access_lock(dev);
731 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
732 	msleep(100);
733 	pci_cfg_access_unlock(dev);
734 
735 	rc = sriov_add_vfs(dev, initial);
736 	if (rc)
737 		goto err_pcibios;
738 
739 	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
740 	iov->num_VFs = nr_virtfn;
741 
742 	return 0;
743 
744 err_pcibios:
745 	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
746 	pci_cfg_access_lock(dev);
747 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
748 	ssleep(1);
749 	pci_cfg_access_unlock(dev);
750 
751 	pcibios_sriov_disable(dev);
752 
753 	if (iov->link != dev->devfn)
754 		sysfs_remove_link(&dev->dev.kobj, "dep_link");
755 
756 	pci_iov_set_numvfs(dev, 0);
757 	return rc;
758 }
759 
760 static void sriov_del_vfs(struct pci_dev *dev)
761 {
762 	struct pci_sriov *iov = dev->sriov;
763 	int i;
764 
765 	for (i = 0; i < iov->num_VFs; i++)
766 		pci_iov_remove_virtfn(dev, i);
767 }
768 
769 static void sriov_disable(struct pci_dev *dev)
770 {
771 	struct pci_sriov *iov = dev->sriov;
772 
773 	if (!iov->num_VFs)
774 		return;
775 
776 	sriov_del_vfs(dev);
777 	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
778 	pci_cfg_access_lock(dev);
779 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
780 	ssleep(1);
781 	pci_cfg_access_unlock(dev);
782 
783 	pcibios_sriov_disable(dev);
784 
785 	if (iov->link != dev->devfn)
786 		sysfs_remove_link(&dev->dev.kobj, "dep_link");
787 
788 	iov->num_VFs = 0;
789 	pci_iov_set_numvfs(dev, 0);
790 }
791 
792 static int sriov_init(struct pci_dev *dev, int pos)
793 {
794 	int i, bar64;
795 	int rc;
796 	int nres;
797 	u32 pgsz;
798 	u16 ctrl, total;
799 	struct pci_sriov *iov;
800 	struct resource *res;
801 	const char *res_name;
802 	struct pci_dev *pdev;
803 	u32 sriovbars[PCI_SRIOV_NUM_BARS];
804 
805 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
806 	if (ctrl & PCI_SRIOV_CTRL_VFE) {
807 		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
808 		ssleep(1);
809 	}
810 
811 	ctrl = 0;
812 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
813 		if (pdev->is_physfn)
814 			goto found;
815 
816 	pdev = NULL;
817 	if (pci_ari_enabled(dev->bus))
818 		ctrl |= PCI_SRIOV_CTRL_ARI;
819 
820 found:
821 	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
822 
823 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
824 	if (!total)
825 		return 0;
826 
827 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
828 	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
829 	pgsz &= ~((1 << i) - 1);
830 	if (!pgsz)
831 		return -EIO;
832 
833 	pgsz &= ~(pgsz - 1);
834 	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
835 
836 	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
837 	if (!iov)
838 		return -ENOMEM;
839 
840 	/* Sizing SR-IOV BARs with VF Enable cleared - no decode */
841 	__pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
842 			   pos + PCI_SRIOV_BAR, sriovbars);
843 
844 	nres = 0;
845 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
846 		int idx = pci_resource_num_from_vf_bar(i);
847 
848 		res = &dev->resource[idx];
849 		res_name = pci_resource_name(dev, idx);
850 
851 		/*
852 		 * If it is already FIXED, don't change it, something
853 		 * (perhaps EA or header fixups) wants it this way.
854 		 */
855 		if (res->flags & IORESOURCE_PCI_FIXED)
856 			bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
857 		else
858 			bar64 = __pci_read_base(dev, pci_bar_unknown, res,
859 						pos + PCI_SRIOV_BAR + i * 4,
860 						&sriovbars[i]);
861 		if (!res->flags)
862 			continue;
863 		if (resource_size(res) & (PAGE_SIZE - 1)) {
864 			rc = -EIO;
865 			goto failed;
866 		}
867 		iov->barsz[i] = resource_size(res);
868 		resource_set_size(res, resource_size(res) * total);
869 		pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
870 			 res_name, res, i, total);
871 		i += bar64;
872 		nres++;
873 	}
874 
875 	iov->pos = pos;
876 	iov->nres = nres;
877 	iov->ctrl = ctrl;
878 	iov->total_VFs = total;
879 	iov->driver_max_VFs = total;
880 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
881 	iov->pgsz = pgsz;
882 	iov->self = dev;
883 	iov->drivers_autoprobe = true;
884 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
885 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
886 	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
887 		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
888 	iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR);
889 
890 	if (pdev)
891 		iov->dev = pci_dev_get(pdev);
892 	else
893 		iov->dev = dev;
894 
895 	dev->sriov = iov;
896 	dev->is_physfn = 1;
897 	rc = compute_max_vf_buses(dev);
898 	if (rc)
899 		goto fail_max_buses;
900 
901 	return 0;
902 
903 fail_max_buses:
904 	dev->sriov = NULL;
905 	dev->is_physfn = 0;
906 failed:
907 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
908 		res = &dev->resource[pci_resource_num_from_vf_bar(i)];
909 		res->flags = 0;
910 	}
911 
912 	kfree(iov);
913 	return rc;
914 }
915 
916 static void sriov_release(struct pci_dev *dev)
917 {
918 	BUG_ON(dev->sriov->num_VFs);
919 
920 	if (dev != dev->sriov->dev)
921 		pci_dev_put(dev->sriov->dev);
922 
923 	kfree(dev->sriov);
924 	dev->sriov = NULL;
925 }
926 
927 static void sriov_restore_vf_rebar_state(struct pci_dev *dev)
928 {
929 	unsigned int pos, nbars, i;
930 	u32 ctrl;
931 
932 	pos = pci_iov_vf_rebar_cap(dev);
933 	if (!pos)
934 		return;
935 
936 	pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
937 	nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl);
938 
939 	for (i = 0; i < nbars; i++, pos += 8) {
940 		int bar_idx, size;
941 
942 		pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
943 		bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl);
944 		size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]);
945 		ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE;
946 		ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size);
947 		pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl);
948 	}
949 }
950 
951 static void sriov_restore_state(struct pci_dev *dev)
952 {
953 	int i;
954 	u16 ctrl;
955 	struct pci_sriov *iov = dev->sriov;
956 
957 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
958 	if (ctrl & PCI_SRIOV_CTRL_VFE)
959 		return;
960 
961 	/*
962 	 * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
963 	 * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
964 	 */
965 	ctrl &= ~PCI_SRIOV_CTRL_ARI;
966 	ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
967 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
968 
969 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
970 		pci_update_resource(dev, pci_resource_num_from_vf_bar(i));
971 
972 	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
973 	pci_iov_set_numvfs(dev, iov->num_VFs);
974 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
975 	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
976 		msleep(100);
977 }
978 
979 /**
980  * pci_iov_init - initialize the IOV capability
981  * @dev: the PCI device
982  *
983  * Returns 0 on success, or negative on failure.
984  */
985 int pci_iov_init(struct pci_dev *dev)
986 {
987 	int pos;
988 
989 	if (!pci_is_pcie(dev))
990 		return -ENODEV;
991 
992 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
993 	if (pos)
994 		return sriov_init(dev, pos);
995 
996 	return -ENODEV;
997 }
998 
999 /**
1000  * pci_iov_release - release resources used by the IOV capability
1001  * @dev: the PCI device
1002  */
1003 void pci_iov_release(struct pci_dev *dev)
1004 {
1005 	if (dev->is_physfn)
1006 		sriov_release(dev);
1007 }
1008 
1009 /**
1010  * pci_iov_remove - clean up SR-IOV state after PF driver is detached
1011  * @dev: the PCI device
1012  */
1013 void pci_iov_remove(struct pci_dev *dev)
1014 {
1015 	struct pci_sriov *iov = dev->sriov;
1016 
1017 	if (!dev->is_physfn)
1018 		return;
1019 
1020 	iov->driver_max_VFs = iov->total_VFs;
1021 	if (iov->num_VFs)
1022 		pci_warn(dev, "driver left SR-IOV enabled after remove\n");
1023 }
1024 
1025 /**
1026  * pci_iov_update_resource - update a VF BAR
1027  * @dev: the PCI device
1028  * @resno: the resource number
1029  *
1030  * Update a VF BAR in the SR-IOV capability of a PF.
1031  */
1032 void pci_iov_update_resource(struct pci_dev *dev, int resno)
1033 {
1034 	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
1035 	struct resource *res = pci_resource_n(dev, resno);
1036 	int vf_bar = pci_resource_num_to_vf_bar(resno);
1037 	struct pci_bus_region region;
1038 	u16 cmd;
1039 	u32 new;
1040 	int reg;
1041 
1042 	/*
1043 	 * The generic pci_restore_bars() path calls this for all devices,
1044 	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
1045 	 * have nothing to do.
1046 	 */
1047 	if (!iov)
1048 		return;
1049 
1050 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
1051 	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
1052 		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
1053 			 vf_bar, res);
1054 		return;
1055 	}
1056 
1057 	/*
1058 	 * Ignore unimplemented BARs, unused resource slots for 64-bit
1059 	 * BARs, and non-movable resources, e.g., those described via
1060 	 * Enhanced Allocation.
1061 	 */
1062 	if (!res->flags)
1063 		return;
1064 
1065 	if (res->flags & IORESOURCE_UNSET)
1066 		return;
1067 
1068 	if (res->flags & IORESOURCE_PCI_FIXED)
1069 		return;
1070 
1071 	pcibios_resource_to_bus(dev->bus, &region, res);
1072 	new = region.start;
1073 	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
1074 
1075 	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
1076 	pci_write_config_dword(dev, reg, new);
1077 	if (res->flags & IORESOURCE_MEM_64) {
1078 		new = region.start >> 16 >> 16;
1079 		pci_write_config_dword(dev, reg + 4, new);
1080 	}
1081 }
1082 
1083 resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
1084 						      int resno)
1085 {
1086 	return pci_iov_resource_size(dev, resno);
1087 }
1088 
1089 /**
1090  * pci_sriov_resource_alignment - get resource alignment for VF BAR
1091  * @dev: the PCI device
1092  * @resno: the resource number
1093  *
1094  * Returns the alignment of the VF BAR found in the SR-IOV capability.
1095  * This is not the same as the resource size which is defined as
1096  * the VF BAR size multiplied by the number of VFs.  The alignment
1097  * is just the VF BAR size.
1098  */
1099 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
1100 {
1101 	return pcibios_iov_resource_alignment(dev, resno);
1102 }
1103 
1104 /**
1105  * pci_restore_iov_state - restore the state of the IOV capability
1106  * @dev: the PCI device
1107  */
1108 void pci_restore_iov_state(struct pci_dev *dev)
1109 {
1110 	if (dev->is_physfn) {
1111 		sriov_restore_vf_rebar_state(dev);
1112 		sriov_restore_state(dev);
1113 	}
1114 }
1115 
1116 /**
1117  * pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs
1118  * @dev: the PCI device
1119  * @auto_probe: set VF drivers auto probe flag
1120  */
1121 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe)
1122 {
1123 	if (dev->is_physfn)
1124 		dev->sriov->drivers_autoprobe = auto_probe;
1125 }
1126 
1127 /**
1128  * pci_iov_bus_range - find bus range used by Virtual Functions
1129  * @bus: the PCI bus
1130  *
1131  * Returns the max number of buses (excluding the current one) used by Virtual
1132  * Functions.
1133  */
1134 int pci_iov_bus_range(struct pci_bus *bus)
1135 {
1136 	int max = 0;
1137 	struct pci_dev *dev;
1138 
1139 	list_for_each_entry(dev, &bus->devices, bus_list) {
1140 		if (!dev->is_physfn)
1141 			continue;
1142 		if (dev->sriov->max_VF_buses > max)
1143 			max = dev->sriov->max_VF_buses;
1144 	}
1145 
1146 	return max ? max - bus->number : 0;
1147 }
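
/*
 * Example (made-up numbers): a PF on bus 0x20 whose compute_max_vf_buses()
 * pass recorded max_VF_buses = 0x22 makes this return 2, i.e. two extra
 * bus numbers must be reserved behind the bridge for its VFs.
 */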
1148 
1149 /**
1150  * pci_enable_sriov - enable the SR-IOV capability
1151  * @dev: the PCI device
1152  * @nr_virtfn: number of virtual functions to enable
1153  *
1154  * Returns 0 on success, or negative on failure.
1155  */
1156 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1157 {
1158 	might_sleep();
1159 
1160 	if (!dev->is_physfn)
1161 		return -ENOSYS;
1162 
1163 	return sriov_enable(dev, nr_virtfn);
1164 }
1165 EXPORT_SYMBOL_GPL(pci_enable_sriov);
1166 
1167 /**
1168  * pci_disable_sriov - disable the SR-IOV capability
1169  * @dev: the PCI device
1170  */
1171 void pci_disable_sriov(struct pci_dev *dev)
1172 {
1173 	might_sleep();
1174 
1175 	if (!dev->is_physfn)
1176 		return;
1177 
1178 	sriov_disable(dev);
1179 }
1180 EXPORT_SYMBOL_GPL(pci_disable_sriov);
1181 
1182 /**
1183  * pci_num_vf - return number of VFs associated with a PF
1184  * @dev: the PCI device
1185  *
1186  * Returns number of VFs, or 0 if SR-IOV is not enabled.
1187  */
1188 int pci_num_vf(struct pci_dev *dev)
1189 {
1190 	if (!dev->is_physfn)
1191 		return 0;
1192 
1193 	return dev->sriov->num_VFs;
1194 }
1195 EXPORT_SYMBOL_GPL(pci_num_vf);
1196 
1197 /**
1198  * pci_vfs_assigned - returns number of VFs assigned to a guest
1199  * @dev: the PCI device
1200  *
1201  * Returns number of VFs belonging to this device that are assigned to a guest.
1202  * If the device is not a physical function, returns 0.
1203  */
1204 int pci_vfs_assigned(struct pci_dev *dev)
1205 {
1206 	struct pci_dev *vfdev;
1207 	unsigned int vfs_assigned = 0;
1208 	unsigned short dev_id;
1209 
1210 	/* only search if we are a PF */
1211 	if (!dev->is_physfn)
1212 		return 0;
1213 
1214 	/*
1215 	 * Determine the device ID for the VFs; the vendor ID will be the
1216 	 * same as the PF's, so there is no need to check it.
1217 	 */
1218 	dev_id = dev->sriov->vf_device;
1219 
1220 	/* loop through all the VFs to see if we own any that are assigned */
1221 	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
1222 	while (vfdev) {
1223 		/*
1224 		 * It is considered assigned if it is a virtual function with
1225 		 * our dev as the physical function and the assigned bit is set
1226 		 */
1227 		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
1228 			pci_is_dev_assigned(vfdev))
1229 			vfs_assigned++;
1230 
1231 		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
1232 	}
1233 
1234 	return vfs_assigned;
1235 }
1236 EXPORT_SYMBOL_GPL(pci_vfs_assigned);
1237 
1238 /**
1239  * pci_sriov_set_totalvfs -- reduce the TotalVFs available
1240  * @dev: the PCI PF device
1241  * @numvfs: number that should be used for TotalVFs supported
1242  *
1243  * Should be called from PF driver's probe routine with
1244  * device's mutex held.
1245  *
1246  * Returns 0 if PF is an SRIOV-capable device and the
1247  * value of numvfs is valid. If not a PF, return -ENOSYS;
1248  * if numvfs is invalid return -EINVAL;
1249  * if VFs already enabled, return -EBUSY.
1250  */
1251 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
1252 {
1253 	if (!dev->is_physfn)
1254 		return -ENOSYS;
1255 
1256 	if (numvfs > dev->sriov->total_VFs)
1257 		return -EINVAL;
1258 
1259 	/* Shouldn't change if VFs already enabled */
1260 	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
1261 		return -EBUSY;
1262 
1263 	dev->sriov->driver_max_VFs = numvfs;
1264 	return 0;
1265 }
1266 EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
1267 
1268 /**
1269  * pci_sriov_get_totalvfs -- get total VFs supported on this device
1270  * @dev: the PCI PF device
1271  *
1272  * For a PCIe device with SRIOV support, return the PCIe
1273  * SRIOV capability value of TotalVFs or the value of driver_max_VFs
1274  * if the driver reduced it.  Otherwise 0.
1275  */
1276 int pci_sriov_get_totalvfs(struct pci_dev *dev)
1277 {
1278 	if (!dev->is_physfn)
1279 		return 0;
1280 
1281 	return dev->sriov->driver_max_VFs;
1282 }
1283 EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
1284 
1285 /**
1286  * pci_sriov_configure_simple - helper to configure SR-IOV
1287  * @dev: the PCI device
1288  * @nr_virtfn: number of virtual functions to enable, 0 to disable
1289  *
1290  * Enable or disable SR-IOV for devices that don't require any PF setup
1291  * before enabling SR-IOV.  Return value is negative on error, or number of
1292  * VFs allocated on success.
1293  */
1294 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
1295 {
1296 	int rc;
1297 
1298 	might_sleep();
1299 
1300 	if (!dev->is_physfn)
1301 		return -ENODEV;
1302 
1303 	if (pci_vfs_assigned(dev)) {
1304 		pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
1305 		return -EPERM;
1306 	}
1307 
1308 	if (nr_virtfn == 0) {
1309 		sriov_disable(dev);
1310 		return 0;
1311 	}
1312 
1313 	rc = sriov_enable(dev, nr_virtfn);
1314 	if (rc < 0)
1315 		return rc;
1316 
1317 	return nr_virtfn;
1318 }
1319 EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
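
/*
 * Typical use: a PF driver that needs no PF-side setup can plug this helper
 * straight into its struct pci_driver; the driver and table names below are
 * hypothetical, only the .sriov_configure hook is real:
 *
 *	static struct pci_driver my_pf_driver = {
 *		.name		 = "my_pf",
 *		.id_table	 = my_pf_ids,
 *		.probe		 = my_pf_probe,
 *		.remove		 = my_pf_remove,
 *		.sriov_configure = pci_sriov_configure_simple,
 *	};
 *
 * sriov_numvfs_store() above then forwards the requested VF count to it.
 */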
1320 
1321 /**
1322  * pci_iov_vf_bar_set_size - set a new size for a VF BAR
1323  * @dev: the PCI device
1324  * @resno: the resource number
1325  * @size: new size as defined in the spec (0=1MB, 31=128TB)
1326  *
1327  * Set the new size of a VF BAR that supports VF resizable BAR capability.
1328  * Unlike pci_resize_resource(), this does not cause the resource that
1329  * reserves the MMIO space (originally up to total_VFs) to be resized, which
1330  * means that following calls to pci_enable_sriov() can fail if the resources
1331  * no longer fit.
1332  *
1333  * Return: 0 on success, or negative on failure.
1334  */
1335 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
1336 {
1337 	u32 sizes;
1338 	int ret;
1339 
1340 	if (!pci_resource_is_iov(resno))
1341 		return -EINVAL;
1342 
1343 	if (pci_iov_is_memory_decoding_enabled(dev))
1344 		return -EBUSY;
1345 
1346 	sizes = pci_rebar_get_possible_sizes(dev, resno);
1347 	if (!sizes)
1348 		return -ENOTSUPP;
1349 
1350 	if (!(sizes & BIT(size)))
1351 		return -EINVAL;
1352 
1353 	ret = pci_rebar_set_size(dev, resno, size);
1354 	if (ret)
1355 		return ret;
1356 
1357 	pci_iov_resource_set_size(dev, resno, pci_rebar_size_to_bytes(size));
1358 
1359 	return 0;
1360 }
1361 EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size);
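
/*
 * Encoding note (following pci_rebar_size_to_bytes() used above): a size
 * value n selects a 2^(n + 20) byte VF BAR, so for example an 8 MB BAR is
 * requested with size = 3, provided bit 3 is set in the mask returned by
 * pci_rebar_get_possible_sizes().
 */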
1362 
1363 /**
1364  * pci_iov_vf_bar_get_sizes - get VF BAR sizes that allow creating up to num_vfs
1365  * @dev: the PCI device
1366  * @resno: the resource number
1367  * @num_vfs: number of VFs
1368  *
1369  * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within
1370  * the currently assigned size of the resource @resno.
1371  *
1372  * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB,
1373  * bit 31=128TB).
1374  */
1375 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
1376 {
1377 	u64 vf_len = pci_resource_len(dev, resno);
1378 	u32 sizes;
1379 
1380 	if (!num_vfs)
1381 		return 0;
1382 
1383 	do_div(vf_len, num_vfs);
1384 	sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M);
1385 
1386 	return sizes & pci_rebar_get_possible_sizes(dev, resno);
1387 }
1388 EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes);
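
/*
 * Worked example for the math above (illustrative numbers): with 256 MB
 * reserved for this VF BAR and num_vfs = 4, vf_len is 64 MB;
 * roundup_pow_of_two(64 MB + 1) - 1 shifted right by ilog2(SZ_1M) gives
 * 0x7f, i.e. candidate sizes of 1 MB through 64 MB, which is then masked
 * by what pci_rebar_get_possible_sizes() reports the device supports.
 */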
1389