xref: /linux/drivers/pci/iov.c (revision f884ab15afdc5514e88105c92a4e2e1e6539869a)
/*
 * drivers/pci/iov.c
 *
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 *
 * PCI Express I/O Virtualization (IOV) support.
 *   Single Root IOV 1.0
 *   Address Translation Service 1.0
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/pci-ats.h>
#include "pci.h"

#define VIRTFN_ID_LEN	16

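/*
 * virtfn_bus() and virtfn_devfn() derive the routing ID of Virtual
 * Function @id from the PF's own bus/devfn plus the First VF Offset and
 * VF Stride advertised in the SR-IOV capability: the low byte is the
 * devfn, and any carry out of it selects a higher bus number.
 */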
static inline u8 virtfn_bus(struct pci_dev *dev, int id)
{
	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
				    dev->sriov->stride * id) >> 8);
}

static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
{
	return (dev->devfn + dev->sriov->offset +
		dev->sriov->stride * id) & 0xff;
}

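/*
 * virtfn_add_bus() returns the child bus that will hold a VF on bus
 * @busnr, creating it (and its bus number resource) if it does not exist
 * yet; virtfn_remove_bus() drops such a bus again once it has no devices
 * left.
 */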
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	pci_bus_insert_busn_res(child, busnr, busnr);
	bus->is_added = 1;

	return child;
}

static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	BUG_ON(!child);

	if (list_empty(&child->devices))
		pci_remove_bus(child);
}

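/*
 * virtfn_add() instantiates Virtual Function @id of the PF @dev: it
 * allocates a pci_dev, places it on the right bus/devfn, carves its BARs
 * out of the PF's IOV resources (each IOV BAR holds total_VFs equal
 * slices), registers the device, and links it to the PF via the
 * "virtfn%u" and "physfn" sysfs symlinks.
 */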
static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
	int i;
	int rc;
	u64 size;
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct resource *res;
	struct pci_sriov *iov = dev->sriov;

	virtfn = alloc_pci_dev();
	if (!virtfn)
		return -ENOMEM;

	mutex_lock(&iov->dev->sriov->lock);
	virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
	if (!virtfn->bus) {
		kfree(virtfn);
		mutex_unlock(&iov->dev->sriov->lock);
		return -ENOMEM;
	}
	virtfn->devfn = virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
	pci_setup_device(virtfn);
	virtfn->dev.parent = dev->dev.parent;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = resource_size(res);
		do_div(size, iov->total_VFs);
		virtfn->resource[i].start = res->start + size * id;
		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
		rc = request_resource(res, &virtfn->resource[i]);
		BUG_ON(rc);
	}

	if (reset)
		__pci_reset_function(virtfn);

	pci_device_add(virtfn, virtfn->bus);
	mutex_unlock(&iov->dev->sriov->lock);

	virtfn->physfn = pci_dev_get(dev);
	virtfn->is_virtfn = 1;

	rc = pci_bus_add_device(virtfn);
	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed1;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed2;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed2:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
	pci_dev_put(dev);
	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
	mutex_unlock(&iov->dev->sriov->lock);

	return rc;
}

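/*
 * virtfn_remove() undoes virtfn_add() for VF @id: it removes the sysfs
 * links, optionally resets the function, removes the VF's pci_dev (and
 * its bus if that is now empty), and drops the reference taken on the PF.
 */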
static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_bus *bus;
	struct pci_dev *virtfn;
	struct pci_sriov *iov = dev->sriov;

	bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
	if (!bus)
		return;

	virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	pci_dev_put(virtfn);

	if (reset) {
		device_release_driver(&virtfn->dev);
		__pci_reset_function(virtfn);
	}

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
	mutex_unlock(&iov->dev->sriov->lock);

	pci_dev_put(dev);
}

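/*
 * sriov_migration() is called from the PF driver's interrupt handler via
 * pci_sriov_migration(): if VF Migration is supported and the VF
 * Migration Status bit is set, the actual state changes are handled in
 * process context by scheduling iov->mtask.
 */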
static int sriov_migration(struct pci_dev *dev)
{
	u16 status;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return 0;

	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
		return 0;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
	if (!(status & PCI_SRIOV_STATUS_VFM))
		return 0;

	schedule_work(&iov->mtask);

	return 1;
}

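/*
 * sriov_migration_task() walks the VF Migration State Array for the VFs
 * beyond InitialVFs: a VF in the Migrate-In state is marked Active and
 * added (with a function reset); a VF in the Migrate-Out state is removed
 * and marked Unavailable, and re-added without a reset if the device
 * flips it back to Active.  The VF Migration Status bit is cleared once
 * the whole array has been processed.
 */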
static void sriov_migration_task(struct work_struct *work)
{
	int i;
	u8 state;
	u16 status;
	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

	for (i = iov->initial_VFs; i < iov->num_VFs; i++) {
		state = readb(iov->mstate + i);
		if (state == PCI_SRIOV_VFM_MI) {
			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 1);
		} else if (state == PCI_SRIOV_VFM_MO) {
			virtfn_remove(iov->self, i, 1);
			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 0);
		}
	}

	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
	status &= ~PCI_SRIOV_STATUS_VFM;
	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}

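/*
 * sriov_enable_migration()/sriov_disable_migration() map and unmap the VF
 * Migration State Array (located via the BIR and offset encoded in the
 * VFM register) and toggle the VF Migration Enable and VF Migration
 * Interrupt Enable bits in the SR-IOV control register.
 */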
static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	if (nr_virtfn <= iov->initial_VFs)
		return 0;

	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	table = PCI_SRIOV_VFM_OFFSET(table);
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}

static void sriov_disable_migration(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	cancel_work_sync(&iov->mtask);
	iounmap(iov->mstate);
}

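/*
 * sriov_enable() performs the enable sequence behind pci_enable_sriov():
 * it validates nr_virtfn against InitialVFs/TotalVFs, programs NumVFs,
 * reads back the resulting VF offset/stride, checks that the IOV BARs and
 * VF bus numbers are available, sets VF Enable and VF MSE, waits the
 * 100ms required before the VFs may be accessed, and then adds the VF
 * devices (and migration support, if present).
 */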
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i, j;
	int nres;
	u16 offset, stride, initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;

	if (!nr_virtfn)
		return 0;

	if (iov->num_VFs)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (nr_virtfn > 1 && !stride))
		return -EIO;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		bars |= (1 << (i + PCI_IOV_RESOURCES));
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	iov->offset = offset;
	iov->stride = stride;

	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		dev_err(&dev->dev, "SR-IOV: IOV BARs not allocated\n");
		return -ENOMEM;
	}

	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		pci_dev_put(pdev);

		if (!pdev->is_physfn)
			return -ENODEV;

		rc = sysfs_create_link(&dev->dev.kobj,
					&pdev->dev.kobj, "dep_link");
		if (rc)
			return rc;
	}

	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);
	pci_cfg_access_unlock(dev);

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	for (i = 0; i < initial; i++) {
		rc = virtfn_add(dev, i, 0);
		if (rc)
			goto failed;
	}

	if (iov->cap & PCI_SRIOV_CAP_VFM) {
		rc = sriov_enable_migration(dev, nr_virtfn);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

failed:
	for (j = 0; j < i; j++)
		virtfn_remove(dev, j, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	return rc;
}

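/*
 * sriov_disable() is the inverse of sriov_enable(): it removes all VF
 * devices, clears VF Enable and VF MSE in the SR-IOV control register,
 * and drops the "dep_link" dependency symlink if one was created.
 */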
static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->num_VFs; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
}

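/*
 * sriov_init() parses the SR-IOV capability found at config offset @pos
 * during PF enumeration: it makes sure VF Enable is off, reads TotalVFs,
 * VF offset/stride and the function dependency link, programs the ARI
 * Capable Hierarchy bit and the system page size, sizes each IOV BAR to
 * hold TotalVFs copies of the VF BAR, and allocates the struct pci_sriov
 * that marks @dev as a Physical Function.
 */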
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		i += __pci_read_base(dev, pci_bar_unknown, res,
				     pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}

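/*
 * sriov_release() frees the struct pci_sriov allocated by sriov_init();
 * it may only be called once all VFs have been disabled.
 */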
static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->num_VFs);

	if (dev != dev->sriov->dev)
		pci_dev_put(dev->sriov->dev);

	mutex_destroy(&dev->sriov->lock);

	kfree(dev->sriov);
	dev->sriov = NULL;
}

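/*
 * sriov_restore_state() reprograms the SR-IOV registers (IOV BARs, system
 * page size, NumVFs and the control register) after the PF's config space
 * has been lost, e.g. across a reset or resume, provided VF Enable is not
 * already set in hardware.
 */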
static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}

/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		return sriov_init(dev, pos);

	return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_release(dev);
}

/**
 * pci_iov_resource_bar - get position of the SR-IOV BAR
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns position of the BAR encapsulated in the SR-IOV capability.
 */
int pci_iov_resource_bar(struct pci_dev *dev, int resno,
			 enum pci_bar_type *type)
{
	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
		return 0;

	BUG_ON(!dev->is_physfn);

	*type = pci_bar_unknown;

	return dev->sriov->pos + PCI_SRIOV_BAR +
		4 * (resno - PCI_IOV_RESOURCES);
}

/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size which is defined as
 * the VF BAR size multiplied by the number of VFs.  The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	struct resource tmp;
	enum pci_bar_type type;
	int reg = pci_iov_resource_bar(dev, resno, &type);

	if (!reg)
		return 0;

	__pci_read_base(dev, type, &tmp, reg);
	return resource_alignment(&tmp);
}

/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_restore_state(dev);
}

/**
 * pci_iov_bus_range - find bus range used by Virtual Functions
 * @bus: the PCI bus
 *
 * Returns the max number of buses (excluding the current one) used by
 * Virtual Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
	int max = 0;
	u8 busnr;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!dev->is_physfn)
			continue;
		busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1);
		if (busnr > max)
			max = busnr;
	}

	return max ? max - bus->number : 0;
}

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	might_sleep();

	if (!dev->is_physfn)
		return -ENODEV;

	return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
	might_sleep();

	if (!dev->is_physfn)
		return;

	sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);

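/*
 * Typical use (illustrative sketch only, not part of this driver): a PF
 * driver wires these calls into its sriov_configure() callback so that
 * writing the "sriov_numvfs" sysfs attribute enables or disables VFs.
 * The "foo" names below are hypothetical:
 *
 *	static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
 *	{
 *		int rc;
 *
 *		if (num_vfs == 0) {
 *			pci_disable_sriov(pdev);
 *			return 0;
 *		}
 *
 *		rc = pci_enable_sriov(pdev, num_vfs);
 *		return rc ? rc : num_vfs;
 *	}
 *
 *	static struct pci_driver foo_driver = {
 *		.name		 = "foo",
 *		.sriov_configure = foo_sriov_configure,
 *	};
 */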
/**
 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
 * @dev: the PCI device
 *
 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
 *
 * The Physical Function driver is responsible for registering an IRQ
 * handler using the VF Migration Interrupt Message Number and for calling
 * this function when the interrupt is generated by the hardware.
 */
irqreturn_t pci_sriov_migration(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return IRQ_NONE;

	return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);

/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);

/**
 * pci_vfs_assigned - return number of VFs that are assigned to a guest
 * @dev: the PCI device
 *
 * Returns number of VFs belonging to this device that are assigned to a
 * guest.  If the device is not a Physical Function, returns 0.
 */
int pci_vfs_assigned(struct pci_dev *dev)
{
	struct pci_dev *vfdev;
	unsigned int vfs_assigned = 0;
	unsigned short dev_id;

	/* only search if we are a PF */
	if (!dev->is_physfn)
		return 0;

	/*
	 * determine the device ID for the VFs; the vendor ID will be the
	 * same as the PF, so there is no need to check for that one
	 */
	pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
	while (vfdev) {
		/*
		 * It is considered assigned if it is a virtual function with
		 * our dev as the physical function and the assigned bit is set
		 */
		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
			vfs_assigned++;

		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
	}

	return vfs_assigned;
}
EXPORT_SYMBOL_GPL(pci_vfs_assigned);

/**
 * pci_sriov_set_totalvfs - reduce the TotalVFs available
 * @dev: the PCI PF device
 * @numvfs: number that should be used for TotalVFs supported
 *
 * Should be called from a PF driver's probe routine with the device's
 * mutex held.
 *
 * Returns 0 if PF is an SR-IOV-capable device and the value of numvfs is
 * valid.  If not a PF, or numvfs is greater than TotalVFs, return -EINVAL;
 * if VFs are already enabled, return -EBUSY.
 */
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
	if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs))
		return -EINVAL;

	/* Shouldn't change if VFs already enabled */
	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
		return -EBUSY;
	else
		dev->sriov->driver_max_VFs = numvfs;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);

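/*
 * Illustrative sketch only (hypothetical "foo" driver and FOO_MAX_VFS
 * limit): a PF driver that can support fewer VFs than the hardware
 * advertises would cap TotalVFs from its probe routine, before userspace
 * gets a chance to enable any:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc;
 *		...
 *		rc = pci_sriov_set_totalvfs(pdev, FOO_MAX_VFS);
 *		if (rc)
 *			return rc;
 *		...
 *	}
 */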
/**
 * pci_sriov_get_totalvfs - get total VFs supported on this device
 * @dev: the PCI PF device
 *
 * For a PCIe device with SR-IOV support, return the PCIe
 * SR-IOV capability value of TotalVFs or the value of driver_max_VFs
 * if the driver reduced it.  Otherwise, -EINVAL.
 */
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return -EINVAL;

	if (dev->sriov->driver_max_VFs)
		return dev->sriov->driver_max_VFs;

	return dev->sriov->total_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
818