// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU-based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* Wait for pending IRQ handlers to finish on all vectors. */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* The notify function used when creating a virtqueue. */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
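
/*
 * MSI-X vector layout used by the helpers below: vector 0 always carries
 * the configuration-change interrupt.  With per-VQ vectors, each
 * virtqueue that has a callback then gets its own vector; otherwise a
 * single shared vector (vector 1) services all virtqueues.
 */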
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned int flags = PCI_IRQ_MSIX;
	unsigned int i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof(*vp_dev->msix_names),
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof(*vp_dev->msix_names),
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	/* Partial allocations are torn down by vp_del_vqs() in the caller. */
	return err;
}

static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof(*info), GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}
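
/*
 * Tear down a single virtqueue.  Any per-VQ interrupt bound to the queue
 * must already have been freed by the caller (see vp_del_vqs()).
 */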
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	/*
	 * If the vq failed during re-enable after a reset, info->node was
	 * never rejoined to the virtqueues list, so it must not be removed
	 * again here. This prevents unexpected irqs.
	 */
	if (!vq->reset) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_del(&info->node);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	}

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index))
			continue;

		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_update_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}
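
/*
 * A hypothetical driver-side sketch of the vqs_info[] conventions the
 * helpers below honor (the names "rx", "ctrl" and my_rx_done are
 * illustrative only, not taken from any real driver):
 *
 *	struct virtqueue_info vqs_info[] = {
 *		{ .name = "rx", .callback = my_rx_done },
 *			// has a callback: eligible for its own MSI-X vector
 *		{ .name = "ctrl" },
 *			// no callback: gets VIRTIO_MSI_NO_VECTOR, driver polls
 *		{ },
 *			// NULL name: vqs[2] stays NULL, no virtqueue is allocated
 *	};
 */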
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[],
			    bool per_vq_vectors,
			    struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue_info *vqi;
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i) {
			vqi = &vqs_info[i];
			if (vqi->name && vqi->callback)
				++nvectors;
		}
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		vqi = &vqs_info[i];
		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}

		if (!vqi->callback)
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
				     vqi->name, vqi->ctx, msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof(*vp_dev->msix_names),
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), vqi->name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err) {
			vp_del_vq(vqs[i]);
			goto error_find;
		}
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
				     vqi->name, vqi->ctx,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}
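
/*
 * vp_find_vqs() below tries the interrupt schemes in decreasing order of
 * preference: MSI-X with one vector per queue, MSI-X with a single shared
 * queue vector, and finally legacy INTx.  A minimal caller sketch, assuming
 * the generic virtio_find_vqs() wrapper that dispatches here through
 * config->find_vqs:
 *
 *	err = virtio_find_vqs(vdev, ARRAY_SIZE(vqs_info), vqs, vqs_info, NULL);
 *	if (err)
 *		return err;
 */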
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, true, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, false, desc);
	if (!err)
		return 0;
	/* Is there an interrupt? If not give up. */
	if (!to_vp_device(vdev)->pci_dev->irq)
		return err;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, vqs_info);
}

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Set up the affinity for a virtqueue:
 * - force the affinity for the per-vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTx
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_update_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_and_hint(irq, mask);
		}
	}
	return 0;
}

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static bool vp_supports_pm_no_reset(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 pmcsr;

	if (!pci_dev->pm_cap)
		return false;

	pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		dev_err(dev, "Unable to query pmcsr");
		return false;
	}

	return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}

static int virtio_pci_suspend(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
}

static int virtio_pci_resume(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
}
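
/*
 * Mapping of PM callbacks: hibernation transitions (freeze/thaw/
 * poweroff/restore) always use the full freeze/restore cycle, while
 * suspend/resume skip it entirely when the device preserves its state
 * across power states (No_Soft_Reset set in PMCSR).
 */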
static const struct dev_pm_ops virtio_pci_pm_ops = {
	.suspend = virtio_pci_suspend,
	.resume = virtio_pci_resume,
	.freeze = virtio_pci_freeze,
	.thaw = virtio_pci_restore,
	.poweroff = virtio_pci_freeze,
	.restore = virtio_pci_restore,
};
#endif


/* Qumranet donated their vendor ID for devices 0x1000 through 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}
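
/*
 * Bind to a matching device.  The modern (virtio 1.x) interface is
 * preferred; a transitional device falls back to the legacy interface
 * only when the modern probe returns -ENODEV, and force_legacy reverses
 * that order.
 */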
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}

static struct pci_driver virtio_pci_driver = {
	.name = "virtio-pci",
	.id_table = virtio_pci_id_table,
	.probe = virtio_pci_probe,
	.remove = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
{
	struct virtio_pci_device *pf_vp_dev;

	pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
	if (IS_ERR(pf_vp_dev))
		return NULL;

	return &pf_vp_dev->vdev;
}

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");