// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_net.h>
#include <linux/virtio_pci_admin.h>

#include "common.h"

static int virtiovf_pci_open_device(struct vfio_device *core_vdev)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &virtvdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
	ret = virtiovf_open_legacy_io(virtvdev);
	if (ret) {
		vfio_pci_core_disable(vdev);
		return ret;
	}
#endif

	virtiovf_open_migration(virtvdev);
	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void virtiovf_pci_close_device(struct vfio_device *core_vdev)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);

	virtiovf_close_migration(virtvdev);
	vfio_pci_core_close_device(core_vdev);
}

#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
static int virtiovf_pci_init_device(struct vfio_device *core_vdev)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);
	int ret;

	ret = vfio_pci_core_init_dev(core_vdev);
	if (ret)
		return ret;

	/*
	 * The vfio_device_ops.init() callback is set to virtiovf_pci_init_device()
	 * only when legacy I/O is supported. Now, let's initialize it.
	 */
	return virtiovf_init_legacy_io(virtvdev);
}
#endif

static void virtiovf_pci_core_release_dev(struct vfio_device *core_vdev)
{
#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);

	virtiovf_release_legacy_io(virtvdev);
#endif
	vfio_pci_core_release_dev(core_vdev);
}

static const struct vfio_device_ops virtiovf_vfio_pci_lm_ops = {
	.name = "virtio-vfio-pci-lm",
	.init = vfio_pci_core_init_dev,
	.release = virtiovf_pci_core_release_dev,
	.open_device = virtiovf_pci_open_device,
	.close_device = virtiovf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
static const struct vfio_device_ops virtiovf_vfio_pci_tran_lm_ops = {
	.name = "virtio-vfio-pci-trans-lm",
	.init = virtiovf_pci_init_device,
	.release = virtiovf_pci_core_release_dev,
	.open_device = virtiovf_pci_open_device,
	.close_device = virtiovf_pci_close_device,
	.ioctl = virtiovf_vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = virtiovf_pci_core_read,
	.write = virtiovf_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
#endif

static const struct vfio_device_ops virtiovf_vfio_pci_ops = {
	.name = "virtio-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = virtiovf_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static int virtiovf_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	const struct vfio_device_ops *ops = &virtiovf_vfio_pci_ops;
	struct virtiovf_pci_core_device *virtvdev;
	bool sup_legacy_io = false;
	bool sup_lm = false;
	int ret;

	if (pdev->is_virtfn) {
		/*
		 * Pick the ops variant for this VF: transitional legacy I/O
		 * emulation and/or device-parts based live migration, falling
		 * back to the plain vfio-pci behaviour otherwise.
		 */
#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
		sup_legacy_io = virtiovf_support_legacy_io(pdev);
		if (sup_legacy_io)
			ops = &virtiovf_vfio_pci_tran_lm_ops;
#endif
		sup_lm = virtio_pci_admin_has_dev_parts(pdev);
		if (sup_lm && !sup_legacy_io)
			ops = &virtiovf_vfio_pci_lm_ops;
	}

	virtvdev = vfio_alloc_device(virtiovf_pci_core_device, core_device.vdev,
				     &pdev->dev, ops);
	if (IS_ERR(virtvdev))
		return PTR_ERR(virtvdev);

	if (sup_lm)
		virtiovf_set_migratable(virtvdev);

	dev_set_drvdata(&pdev->dev, &virtvdev->core_device);
	ret = vfio_pci_core_register_device(&virtvdev->core_device);
	if (ret)
		goto out;
	return 0;
out:
	vfio_put_device(&virtvdev->core_device.vdev);
	return ret;
}

static void virtiovf_pci_remove(struct pci_dev *pdev)
{
	struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);

	vfio_pci_core_unregister_device(&virtvdev->core_device);
	vfio_put_device(&virtvdev->core_device.vdev);
}

static const struct pci_device_id virtiovf_pci_table[] = {
	/* Only virtio-net is supported/tested so far */
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1041) },
	{}
};

MODULE_DEVICE_TABLE(pci, virtiovf_pci_table);

static void virtiovf_pci_aer_reset_done(struct pci_dev *pdev)
{
#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
	virtiovf_legacy_io_reset_done(pdev);
#endif
	virtiovf_migration_reset_done(pdev);
}

static const struct pci_error_handlers virtiovf_err_handlers = {
	.reset_done = virtiovf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver virtiovf_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = virtiovf_pci_table,
	.probe = virtiovf_pci_probe,
	.remove = virtiovf_pci_remove,
	.err_handler = &virtiovf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(virtiovf_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
MODULE_DESCRIPTION(
	"VIRTIO VFIO PCI - User Level meta-driver for VIRTIO NET devices");