// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_net.h>
#include <linux/virtio_pci_admin.h>

#include "common.h"

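/*
 * Enable the core vfio-pci device, set up the legacy I/O emulation when
 * CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY is enabled, and open the migration
 * state before user space gains access to the device.
 */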
static int virtiovf_pci_open_device(struct vfio_device *core_vdev)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &virtvdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
	ret = virtiovf_open_legacy_io(virtvdev);
	if (ret) {
		vfio_pci_core_disable(vdev);
		return ret;
	}
#endif

	virtiovf_open_migration(virtvdev);
	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void virtiovf_pci_close_device(struct vfio_device *core_vdev)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);

	virtiovf_close_migration(virtvdev);
	vfio_pci_core_close_device(core_vdev);
}

#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
static int virtiovf_pci_init_device(struct vfio_device *core_vdev)
{
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);
	int ret;

	ret = vfio_pci_core_init_dev(core_vdev);
	if (ret)
		return ret;

	/*
	 * The vfio_device_ops.init() callback is set to virtiovf_pci_init_device()
	 * only when legacy I/O is supported. Now, let's initialize it.
	 */
	return virtiovf_init_legacy_io(virtvdev);
}
#endif

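/*
 * Release the legacy I/O resources (when CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
 * is enabled) before handing the device back to the vfio-pci core release.
 */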
static void virtiovf_pci_core_release_dev(struct vfio_device *core_vdev)
{
#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
	struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev,
			struct virtiovf_pci_core_device, core_device.vdev);

	virtiovf_release_legacy_io(virtvdev);
#endif
	vfio_pci_core_release_dev(core_vdev);
}

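/*
 * Three vfio_device_ops variants are provided:
 * - virtiovf_vfio_pci_lm_ops: VFs exposing live migration (admin queue
 *   device parts) without legacy I/O emulation.
 * - virtiovf_vfio_pci_tran_lm_ops: VFs emulating legacy (transitional) I/O;
 *   the config read/write and ioctl callbacks are overridden.
 * - virtiovf_vfio_pci_ops: the default for any other device.
 */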
static const struct vfio_device_ops virtiovf_vfio_pci_lm_ops = {
	.name = "virtio-vfio-pci-lm",
	.init = vfio_pci_core_init_dev,
	.release = virtiovf_pci_core_release_dev,
	.open_device = virtiovf_pci_open_device,
	.close_device = virtiovf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.match_token_uuid = vfio_pci_core_match_token_uuid,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
static const struct vfio_device_ops virtiovf_vfio_pci_tran_lm_ops = {
	.name = "virtio-vfio-pci-trans-lm",
	.init = virtiovf_pci_init_device,
	.release = virtiovf_pci_core_release_dev,
	.open_device = virtiovf_pci_open_device,
	.close_device = virtiovf_pci_close_device,
	.ioctl = virtiovf_vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = virtiovf_pci_core_read,
	.write = virtiovf_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.match_token_uuid = vfio_pci_core_match_token_uuid,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
#endif

static const struct vfio_device_ops virtiovf_vfio_pci_ops = {
	.name = "virtio-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = virtiovf_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.match_token_uuid = vfio_pci_core_match_token_uuid,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

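/*
 * Select the vfio_device_ops variant for the bound device: for VFs, probe
 * whether legacy I/O emulation and/or admin-queue device parts (live
 * migration) are supported and pick the matching ops; anything else gets the
 * plain virtio-vfio-pci ops.
 */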
static int virtiovf_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	const struct vfio_device_ops *ops = &virtiovf_vfio_pci_ops;
	struct virtiovf_pci_core_device *virtvdev;
	bool sup_legacy_io = false;
	bool sup_lm = false;
	int ret;

	if (pdev->is_virtfn) {
#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
		sup_legacy_io = virtiovf_support_legacy_io(pdev);
		if (sup_legacy_io)
			ops = &virtiovf_vfio_pci_tran_lm_ops;
#endif
		sup_lm = virtio_pci_admin_has_dev_parts(pdev);
		if (sup_lm && !sup_legacy_io)
			ops = &virtiovf_vfio_pci_lm_ops;
	}

	virtvdev = vfio_alloc_device(virtiovf_pci_core_device, core_device.vdev,
				     &pdev->dev, ops);
	if (IS_ERR(virtvdev))
		return PTR_ERR(virtvdev);

	if (sup_lm)
		virtiovf_set_migratable(virtvdev);

	dev_set_drvdata(&pdev->dev, &virtvdev->core_device);
	ret = vfio_pci_core_register_device(&virtvdev->core_device);
	if (ret)
		goto out;
	return 0;
out:
	vfio_put_device(&virtvdev->core_device.vdev);
	return ret;
}

static void virtiovf_pci_remove(struct pci_dev *pdev)
{
	struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);

	vfio_pci_core_unregister_device(&virtvdev->core_device);
	vfio_put_device(&virtvdev->core_device.vdev);
}

static const struct pci_device_id virtiovf_pci_table[] = {
	/* Only virtio-net and virtio-block are supported/tested so far */
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1041) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1042) },
	{}
};

MODULE_DEVICE_TABLE(pci, virtiovf_pci_table);

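/*
 * On AER reset completion, bring the legacy I/O emulation (when configured
 * in) and the migration state back to a consistent post-reset state.
 */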
static void virtiovf_pci_aer_reset_done(struct pci_dev *pdev)
{
#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
	virtiovf_legacy_io_reset_done(pdev);
#endif
	virtiovf_migration_reset_done(pdev);
}

static const struct pci_error_handlers virtiovf_err_handlers = {
	.reset_done = virtiovf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver virtiovf_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = virtiovf_pci_table,
	.probe = virtiovf_pci_probe,
	.remove = virtiovf_pci_remove,
	.err_handler = &virtiovf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(virtiovf_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
MODULE_DESCRIPTION(
	"VIRTIO VFIO PCI - User Level meta-driver for VIRTIO NET and BLOCK devices");