// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>

#include "private.h"

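/*
 * Called when the device FD is opened: allocate per-region state, record
 * each CDX resource's address/size, derive the VFIO region flags (MMAP
 * only for page-aligned regions), reset the device and detect whether
 * bus-master enable (BME) control is supported.
 */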
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i, ret;

	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}
	ret = cdx_dev_reset(core_vdev->dev);
	if (ret) {
		kfree(vdev->regions);
		vdev->regions = NULL;
		return ret;
	}
	ret = cdx_clear_master(cdx_dev);
	if (ret)
		vdev->flags &= ~BME_SUPPORT;
	else
		vdev->flags |= BME_SUPPORT;

	return 0;
}

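/* Free region state, reset the device and tear down any configured IRQs. */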
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
	vfio_cdx_irqs_cleanup(vdev);
}

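/*
 * Handle the VFIO_DEVICE_FEATURE_BUS_MASTER feature: set or clear bus
 * mastering on the CDX device, provided BME control was detected at open.
 */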
static int vfio_cdx_bm_ctrl(struct vfio_device *core_vdev, u32 flags,
			    void __user *arg, size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_bus_master, op);
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	struct vfio_device_feature_bus_master ops;
	int ret;

	if (!(vdev->flags & BME_SUPPORT))
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(ops));
	if (ret != 1)
		return ret;

	if (copy_from_user(&ops, arg, minsz))
		return -EFAULT;

	switch (ops.op) {
	case VFIO_DEVICE_FEATURE_CLEAR_MASTER:
		return cdx_clear_master(cdx_dev);
	case VFIO_DEVICE_FEATURE_SET_MASTER:
		return cdx_set_master(cdx_dev);
	default:
		return -EINVAL;
	}
}

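/* Dispatch VFIO_DEVICE_FEATURE requests; only BUS_MASTER is supported. */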
static int vfio_cdx_ioctl_feature(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz)
{
	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_BUS_MASTER:
		return vfio_cdx_bm_ctrl(device, flags, arg, argsz);
	default:
		return -ENOTTY;
	}
}

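/*
 * VFIO_DEVICE_GET_INFO: report the device as a resettable CDX device with
 * one region per CDX resource and a single (MSI) IRQ index when available.
 */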
static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = cdx_dev->res_count;
	info.num_irqs = cdx_dev->num_msi ? 1 : 0;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

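/*
 * VFIO_DEVICE_GET_REGION_INFO: translate a region index into its fixed
 * mmap offset and return the size/flags captured at open time.
 */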
static int vfio_cdx_ioctl_get_region_info(struct vfio_device *core_vdev,
					  struct vfio_region_info *info,
					  struct vfio_info_cap *caps)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);

	if (info->index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info->offset = vfio_cdx_index_to_offset(info->index);
	info->size = vdev->regions[info->index].size;
	info->flags = vdev->regions[info->index].flags;
	return 0;
}

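/*
 * VFIO_DEVICE_GET_IRQ_INFO: a single IRQ index (index 0) describes all of
 * the device's MSI vectors.
 */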
static int vfio_cdx_ioctl_get_irq_info(struct vfio_cdx_device *vdev,
				       struct vfio_irq_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_info, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= 1)
		return -EINVAL;

	if (!cdx_dev->num_msi)
		return -EINVAL;

	info.flags = VFIO_IRQ_INFO_EVENTFD | VFIO_IRQ_INFO_NORESIZE;
	info.count = cdx_dev->num_msi;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

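/*
 * VFIO_DEVICE_SET_IRQS: validate the header against the MSI count, copy in
 * any trailing data (e.g. eventfds) and apply the request.
 */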
static int vfio_cdx_ioctl_set_irqs(struct vfio_cdx_device *vdev,
				   struct vfio_irq_set __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret = 0;

	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(&hdr, cdx_dev->num_msi,
						 1, &data_size);
	if (ret)
		return ret;

	if (data_size) {
		data = memdup_user(arg->data, data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	ret = vfio_cdx_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
				      hdr.start, hdr.count, data);
	kfree(data);

	return ret;
}

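/* Top-level ioctl dispatcher for the vfio-cdx device FD. */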
static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_IRQ_INFO:
		return vfio_cdx_ioctl_get_irq_info(vdev, uarg);
	case VFIO_DEVICE_SET_IRQS:
		return vfio_cdx_ioctl_set_irqs(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}

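/*
 * Map one MMIO region into userspace. vm_pgoff encodes the region index in
 * its upper bits (above VFIO_CDX_OFFSET_SHIFT); the low bits are the page
 * offset within the region, which must stay inside the region's size.
 */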
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (base + size > region.size)
		return -EINVAL;

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}

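/*
 * Validate an mmap() request against the region's MMAP/READ/WRITE flags
 * before handing it to vfio_cdx_mmap_mmio().
 */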
static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= cdx_dev->res_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EPERM;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EPERM;

	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_cdx_ops = {
	.name = "vfio-cdx",
	.open_device = vfio_cdx_open_device,
	.close_device = vfio_cdx_close_device,
	.ioctl = vfio_cdx_ioctl,
	.get_region_info_caps = vfio_cdx_ioctl_get_region_info,
	.device_feature = vfio_cdx_ioctl_feature,
	.mmap = vfio_cdx_mmap,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
};

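/*
 * Driver probe: allocate the vfio_cdx_device wrapper and register it with
 * the VFIO core as a group device.
 */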
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(dev, vdev);
	return 0;

out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}

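/* Driver remove: unregister from the VFIO core and drop the device reference. */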
static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct device *dev = &cdx_dev->dev;
	struct vfio_cdx_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}

static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);

static struct cdx_driver vfio_cdx_driver = {
	.probe = vfio_cdx_probe,
	.remove = vfio_cdx_remove,
	.match_id_table = vfio_cdx_table,
	.driver = {
		.name = "vfio-cdx",
	},
	.driver_managed_dma = true,
};

module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
MODULE_IMPORT_NS("CDX_BUS");