xref: /linux/drivers/vfio/cdx/main.c (revision 72bea132f3680ee51e7ed2cee62892b6f5121909)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>

#include "private.h"

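/*
 * open_device: build the region table from the CDX device's resources
 * and reset the device. A region is marked mmap-capable only when both
 * its address and size are page aligned; write access mirrors
 * IORESOURCE_READONLY. Bus-master (BME) support is probed via
 * cdx_clear_master() and cached in vdev->flags.
 */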
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i, ret;

	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}
	ret = cdx_dev_reset(core_vdev->dev);
	if (ret) {
		kfree(vdev->regions);
		vdev->regions = NULL;
		return ret;
	}
	ret = cdx_clear_master(cdx_dev);
	if (ret)
		vdev->flags &= ~BME_SUPPORT;
	else
		vdev->flags |= BME_SUPPORT;

	return 0;
}

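/* Drop the region table and return the device to a quiesced state. */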
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
}

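/*
 * Handle VFIO_DEVICE_FEATURE_BUS_MASTER: set or clear bus mastering on
 * the CDX device. Only available when open_device() detected BME
 * support.
 */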
static int vfio_cdx_bm_ctrl(struct vfio_device *core_vdev, u32 flags,
			    void __user *arg, size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_bus_master, op);
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	struct vfio_device_feature_bus_master ops;
	int ret;

	if (!(vdev->flags & BME_SUPPORT))
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(ops));
	if (ret != 1)
		return ret;

	if (copy_from_user(&ops, arg, minsz))
		return -EFAULT;

	switch (ops.op) {
	case VFIO_DEVICE_FEATURE_CLEAR_MASTER:
		return cdx_clear_master(cdx_dev);
	case VFIO_DEVICE_FEATURE_SET_MASTER:
		return cdx_set_master(cdx_dev);
	default:
		return -EINVAL;
	}
}

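/* Dispatch VFIO_DEVICE_FEATURE requests to the per-feature handlers. */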
static int vfio_cdx_ioctl_feature(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz)
{
	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_BUS_MASTER:
		return vfio_cdx_bm_ctrl(device, flags, arg, argsz);
	default:
		return -ENOTTY;
	}
}

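/* VFIO_DEVICE_GET_INFO: report device flags, region count and IRQ count. */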
static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = cdx_dev->res_count;
	info.num_irqs = 0;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

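/*
 * VFIO_DEVICE_GET_REGION_INFO: return the offset, size and access flags
 * userspace needs to read, write or mmap the selected region.
 */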
static int vfio_cdx_ioctl_get_region_info(struct vfio_cdx_device *vdev,
					  struct vfio_region_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_region_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info.offset = vfio_cdx_index_to_offset(info.index);
	info.size = vdev->regions[info.index].size;
	info.flags = vdev->regions[info.index].flags;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

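/* Top-level ioctl dispatcher for a vfio-cdx device. */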
static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_REGION_INFO:
		return vfio_cdx_ioctl_get_region_info(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}

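/*
 * Map one MMIO region into the caller's address space. The low bits of
 * vm_pgoff (below VFIO_CDX_OFFSET_SHIFT) give the page offset within
 * the region; the requested range must fit inside the region and is
 * mapped with device (non-cacheable) attributes.
 */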
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (base + size > region.size)
		return -EINVAL;

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}

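/*
 * mmap entry point: decode the region index from vm_pgoff and reject
 * mappings of regions that are not mmap-capable or whose access flags
 * do not cover the VM_READ/VM_WRITE permissions requested, then hand
 * off to vfio_cdx_mmap_mmio().
 */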
static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= cdx_dev->res_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EPERM;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EPERM;

	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_cdx_ops = {
	.name		= "vfio-cdx",
	.open_device	= vfio_cdx_open_device,
	.close_device	= vfio_cdx_close_device,
	.ioctl		= vfio_cdx_ioctl,
	.device_feature = vfio_cdx_ioctl_feature,
	.mmap		= vfio_cdx_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};

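/*
 * Bind a CDX device to this meta-driver: allocate the VFIO device
 * state and register it with the VFIO core so userspace can claim it.
 */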
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(dev, vdev);
	return 0;

out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}

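/* Unbind: unregister from the VFIO core and drop the device reference. */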
static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct device *dev = &cdx_dev->dev;
	struct vfio_cdx_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}

static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);

static struct cdx_driver vfio_cdx_driver = {
	.probe		= vfio_cdx_probe,
	.remove		= vfio_cdx_remove,
	.match_id_table	= vfio_cdx_table,
	.driver	= {
		.name	= "vfio-cdx",
	},
	.driver_managed_dma = true,
};

module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
MODULE_IMPORT_NS(CDX_BUS);