// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Intel Corporation.
 */
5 #include <linux/vfio.h>
6 #include <linux/iommufd.h>
7
8 #include "vfio.h"
9
/* Char-device region shared by all vfio device cdevs; allocated in vfio_cdev_init(). */
static dev_t device_devt;
11
vfio_init_device_cdev(struct vfio_device * device)12 void vfio_init_device_cdev(struct vfio_device *device)
13 {
14 device->device.devt = MKDEV(MAJOR(device_devt), device->index);
15 cdev_init(&device->cdev, &vfio_device_fops);
16 device->cdev.owner = THIS_MODULE;
17 }
18
19 /*
20 * device access via the fd opened by this function is blocked until
21 * .open_device() is called successfully during BIND_IOMMUFD.
22 */
vfio_device_fops_cdev_open(struct inode * inode,struct file * filep)23 int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
24 {
25 struct vfio_device *device = container_of(inode->i_cdev,
26 struct vfio_device, cdev);
27 struct vfio_device_file *df;
28 int ret;
29
30 /* Paired with the put in vfio_device_fops_release() */
31 if (!vfio_device_try_get_registration(device))
32 return -ENODEV;
33
34 df = vfio_allocate_device_file(device);
35 if (IS_ERR(df)) {
36 ret = PTR_ERR(df);
37 goto err_put_registration;
38 }
39
40 filep->private_data = df;
41
42 /*
43 * Use the pseudo fs inode on the device to link all mmaps
44 * to the same address space, allowing us to unmap all vmas
45 * associated to this device using unmap_mapping_range().
46 */
47 filep->f_mapping = device->inode->i_mapping;
48
49 return 0;
50
51 err_put_registration:
52 vfio_device_put_registration(device);
53 return ret;
54 }
55
vfio_df_get_kvm_safe(struct vfio_device_file * df)56 static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
57 {
58 spin_lock(&df->kvm_ref_lock);
59 vfio_device_get_kvm_safe(df->device, df->kvm);
60 spin_unlock(&df->kvm_ref_lock);
61 }
62
vfio_df_check_token(struct vfio_device * device,const struct vfio_device_bind_iommufd * bind)63 static int vfio_df_check_token(struct vfio_device *device,
64 const struct vfio_device_bind_iommufd *bind)
65 {
66 uuid_t uuid;
67
68 if (!device->ops->match_token_uuid) {
69 if (bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN)
70 return -EINVAL;
71 return 0;
72 }
73
74 if (!(bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN))
75 return device->ops->match_token_uuid(device, NULL);
76
77 if (copy_from_user(&uuid, u64_to_user_ptr(bind->token_uuid_ptr),
78 sizeof(uuid)))
79 return -EFAULT;
80 return device->ops->match_token_uuid(device, &uuid);
81 }
82
/*
 * VFIO_DEVICE_BIND_IOMMUFD: bind a cdev-opened device file to an
 * iommufd context and open the device.  On success the devid allocated
 * by iommufd is copied back to userspace and subsequent fops access is
 * granted via df->access_granted.  Returns 0 or a negative errno.
 */
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg)
{
	const u32 VALID_FLAGS = VFIO_DEVICE_BIND_FLAG_TOKEN;
	struct vfio_device *device = df->device;
	struct vfio_device_bind_iommufd bind;
	unsigned long minsz;
	u32 user_size;
	int ret;

	/* out_devid is copied straight from df->devid below; types must match */
	static_assert(__same_type(arg->out_devid, df->devid));

	minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);

	ret = get_user(user_size, &arg->argsz);
	if (ret)
		return ret;
	if (user_size < minsz)
		return -EINVAL;
	/* Rejects non-zero trailing bytes from a newer/larger userspace struct */
	ret = copy_struct_from_user(&bind, minsz, arg, user_size);
	if (ret)
		return ret;

	if (bind.iommufd < 0 || bind.flags & ~VALID_FLAGS)
		return -EINVAL;

	/* BIND_IOMMUFD only allowed for cdev fds */
	if (df->group)
		return -EINVAL;

	/* cdev and legacy group paths are mutually exclusive per device */
	ret = vfio_device_block_group(device);
	if (ret)
		return ret;

	mutex_lock(&device->dev_set->lock);
	/* one device cannot be bound twice */
	if (df->access_granted) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = vfio_df_check_token(device, &bind);
	if (ret)
		goto out_unlock;

	df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
	if (IS_ERR(df->iommufd)) {
		ret = PTR_ERR(df->iommufd);
		/* Keep df->iommufd NULL so teardown paths can test it safely */
		df->iommufd = NULL;
		goto out_unlock;
	}

	/*
	 * Before the device open, get the KVM pointer currently
	 * associated with the device file (if there is) and obtain
	 * a reference. This reference is held until device closed.
	 * Save the pointer in the device for use by drivers.
	 */
	vfio_df_get_kvm_safe(df);

	ret = vfio_df_open(df);
	if (ret)
		goto out_put_kvm;

	ret = copy_to_user(&arg->out_devid, &df->devid,
			   sizeof(df->devid)) ? -EFAULT : 0;
	if (ret)
		goto out_close_device;

	device->cdev_opened = true;
	/*
	 * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
	 * read/write/mmap
	 */
	smp_store_release(&df->access_granted, true);
	mutex_unlock(&device->dev_set->lock);
	return 0;

	/* Unwind in strict reverse order of acquisition above */
out_close_device:
	vfio_df_close(df);
out_put_kvm:
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	df->iommufd = NULL;
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
	return ret;
}
172
/*
 * Undo a successful VFIO_DEVICE_BIND_IOMMUFD at fd close time.  A no-op
 * when the bind never completed (access_granted still false).  Teardown
 * mirrors the bind path in reverse order.
 */
void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	/*
	 * In the time of close, there is no contention with another one
	 * changing this flag. So read df->access_granted without lock
	 * and no smp_load_acquire() is ok.
	 */
	if (!df->access_granted)
		return;

	mutex_lock(&device->dev_set->lock);
	vfio_df_close(df);
	/* Drop the KVM reference taken by vfio_df_get_kvm_safe() at bind */
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	device->cdev_opened = false;
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
}
193
/*
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT: attach the device (or, with
 * VFIO_DEVICE_ATTACH_PASID, one PASID of it) to an iommufd IOAS/HWPT.
 * The possibly-updated pt_id is copied back to userspace on success.
 * Returns 0 or a negative errno.
 */
int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
			    struct vfio_device_attach_iommufd_pt __user *arg)
{
	struct vfio_device_attach_iommufd_pt attach;
	struct vfio_device *device = df->device;
	unsigned long minsz, xend = 0;
	int ret;

	minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);

	/* First copy only the base struct; extensions are pulled in below */
	if (copy_from_user(&attach, arg, minsz))
		return -EFAULT;

	if (attach.argsz < minsz)
		return -EINVAL;

	if (attach.flags & ~VFIO_DEVICE_ATTACH_PASID)
		return -EINVAL;

	if (attach.flags & VFIO_DEVICE_ATTACH_PASID) {
		if (!device->ops->pasid_attach_ioas)
			return -EOPNOTSUPP;
		/* PASID flag extends the struct through the pasid field */
		xend = offsetofend(struct vfio_device_attach_iommufd_pt, pasid);
	}

	if (xend) {
		if (attach.argsz < xend)
			return -EINVAL;

		/* Copy the extension tail that follows the base struct */
		if (copy_from_user((void *)&attach + minsz,
				   (void __user *)arg + minsz, xend - minsz))
			return -EFAULT;
	}

	mutex_lock(&device->dev_set->lock);
	if (attach.flags & VFIO_DEVICE_ATTACH_PASID)
		ret = device->ops->pasid_attach_ioas(device,
						     attach.pasid,
						     &attach.pt_id);
	else
		ret = device->ops->attach_ioas(device, &attach.pt_id);
	if (ret)
		goto out_unlock;

	/* Attach succeeded but we can't report pt_id: undo the attach */
	if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
		ret = -EFAULT;
		goto out_detach;
	}
	mutex_unlock(&device->dev_set->lock);

	return 0;

out_detach:
	device->ops->detach_ioas(device);
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	return ret;
}
252
/*
 * VFIO_DEVICE_DETACH_IOMMUFD_PT: detach the device (or, with
 * VFIO_DEVICE_DETACH_PASID, one PASID of it) from its current iommufd
 * IOAS/HWPT.  Returns 0 or a negative errno.
 */
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
			    struct vfio_device_detach_iommufd_pt __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_detach_iommufd_pt detach;
	unsigned long base_sz, ext_end;

	base_sz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);

	/* Pull in the base struct first; the PASID tail follows if flagged */
	if (copy_from_user(&detach, arg, base_sz))
		return -EFAULT;

	if (detach.argsz < base_sz)
		return -EINVAL;

	if (detach.flags & ~VFIO_DEVICE_DETACH_PASID)
		return -EINVAL;

	if (detach.flags & VFIO_DEVICE_DETACH_PASID) {
		if (!device->ops->pasid_detach_ioas)
			return -EOPNOTSUPP;

		/* The PASID flag extends the struct through the pasid field */
		ext_end = offsetofend(struct vfio_device_detach_iommufd_pt,
				      pasid);
		if (detach.argsz < ext_end)
			return -EINVAL;

		if (copy_from_user((void *)&detach + base_sz,
				   (void __user *)arg + base_sz,
				   ext_end - base_sz))
			return -EFAULT;
	}

	mutex_lock(&device->dev_set->lock);
	if (detach.flags & VFIO_DEVICE_DETACH_PASID)
		device->ops->pasid_detach_ioas(device, detach.pasid);
	else
		device->ops->detach_ioas(device);
	mutex_unlock(&device->dev_set->lock);

	return 0;
}
295
/* Name cdev nodes as /dev/vfio/devices/<device-name>. */
static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
{
	const char *name = dev_name(dev);

	return kasprintf(GFP_KERNEL, "vfio/devices/%s", name);
}
300
vfio_cdev_init(struct class * device_class)301 int vfio_cdev_init(struct class *device_class)
302 {
303 device_class->devnode = vfio_device_devnode;
304 return alloc_chrdev_region(&device_devt, 0,
305 MINORMASK + 1, "vfio-dev");
306 }
307
/* Release the char-device region reserved by vfio_cdev_init(). */
void vfio_cdev_cleanup(void)
{
	unregister_chrdev_region(device_devt, MINORMASK + 1);
}
312