1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
4 */
5 #include <linux/vfio.h>
6 #include <linux/iommufd.h>
7
8 #include "vfio.h"
9
10 MODULE_IMPORT_NS("IOMMUFD");
11 MODULE_IMPORT_NS("IOMMUFD_VFIO");
12
vfio_iommufd_device_has_compat_ioas(struct vfio_device * vdev,struct iommufd_ctx * ictx)13 bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
14 struct iommufd_ctx *ictx)
15 {
16 u32 ioas_id;
17
18 return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
19 }
20
vfio_df_iommufd_bind(struct vfio_device_file * df)21 int vfio_df_iommufd_bind(struct vfio_device_file *df)
22 {
23 struct vfio_device *vdev = df->device;
24 struct iommufd_ctx *ictx = df->iommufd;
25
26 lockdep_assert_held(&vdev->dev_set->lock);
27
28 /* Returns 0 to permit device opening under noiommu mode */
29 if (vfio_device_is_noiommu(vdev))
30 return 0;
31
32 return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
33 }
34
vfio_iommufd_compat_attach_ioas(struct vfio_device * vdev,struct iommufd_ctx * ictx)35 int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
36 struct iommufd_ctx *ictx)
37 {
38 u32 ioas_id;
39 int ret;
40
41 lockdep_assert_held(&vdev->dev_set->lock);
42
43 /* compat noiommu does not need to do ioas attach */
44 if (vfio_device_is_noiommu(vdev))
45 return 0;
46
47 ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
48 if (ret)
49 return ret;
50
51 /* The legacy path has no way to return the selected pt_id */
52 return vdev->ops->attach_ioas(vdev, &ioas_id);
53 }
54
vfio_df_iommufd_unbind(struct vfio_device_file * df)55 void vfio_df_iommufd_unbind(struct vfio_device_file *df)
56 {
57 struct vfio_device *vdev = df->device;
58
59 lockdep_assert_held(&vdev->dev_set->lock);
60
61 if (vfio_device_is_noiommu(vdev))
62 return;
63
64 if (vdev->ops->unbind_iommufd)
65 vdev->ops->unbind_iommufd(vdev);
66 }
67
vfio_iommufd_device_ictx(struct vfio_device * vdev)68 struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
69 {
70 if (vdev->iommufd_device)
71 return iommufd_device_to_ictx(vdev->iommufd_device);
72 return NULL;
73 }
74 EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);
75
vfio_iommufd_device_id(struct vfio_device * vdev)76 static int vfio_iommufd_device_id(struct vfio_device *vdev)
77 {
78 if (vdev->iommufd_device)
79 return iommufd_device_to_id(vdev->iommufd_device);
80 return -EINVAL;
81 }
82
/*
 * Return the iommufd ID ("devid") for a device:
 *  - a valid ID when the device is owned by @ictx
 *  - -ENOENT when the device is owned by @ictx but has no individual ID
 *  - -ENODEV (or another error) when the device is not owned by @ictx
 */
vfio_iommufd_get_dev_id(struct vfio_device * vdev,struct iommufd_ctx * ictx)89 int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
90 {
91 struct iommu_group *group;
92 int devid;
93
94 if (vfio_iommufd_device_ictx(vdev) == ictx)
95 return vfio_iommufd_device_id(vdev);
96
97 group = iommu_group_get(vdev->dev);
98 if (!group)
99 return -ENODEV;
100
101 if (iommufd_ctx_has_group(ictx, group))
102 devid = -ENOENT;
103 else
104 devid = -ENODEV;
105
106 iommu_group_put(group);
107
108 return devid;
109 }
110 EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);
111
112 /*
113 * The physical standard ops mean that the iommufd_device is bound to the
114 * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
115 * using this ops set should call vfio_register_group_dev()
116 */
/*
 * Bind the physical device vdev->dev to @ictx and initialize the per-device
 * pasid allocator. On success *out_device_id holds the iommufd device ID.
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *bound;

	bound = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(bound))
		return PTR_ERR(bound);

	vdev->iommufd_device = bound;
	ida_init(&vdev->pasids);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
130
vfio_iommufd_physical_unbind(struct vfio_device * vdev)131 void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
132 {
133 int pasid;
134
135 lockdep_assert_held(&vdev->dev_set->lock);
136
137 while ((pasid = ida_find_first(&vdev->pasids)) >= 0) {
138 iommufd_device_detach(vdev->iommufd_device, pasid);
139 ida_free(&vdev->pasids, pasid);
140 }
141
142 if (vdev->iommufd_attached) {
143 iommufd_device_detach(vdev->iommufd_device, IOMMU_NO_PASID);
144 vdev->iommufd_attached = false;
145 }
146 iommufd_device_unbind(vdev->iommufd_device);
147 vdev->iommufd_device = NULL;
148 }
149 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);
150
/*
 * Attach (or, if already attached, replace) the RID page table of a bound
 * physical device. *pt_id may be updated by iommufd with the selected pt.
 */
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	/* Second attach on the RID becomes a replace */
	ret = vdev->iommufd_attached ?
		      iommufd_device_replace(vdev->iommufd_device,
					     IOMMU_NO_PASID, pt_id) :
		      iommufd_device_attach(vdev->iommufd_device,
					    IOMMU_NO_PASID, pt_id);
	if (!ret)
		vdev->iommufd_attached = true;
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
172
vfio_iommufd_physical_detach_ioas(struct vfio_device * vdev)173 void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
174 {
175 lockdep_assert_held(&vdev->dev_set->lock);
176
177 if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
178 return;
179
180 iommufd_device_detach(vdev->iommufd_device, IOMMU_NO_PASID);
181 vdev->iommufd_attached = false;
182 }
183 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
184
/*
 * Attach a page table to a specific @pasid of a bound physical device.
 * An already-tracked pasid gets a replace; a new one is reserved in
 * vdev->pasids first, and released again if the attach fails.
 */
int vfio_iommufd_physical_pasid_attach_ioas(struct vfio_device *vdev,
					    u32 pasid, u32 *pt_id)
{
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	if (ida_exists(&vdev->pasids, pasid))
		return iommufd_device_replace(vdev->iommufd_device, pasid,
					      pt_id);

	/* Reserve exactly this pasid value before the first attach */
	ret = ida_alloc_range(&vdev->pasids, pasid, pasid, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ret = iommufd_device_attach(vdev->iommufd_device, pasid, pt_id);
	if (ret)
		ida_free(&vdev->pasids, pasid);

	return ret;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_pasid_attach_ioas);
210
/* Detach @pasid and release its reservation; no-op for untracked pasids */
void vfio_iommufd_physical_pasid_detach_ioas(struct vfio_device *vdev,
					     u32 pasid)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device) ||
	    !ida_exists(&vdev->pasids, pasid))
		return;

	iommufd_device_detach(vdev->iommufd_device, pasid);
	ida_free(&vdev->pasids, pasid);
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_pasid_detach_ioas);
226
227 /*
228 * The emulated standard ops mean that vfio_device is going to use the
229 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
230 * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
231 * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
232 */
233
vfio_emulated_unmap(void * data,unsigned long iova,unsigned long length)234 static void vfio_emulated_unmap(void *data, unsigned long iova,
235 unsigned long length)
236 {
237 struct vfio_device *vdev = data;
238
239 if (vdev->ops->dma_unmap)
240 vdev->ops->dma_unmap(vdev, iova, length);
241 }
242
243 static const struct iommufd_access_ops vfio_user_ops = {
244 .needs_pin_pages = 1,
245 .unmap = vfio_emulated_unmap,
246 };
247
/*
 * Create an iommufd access object for an emulated device. On success
 * *out_device_id holds the access ID and vdev->iommufd_access is set.
 */
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *access;

	lockdep_assert_held(&vdev->dev_set->lock);

	access = iommufd_access_create(ictx, &vfio_user_ops, vdev,
				       out_device_id);
	if (IS_ERR(access))
		return PTR_ERR(access);

	vdev->iommufd_access = access;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
262
vfio_iommufd_emulated_unbind(struct vfio_device * vdev)263 void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
264 {
265 lockdep_assert_held(&vdev->dev_set->lock);
266
267 if (vdev->iommufd_access) {
268 iommufd_access_destroy(vdev->iommufd_access);
269 vdev->iommufd_attached = false;
270 vdev->iommufd_access = NULL;
271 }
272 }
273 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
274
/*
 * Attach (or replace, when already attached) the IOAS of an emulated
 * device's access object.
 */
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	ret = vdev->iommufd_attached ?
		      iommufd_access_replace(vdev->iommufd_access, *pt_id) :
		      iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (!ret)
		vdev->iommufd_attached = true;
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
291
vfio_iommufd_emulated_detach_ioas(struct vfio_device * vdev)292 void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
293 {
294 lockdep_assert_held(&vdev->dev_set->lock);
295
296 if (WARN_ON(!vdev->iommufd_access) ||
297 !vdev->iommufd_attached)
298 return;
299
300 iommufd_access_detach(vdev->iommufd_access);
301 vdev->iommufd_attached = false;
302 }
303 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);
304