xref: /linux/drivers/vfio/iommufd.c (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
4  */
5 #include <linux/vfio.h>
6 #include <linux/iommufd.h>
7 
8 #include "vfio.h"
9 
10 MODULE_IMPORT_NS(IOMMUFD);
11 MODULE_IMPORT_NS(IOMMUFD_VFIO);
12 
13 bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
14 					 struct iommufd_ctx *ictx)
15 {
16 	u32 ioas_id;
17 
18 	return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
19 }
20 
21 int vfio_df_iommufd_bind(struct vfio_device_file *df)
22 {
23 	struct vfio_device *vdev = df->device;
24 	struct iommufd_ctx *ictx = df->iommufd;
25 
26 	lockdep_assert_held(&vdev->dev_set->lock);
27 
28 	return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
29 }
30 
31 int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
32 				    struct iommufd_ctx *ictx)
33 {
34 	u32 ioas_id;
35 	int ret;
36 
37 	lockdep_assert_held(&vdev->dev_set->lock);
38 
39 	/* compat noiommu does not need to do ioas attach */
40 	if (vfio_device_is_noiommu(vdev))
41 		return 0;
42 
43 	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
44 	if (ret)
45 		return ret;
46 
47 	/* The legacy path has no way to return the selected pt_id */
48 	return vdev->ops->attach_ioas(vdev, &ioas_id);
49 }
50 
51 void vfio_df_iommufd_unbind(struct vfio_device_file *df)
52 {
53 	struct vfio_device *vdev = df->device;
54 
55 	lockdep_assert_held(&vdev->dev_set->lock);
56 
57 	if (vfio_device_is_noiommu(vdev))
58 		return;
59 
60 	if (vdev->ops->unbind_iommufd)
61 		vdev->ops->unbind_iommufd(vdev);
62 }
63 
64 struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
65 {
66 	if (vdev->iommufd_device)
67 		return iommufd_device_to_ictx(vdev->iommufd_device);
68 	return NULL;
69 }
70 EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);
71 
72 static int vfio_iommufd_device_id(struct vfio_device *vdev)
73 {
74 	if (vdev->iommufd_device)
75 		return iommufd_device_to_id(vdev->iommufd_device);
76 	return -EINVAL;
77 }
78 
79 /*
80  * Return devid for a device.
81  *  valid ID for the device that is owned by the ictx
82  *  -ENOENT = device is owned but there is no ID
83  *  -ENODEV or other error = device is not owned
84  */
85 int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
86 {
87 	struct iommu_group *group;
88 	int devid;
89 
90 	if (vfio_iommufd_device_ictx(vdev) == ictx)
91 		return vfio_iommufd_device_id(vdev);
92 
93 	group = iommu_group_get(vdev->dev);
94 	if (!group)
95 		return -ENODEV;
96 
97 	if (iommufd_ctx_has_group(ictx, group))
98 		devid = -ENOENT;
99 	else
100 		devid = -ENODEV;
101 
102 	iommu_group_put(group);
103 
104 	return devid;
105 }
106 EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);
107 
108 /*
109  * The physical standard ops mean that the iommufd_device is bound to the
110  * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
111  * using this ops set should call vfio_register_group_dev()
112  */
113 int vfio_iommufd_physical_bind(struct vfio_device *vdev,
114 			       struct iommufd_ctx *ictx, u32 *out_device_id)
115 {
116 	struct iommufd_device *idev;
117 
118 	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
119 	if (IS_ERR(idev))
120 		return PTR_ERR(idev);
121 	vdev->iommufd_device = idev;
122 	return 0;
123 }
124 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
125 
/*
 * Release the iommufd_device created by vfio_iommufd_physical_bind().
 * An attached device is detached first, since detach must precede unbind.
 */
void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);
138 
139 int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
140 {
141 	int rc;
142 
143 	lockdep_assert_held(&vdev->dev_set->lock);
144 
145 	if (WARN_ON(!vdev->iommufd_device))
146 		return -EINVAL;
147 
148 	if (vdev->iommufd_attached)
149 		rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
150 	else
151 		rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
152 	if (rc)
153 		return rc;
154 	vdev->iommufd_attached = true;
155 	return 0;
156 }
157 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
158 
159 void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
160 {
161 	lockdep_assert_held(&vdev->dev_set->lock);
162 
163 	if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
164 		return;
165 
166 	iommufd_device_detach(vdev->iommufd_device);
167 	vdev->iommufd_attached = false;
168 }
169 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
170 
171 /*
172  * The emulated standard ops mean that vfio_device is going to use the
173  * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
174  * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
175  * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
176  */
177 
178 static void vfio_emulated_unmap(void *data, unsigned long iova,
179 				unsigned long length)
180 {
181 	struct vfio_device *vdev = data;
182 
183 	if (vdev->ops->dma_unmap)
184 		vdev->ops->dma_unmap(vdev, iova, length);
185 }
186 
/* iommufd access ops for emulated devices; unmaps funnel to the driver. */
static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};
191 
192 int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
193 			       struct iommufd_ctx *ictx, u32 *out_device_id)
194 {
195 	struct iommufd_access *user;
196 
197 	lockdep_assert_held(&vdev->dev_set->lock);
198 
199 	user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
200 	if (IS_ERR(user))
201 		return PTR_ERR(user);
202 	vdev->iommufd_access = user;
203 	return 0;
204 }
205 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
206 
207 void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
208 {
209 	lockdep_assert_held(&vdev->dev_set->lock);
210 
211 	if (vdev->iommufd_access) {
212 		iommufd_access_destroy(vdev->iommufd_access);
213 		vdev->iommufd_attached = false;
214 		vdev->iommufd_access = NULL;
215 	}
216 }
217 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
218 
219 int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
220 {
221 	int rc;
222 
223 	lockdep_assert_held(&vdev->dev_set->lock);
224 
225 	if (vdev->iommufd_attached)
226 		rc = iommufd_access_replace(vdev->iommufd_access, *pt_id);
227 	else
228 		rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
229 	if (rc)
230 		return rc;
231 	vdev->iommufd_attached = true;
232 	return 0;
233 }
234 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
235 
236 void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
237 {
238 	lockdep_assert_held(&vdev->dev_set->lock);
239 
240 	if (WARN_ON(!vdev->iommufd_access) ||
241 	    !vdev->iommufd_attached)
242 		return;
243 
244 	iommufd_access_detach(vdev->iommufd_access);
245 	vdev->iommufd_attached = false;
246 }
247 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);
248