// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	if (hwpt_paging->common.domain)
		iommu_domain_free(hwpt_paging->common.domain);

	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	if (hwpt_nested->common.domain)
		iommu_domain_free(hwpt_nested->common.domain);

	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}
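
/*
 * A hedged sketch of the object lifecycle these destructors plug into. The
 * helper names are real and used in this file; the flow itself is
 * illustrative, not a verbatim caller. A half-built object that fails during
 * allocation is torn down via its *_abort() destructor; a finalized object
 * is torn down via *_destroy() when its ID is released:
 *
 *	hwpt_paging = __iommufd_object_alloc(ictx, hwpt_paging,
 *					     IOMMUFD_OBJ_HWPT_PAGING, common.obj);
 *	rc = do_driver_allocation(...);		// hypothetical failure point
 *	if (rc) {
 *		// invokes iommufd_hwpt_paging_abort()
 *		iommufd_object_abort_and_destroy(ictx, &hwpt_paging->common.obj);
 *		return ERR_PTR(rc);
 *	}
 *	// publish the ID; iommufd_hwpt_paging_destroy() runs at teardown
 *	iommufd_object_finalize(ictx, &hwpt_paging->common.obj);
 */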

static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}
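
/*
 * For reference, a sketch of the driver-side contract assumed above. The
 * driver, function and field names are hypothetical; only the
 * iommu_domain_ops hook itself is real. The op flips the domain into
 * snoop-enforcing mode (e.g. a per-PTE snoop-bit policy) and returns true,
 * after which this caller latches enforce_cache_coherency permanently:
 *
 *	static bool hypothetical_enforce_cache_coherency(struct iommu_domain *domain)
 *	{
 *		struct hypothetical_domain *hd = to_hypothetical_domain(domain);
 *
 *		if (!hd->snoop_control_supported)	// hypothetical HW check
 *			return false;
 *		hd->force_snooping = true;	// future mappings get the snoop bit
 *		return true;
 *	}
 */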

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	} else {
		hwpt->domain = iommu_domain_alloc(idev->dev->bus);
		if (!hwpt->domain) {
			rc = -ENOMEM;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 *
	 * The cache coherency mode must be configured here and left unchanged
	 * later. Note that a HWPT (non-CC) created for a device (non-CC) can be
	 * later reused by another device (either non-CC or CC). However, a HWPT
	 * (CC) created for a device (CC) cannot be reused by a non-CC device,
	 * only by other CC devices. For a non-CC device, user space would
	 * instead need to allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
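
/*
 * Userspace view, as a hedged sketch: the paging path above is reached by
 * passing an IOAS ID as pt_id to the IOMMU_HWPT_ALLOC ioctl (handled by
 * iommufd_hwpt_alloc() below). The ioctl and struct iommu_hwpt_alloc come
 * from uapi/linux/iommufd.h; the iommufd fd, dev_id and ioas_id values are
 * assumed to exist already:
 *
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_HWPT_ALLOC_NEST_PARENT,	// optional: permit nesting
 *		.dev_id = dev_id,	// from binding (e.g. VFIO_DEVICE_BIND_IOMMUFD)
 *		.pt_id = ioas_id,	// an IOMMUFD_OBJ_IOAS object
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
 *		return -errno;
 *	paging_hwpt_id = cmd.out_hwpt_id;
 */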

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: The user provided driver specific data describing the domain to
 *             create. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (flags || !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev, flags,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
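
/*
 * Userspace view, as a hedged sketch: a NESTED hwpt is allocated with the
 * same IOMMU_HWPT_ALLOC ioctl, but pt_id names a paging hwpt created with
 * IOMMU_HWPT_ALLOC_NEST_PARENT and driver specific data must be supplied.
 * IOMMU_HWPT_DATA_VTD_S1 / struct iommu_hwpt_vtd_s1 serve purely as an
 * example of such data; s1_table_iova is an assumed value:
 *
 *	struct iommu_hwpt_vtd_s1 vtd = {
 *		.pgtbl_addr = s1_table_iova,	// guest stage-1 page table
 *		.addr_width = 48,
 *	};
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.pt_id = paging_hwpt_id,
 *		.data_type = IOMMU_HWPT_DATA_VTD_S1,
 *		.data_len = sizeof(vtd),
 *		.data_uptr = (uintptr_t)&vtd,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
 *		return -errno;
 *	nested_hwpt_id = cmd.out_hwpt_id;
 */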

int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}
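
/*
 * Userspace view, as a hedged sketch: toggling dirty tracking on a paging
 * hwpt that was allocated with IOMMU_HWPT_ALLOC_DIRTY_TRACKING. The ioctl
 * and struct iommu_hwpt_set_dirty_tracking come from uapi/linux/iommufd.h:
 *
 *	struct iommu_hwpt_set_dirty_tracking cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,	// 0 to disable
 *		.hwpt_id = hwpt_id,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd))
 *		return -errno;
 */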

int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}
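
/*
 * Userspace view, as a hedged sketch: reading back the dirty bitmap for a
 * 2MiB window tracked at 4KiB granularity (2M / 4K = 512 bits = 64 bytes,
 * i.e. eight __u64 words). The ioctl and struct iommu_hwpt_get_dirty_bitmap
 * come from uapi/linux/iommufd.h; hwpt_id and iova are assumed values:
 *
 *	__u64 bitmap[8] = {};			// one bit per 4KiB page
 *	struct iommu_hwpt_get_dirty_bitmap cmd = {
 *		.size = sizeof(cmd),
 *		.hwpt_id = hwpt_id,
 *		.iova = iova,
 *		.length = 2 * 1024 * 1024,
 *		.page_size = 4096,
 *		.data = (uintptr_t)bitmap,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd))
 *		return -errno;
 *	// set bits in bitmap[] now mark dirtied IOVAs; pass
 *	// IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR in .flags to read
 *	// without resetting the tracking state
 */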