xref: /linux/drivers/iommu/iommufd/hw_pagetable.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

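/*
 * Common teardown shared by the PAGING and NESTED destroy/abort paths:
 * free the iommu_domain (if one was allocated) and drop the reference
 * taken on the attached fault object, if any.
 */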
static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

	if (hwpt->fault)
		refcount_dec(&hwpt->fault->obj.users);
}

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	__iommufd_hwpt_destroy(&hwpt_paging->common);
	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	__iommufd_hwpt_destroy(&hwpt_nested->common);
	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

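/*
 * Upgrade the domain to enforced cache coherency, if the driver supports it.
 * This is a one-way switch: once set on a hwpt it is never cleared, which is
 * why a CC hwpt cannot later be shared with a non-CC device.
 */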
static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with the refcount_dec() in iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and must decide it before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 *
	 * The cache coherency mode is configured here and cannot be changed
	 * later. Note that a HWPT (non-CC) created for a device (non-CC) can
	 * later be reused by another device (either non-CC or CC). However, a
	 * HWPT (CC) created for a device (CC) can only be reused by other CC
	 * devices; userspace would instead need to allocate a separate
	 * HWPT (non-CC) for a non-CC device.
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
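
/*
 * A minimal sketch of the expected caller pattern (mirroring what
 * iommufd_hwpt_alloc() below does for the IOAS case); error handling is
 * abbreviated and the surrounding code is illustrative only:
 *
 *	mutex_lock(&ioas->mutex);
 *	hwpt_paging = iommufd_hwpt_paging_alloc(ictx, ioas, idev, flags,
 *						false, user_data);
 *	if (IS_ERR(hwpt_paging))
 *		goto out_unlock;
 *	...
 *	iommufd_object_finalize(ictx, &hwpt_paging->common.obj);
 *	mutex_unlock(&ioas->mutex);
 */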

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
	    !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev,
					      flags & ~IOMMU_HWPT_FAULT_ID_VALID,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
			 !hwpt->domain->ops->cache_invalidate_user)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

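/*
 * Handler for the IOMMU_HWPT_ALLOC ioctl: depending on whether pt_id names an
 * IOAS or a PAGING hwpt, a PAGING or a NESTED hw_pagetable is allocated for
 * the device identified by dev_id.
 *
 * A rough userspace sketch, assuming an already opened iommufd file
 * descriptor and valid dev_id/ioas_id values (illustrative only, not taken
 * from this file):
 *
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.pt_id = ioas_id,
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
 *		return -1;
 *	hwpt_id = cmd.out_hwpt_id;
 */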
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) {
		struct iommufd_fault *fault;

		fault = iommufd_get_fault(ucmd, cmd->fault_id);
		if (IS_ERR(fault)) {
			rc = PTR_ERR(fault);
			goto out_hwpt;
		}
		hwpt->fault = fault;
		hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
		hwpt->domain->fault_data = hwpt;
		refcount_inc(&fault->obj.users);
		iommufd_put_object(ucmd->ictx, &fault->obj);
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

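/*
 * Handler for the IOMMU_HWPT_SET_DIRTY_TRACKING ioctl: toggles IOMMU dirty
 * tracking on the PAGING hwpt's domain, enabling it when
 * IOMMU_HWPT_DIRTY_TRACKING_ENABLE is set in flags and disabling it
 * otherwise.
 */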
int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

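/*
 * Handler for the IOMMU_HWPT_GET_DIRTY_BITMAP ioctl: reads (and, unless
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR is set, clears) the dirty state of an
 * IOVA range into a user supplied bitmap.
 *
 * A rough userspace sketch, assuming a hwpt with dirty tracking enabled and
 * illustrative iova/length/bitmap values not taken from this file:
 *
 *	struct iommu_hwpt_get_dirty_bitmap cmd = {
 *		.size = sizeof(cmd),
 *		.hwpt_id = hwpt_id,
 *		.iova = iova,
 *		.length = length,
 *		.page_size = 4096,
 *		.data = (uintptr_t)bitmap,
 *	};
 *
 *	err = ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
 */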
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

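/*
 * Handler for the IOMMU_HWPT_INVALIDATE ioctl: forwards a user supplied array
 * of driver specific invalidation requests to the NESTED domain's
 * cache_invalidate_user() op. entry_num is written back with the number of
 * entries the driver consumed, so userspace can detect a partial completion.
 */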
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_hw_pagetable *hwpt;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out;
	}

	rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
						      &data_array);
	done_num = data_array.entry_num;

	iommufd_put_object(ucmd->ictx, &hwpt->obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}