// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

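/*
 * Common teardown shared by the paging and nested HWPT destroy/abort paths:
 * free the iommu_domain, if one was allocated, and drop the reference this
 * HWPT holds on its fault object, if any.
 */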
static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

	if (hwpt->fault)
		refcount_dec(&hwpt->fault->obj.users);
}

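/*
 * Destroy callback for IOMMUFD_OBJ_HWPT_PAGING: unlink the HWPT from its IOAS
 * under ioas->mutex, pull the domain out of the IO page table, and drop the
 * IOAS reference taken at allocation time.
 */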
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	__iommufd_hwpt_destroy(&hwpt_paging->common);
	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	__iommufd_hwpt_destroy(&hwpt_nested->common);
	if (hwpt_nested->viommu)
		refcount_dec(&hwpt_nested->viommu->obj.users);
	else
		refcount_dec(&hwpt_nested->parent->common.obj.users);
}

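/*
 * A nested HWPT has no presence in any IOAS list before it is finalized, so
 * aborting an allocation is the same as destroying a finalized object.
 */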
void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

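/*
 * Upgrade the paging domain to enforced cache coherency if the iommu driver
 * supports it. This is a one way transition: once enforced it stays enforced
 * for the lifetime of the HWPT.
 */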
static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
				IOMMU_HWPT_FAULT_ID_VALID;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_paging_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with the refcount_dec() in iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_paging_flags) {
		hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags,
							      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 *
	 * The cache coherency mode must be configured here and cannot be
	 * changed later. Note that a HWPT (non-CC) created for a device
	 * (non-CC) can later be reused by another device (either non-CC or
	 * CC). However, a HWPT (CC) created for a device (CC) can only be
	 * reused by other devices (CC), not by a device (non-CC); in that
	 * case userspace would need to allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
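
/*
 * Minimal usage sketch for iommufd_hwpt_paging_alloc(), mirroring how
 * iommufd_hwpt_alloc() below drives it: allocate under ioas->mutex and either
 * finalize or abort before dropping the lock.
 *
 *	mutex_lock(&ioas->mutex);
 *	hwpt_paging = iommufd_hwpt_paging_alloc(ictx, ioas, idev, flags,
 *						false, user_data);
 *	if (IS_ERR(hwpt_paging)) {
 *		mutex_unlock(&ioas->mutex);
 *		return PTR_ERR(hwpt_paging);
 *	}
 *	...
 *	iommufd_object_finalize(ictx, &hwpt_paging->common.obj);
 *	mutex_unlock(&ioas->mutex);
 */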

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: The user provided driver specific data describing the domain to
 *             create. Must be valid.
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
	    !user_data->len || !ops->domain_alloc_nested)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent ||
	    parent->common.domain->owner != ops)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_nested(
		idev->dev, parent->common.domain,
		flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_viommu_alloc_hwpt_nested() - Get a hwpt_nested for a vIOMMU
 * @viommu: vIOMMU object to associate the hwpt_nested/domain with
 * @flags: Flags from userspace
 * @user_data: The user provided driver specific data describing the domain to
 *             create. Must be valid.
 *
 * Allocate a new IOMMU_DOMAIN_NESTED for a vIOMMU and return it as a NESTED
 * hw_pagetable.
 */
static struct iommufd_hwpt_nested *
iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
				 const struct iommu_user_data *user_data)
{
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (!user_data->len)
		return ERR_PTR(-EOPNOTSUPP);
	if (!viommu->ops || !viommu->ops->alloc_domain_nested)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_nested = __iommufd_object_alloc(
		viommu->ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	hwpt_nested->viommu = viommu;
	refcount_inc(&viommu->obj.users);
	hwpt_nested->parent = viommu->hwpt;

	hwpt->domain =
		viommu->ops->alloc_domain_nested(viommu, flags, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = viommu->iommu_dev->ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(viommu->ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

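/*
 * IOMMU_HWPT_ALLOC ioctl handler: depending on the type of cmd->pt_id (an
 * IOAS, a PAGING HWPT, or a vIOMMU) this dispatches to one of the three
 * allocators above and returns the new HWPT ID in cmd->out_hwpt_id.
 *
 * A rough userspace sketch, assuming an open iommufd and valid dev_id/ioas_id
 * placeholders (struct layout per include/uapi/linux/iommufd.h):
 *
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.pt_id = ioas_id,
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
 *		return -errno;
 *	hwpt_id = cmd.out_hwpt_id;
 */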
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_hwpt_nested *hwpt_nested;
		struct iommufd_viommu *viommu;

		viommu = container_of(pt_obj, struct iommufd_viommu, obj);
		if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
			rc = -EINVAL;
			goto out_unlock;
		}
		hwpt_nested = iommufd_viommu_alloc_hwpt_nested(
			viommu, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) {
		struct iommufd_fault *fault;

		fault = iommufd_get_fault(ucmd, cmd->fault_id);
		if (IS_ERR(fault)) {
			rc = PTR_ERR(fault);
			goto out_hwpt;
		}
		hwpt->fault = fault;
		hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
		hwpt->domain->fault_data = hwpt;
		refcount_inc(&fault->obj.users);
		iommufd_put_object(ucmd->ictx, &fault->obj);
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

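/*
 * IOMMU_HWPT_SET_DIRTY_TRACKING ioctl handler: enable or disable dirty
 * tracking on the paging domain backing cmd->hwpt_id, based on the
 * IOMMU_HWPT_DIRTY_TRACKING_ENABLE flag.
 */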
int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

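/*
 * IOMMU_HWPT_GET_DIRTY_BITMAP ioctl handler: read the dirty bits recorded by
 * the paging domain into the user supplied bitmap, clearing them on the way
 * out unless IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR is set.
 */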
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

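/*
 * IOMMU_HWPT_INVALIDATE ioctl handler: forward an array of user cache
 * invalidation requests to either a nested HWPT's domain or a vIOMMU. On
 * return cmd->entry_num is updated to the number of entries that were
 * actually processed.
 */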
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_object *pt_obj;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->hwpt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = PTR_ERR(pt_obj);
		goto out;
	}
	if (pt_obj->type == IOMMUFD_OBJ_HWPT_NESTED) {
		struct iommufd_hw_pagetable *hwpt =
			container_of(pt_obj, struct iommufd_hw_pagetable, obj);

		if (!hwpt->domain->ops ||
		    !hwpt->domain->ops->cache_invalidate_user) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
							      &data_array);
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_viommu *viommu =
			container_of(pt_obj, struct iommufd_viommu, obj);

		if (!viommu->ops || !viommu->ops->cache_invalidate) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = viommu->ops->cache_invalidate(viommu, &data_array);
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	done_num = data_array.entry_num;

out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}