// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

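/*
 * Teardown shared by the PAGING and NESTED destroy paths: free the
 * iommu_domain, if one was allocated, and drop the reference held on an
 * associated fault object.
 */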
static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

	if (hwpt->fault)
		refcount_dec(&hwpt->fault->obj.users);
}

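/*
 * Destroy a PAGING hwpt: unlink it from the IOAS hwpt_list, remove the domain
 * from the IO page table, then free the domain and drop the IOAS reference
 * taken at allocation time.
 */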
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	__iommufd_hwpt_destroy(&hwpt_paging->common);
	refcount_dec(&hwpt_paging->ioas->obj.users);
}

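/*
 * Abort a partially constructed PAGING hwpt. The caller still holds
 * ioas->mutex, so unlink directly here; list_del_init() empties the list head
 * so that iommufd_hwpt_paging_destroy() skips its own unlink step.
 */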
void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

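/*
 * Destroy a NESTED hwpt and drop the reference on whichever object it was
 * parented to: the vIOMMU if one was used, otherwise the parent PAGING hwpt.
 */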
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	__iommufd_hwpt_destroy(&hwpt_nested->common);
	if (hwpt_nested->viommu)
		refcount_dec(&hwpt_nested->viommu->obj.users);
	else
		refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

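/*
 * Ask the driver to enforce cache coherency on the paging domain if it is not
 * already enforced. Returns 0 when coherency is enforced, -EINVAL when the
 * driver cannot enforce it on this domain.
 */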
static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
				IOMMU_HWPT_FAULT_ID_VALID;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_paging_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hw_pagetable_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_paging_flags) {
		hwpt->domain = ops->domain_alloc_paging_flags(idev->dev,
				flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	}
	iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 *
	 * The cache coherency mode must be configured here and unchanged later.
	 * Note that a HWPT (non-CC) created for a device (non-CC) can be later
	 * reused by another device (either non-CC or CC). However, a HWPT (CC)
	 * created for a device (CC) cannot be reused by another device (non-CC)
	 * but only devices (CC). Instead user space in this case would need to
	 * allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
	    !user_data->len || !ops->domain_alloc_nested)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent ||
	    parent->common.domain->owner != ops)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_nested(
		idev->dev, parent->common.domain,
		flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;
	iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_viommu_alloc_hwpt_nested() - Get a hwpt_nested for a vIOMMU
 * @viommu: vIOMMU object to associate the hwpt_nested/domain with
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new IOMMU_DOMAIN_NESTED for a vIOMMU and return it as a NESTED
 * hw_pagetable.
 */
static struct iommufd_hwpt_nested *
iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
				 const struct iommu_user_data *user_data)
{
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
		return ERR_PTR(-EOPNOTSUPP);
	if (!user_data->len)
		return ERR_PTR(-EOPNOTSUPP);
	if (!viommu->ops || !viommu->ops->alloc_domain_nested)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_nested = __iommufd_object_alloc(
		viommu->ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	hwpt_nested->viommu = viommu;
	refcount_inc(&viommu->obj.users);
	hwpt_nested->parent = viommu->hwpt;

	hwpt->domain =
		viommu->ops->alloc_domain_nested(viommu,
						 flags & ~IOMMU_HWPT_FAULT_ID_VALID,
						 user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = viommu->iommu_dev->ops;
	iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(viommu->ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

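/*
 * Handler for the IOMMU_HWPT_ALLOC ioctl. Dispatches on the type of the
 * pt_id object: an IOAS produces a PAGING hwpt, while a PAGING hwpt or a
 * vIOMMU produces a NESTED hwpt.
 */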
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_hwpt_nested *hwpt_nested;
		struct iommufd_viommu *viommu;

		viommu = container_of(pt_obj, struct iommufd_viommu, obj);
		if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
			rc = -EINVAL;
			goto out_unlock;
		}
		hwpt_nested = iommufd_viommu_alloc_hwpt_nested(
			viommu, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

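	/*
	 * Userspace requested IO page fault delivery: hold a reference on the
	 * fault object and route the domain's IOPFs to the iommufd fault
	 * handler.
	 */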
	if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) {
		struct iommufd_fault *fault;

		fault = iommufd_get_fault(ucmd, cmd->fault_id);
		if (IS_ERR(fault)) {
			rc = PTR_ERR(fault);
			goto out_hwpt;
		}
		hwpt->fault = fault;
		hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
		refcount_inc(&fault->obj.users);
		iommufd_put_object(ucmd->ictx, &fault->obj);
	}
	hwpt->domain->iommufd_hwpt = hwpt;

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

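/*
 * Handler for the IOMMU_HWPT_SET_DIRTY_TRACKING ioctl: enable or disable
 * dirty tracking on a PAGING hwpt's domain.
 */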
int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

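/*
 * Handler for the IOMMU_HWPT_GET_DIRTY_BITMAP ioctl: read the dirty bits of a
 * PAGING hwpt into a user supplied bitmap, optionally without clearing them.
 */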
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

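/*
 * Handler for the IOMMU_HWPT_INVALIDATE ioctl: forward an array of
 * driver-specific cache invalidation requests to a NESTED hwpt or a vIOMMU.
 */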
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_object *pt_obj;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->hwpt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = PTR_ERR(pt_obj);
		goto out;
	}
	if (pt_obj->type == IOMMUFD_OBJ_HWPT_NESTED) {
		struct iommufd_hw_pagetable *hwpt =
			container_of(pt_obj, struct iommufd_hw_pagetable, obj);

		if (!hwpt->domain->ops ||
		    !hwpt->domain->ops->cache_invalidate_user) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
							      &data_array);
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_viommu *viommu =
			container_of(pt_obj, struct iommufd_viommu, obj);

		if (!viommu->ops || !viommu->ops->cache_invalidate) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = viommu->ops->cache_invalidate(viommu, &data_array);
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

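	/*
	 * The invalidation callback leaves the number of handled requests in
	 * data_array.entry_num; report it back to userspace via
	 * cmd->entry_num.
	 */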
	done_num = data_array.entry_num;

out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}