// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/file.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"

void iommufd_ioas_destroy(struct iommufd_object *obj)
{
	struct iommufd_ioas *ioas = container_of(obj, struct iommufd_ioas, obj);
	int rc;

	rc = iopt_unmap_all(&ioas->iopt, NULL);
	WARN_ON(rc && rc != -ENOENT);
	iopt_destroy_table(&ioas->iopt);
	mutex_destroy(&ioas->mutex);
}

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx)
{
	struct iommufd_ioas *ioas;

	ioas = iommufd_object_alloc(ictx, ioas, IOMMUFD_OBJ_IOAS);
	if (IS_ERR(ioas))
		return ioas;

	iopt_init_table(&ioas->iopt);
	INIT_LIST_HEAD(&ioas->hwpt_list);
	mutex_init(&ioas->mutex);
	return ioas;
}

int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_alloc *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc;

	if (cmd->flags)
		return -EOPNOTSUPP;

	ioas = iommufd_ioas_alloc(ucmd->ictx);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	cmd->out_ioas_id = ioas->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_table;

	down_read(&ucmd->ictx->ioas_creation_lock);
	iommufd_object_finalize(ucmd->ictx, &ioas->obj);
	up_read(&ucmd->ictx->ioas_creation_lock);
	return 0;

out_table:
	iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj);
	return rc;
}

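/*
 * Illustrative userspace sketch (not part of this file): allocating an
 * IOAS with the IOMMU_IOAS_ALLOC ioctl. "iommufd" is assumed to be an
 * open /dev/iommu file descriptor:
 *
 *	struct iommu_ioas_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.flags = 0,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_ALLOC, &cmd))
 *		err(1, "IOMMU_IOAS_ALLOC");
 *
 * On success cmd.out_ioas_id names the new IOAS.
 */
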
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
{
	struct iommu_iova_range __user *ranges;
	struct iommu_ioas_iova_ranges *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	struct interval_tree_span_iter span;
	u32 max_iovas;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	down_read(&ioas->iopt.iova_rwsem);
	max_iovas = cmd->num_iovas;
	ranges = u64_to_user_ptr(cmd->allowed_iovas);
	cmd->num_iovas = 0;
	cmd->out_iova_alignment = ioas->iopt.iova_alignment;
	interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0,
				    ULONG_MAX) {
		if (!span.is_hole)
			continue;
		if (cmd->num_iovas < max_iovas) {
			struct iommu_iova_range elm = {
				.start = span.start_hole,
				.last = span.last_hole,
			};

			if (copy_to_user(&ranges[cmd->num_iovas], &elm,
					 sizeof(elm))) {
				rc = -EFAULT;
				goto out_put;
			}
		}
		cmd->num_iovas++;
	}
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put;
	if (cmd->num_iovas > max_iovas)
		rc = -EMSGSIZE;
out_put:
	up_read(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

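/*
 * Illustrative userspace sketch (not part of this file): the two-call
 * pattern for IOMMU_IOAS_IOVA_RANGES. A first call sized for zero ranges
 * fails with EMSGSIZE (unless no range exists) but still reports the
 * count, which sizes the second call. "iommufd" and "ioas_id" are
 * assumed:
 *
 *	struct iommu_ioas_iova_ranges cmd = {
 *		.size = sizeof(cmd),
 *		.ioas_id = ioas_id,
 *		.num_iovas = 0,
 *	};
 *	struct iommu_iova_range *ranges;
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd) &&
 *	    errno != EMSGSIZE)
 *		err(1, "IOMMU_IOAS_IOVA_RANGES");
 *	ranges = calloc(cmd.num_iovas, sizeof(*ranges));
 *	cmd.allowed_iovas = (uintptr_t)ranges;
 *	if (ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd))
 *		err(1, "IOMMU_IOAS_IOVA_RANGES");
 */
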
static int iommufd_ioas_load_iovas(struct rb_root_cached *itree,
				   struct iommu_iova_range __user *ranges,
				   u32 num)
{
	u32 i;

	for (i = 0; i != num; i++) {
		struct iommu_iova_range range;
		struct iopt_allowed *allowed;

		if (copy_from_user(&range, ranges + i, sizeof(range)))
			return -EFAULT;

		if (range.start >= range.last)
			return -EINVAL;

		if (interval_tree_iter_first(itree, range.start, range.last))
			return -EINVAL;

		allowed = kzalloc(sizeof(*allowed), GFP_KERNEL_ACCOUNT);
		if (!allowed)
			return -ENOMEM;
		allowed->node.start = range.start;
		allowed->node.last = range.last;

		interval_tree_insert(&allowed->node, itree);
	}
	return 0;
}

int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_allow_iovas *cmd = ucmd->cmd;
	struct rb_root_cached allowed_iova = RB_ROOT_CACHED;
	struct interval_tree_node *node;
	struct iommufd_ioas *ioas;
	struct io_pagetable *iopt;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	iopt = &ioas->iopt;

	rc = iommufd_ioas_load_iovas(&allowed_iova,
				     u64_to_user_ptr(cmd->allowed_iovas),
				     cmd->num_iovas);
	if (rc)
		goto out_free;

	/*
	 * We want the allowed tree update to be atomic, so we have to keep the
	 * original nodes around, and keep track of the new nodes as we allocate
	 * memory for them. The simplest solution is to have a new/old tree and
	 * then swap new for old. On success we free the old tree, on failure we
	 * free the new tree.
	 */
	rc = iopt_set_allow_iova(iopt, &allowed_iova);
out_free:
	while ((node = interval_tree_iter_first(&allowed_iova, 0, ULONG_MAX))) {
		interval_tree_remove(node, &allowed_iova);
		kfree(container_of(node, struct iopt_allowed, node));
	}
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

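/*
 * Illustrative userspace sketch (not part of this file): restricting an
 * IOAS to a single allowed IOVA window with IOMMU_IOAS_ALLOW_IOVAS.
 * "iommufd" and "ioas_id" are assumed; the window values are arbitrary:
 *
 *	struct iommu_iova_range range = {
 *		.start = 0x100000,
 *		.last = 0x1fffff,
 *	};
 *	struct iommu_ioas_allow_iovas cmd = {
 *		.size = sizeof(cmd),
 *		.ioas_id = ioas_id,
 *		.num_iovas = 1,
 *		.allowed_iovas = (uintptr_t)&range,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_ALLOW_IOVAS, &cmd))
 *		err(1, "IOMMU_IOAS_ALLOW_IOVAS");
 */
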
static int conv_iommu_prot(u32 map_flags)
{
	/*
	 * We provide no manual cache coherency ioctls to userspace and most
	 * architectures make the CPU ops for cache flushing privileged.
	 * Therefore we require the underlying IOMMU to support CPU coherent
	 * operation. Support for IOMMU_CACHE is enforced by the
	 * IOMMU_CAP_CACHE_COHERENCY test during bind.
	 */
	int iommu_prot = IOMMU_CACHE;

	if (map_flags & IOMMU_IOAS_MAP_WRITEABLE)
		iommu_prot |= IOMMU_WRITE;
	if (map_flags & IOMMU_IOAS_MAP_READABLE)
		iommu_prot |= IOMMU_READ;
	return iommu_prot;
}

int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_map_file *cmd = ucmd->cmd;
	unsigned long iova = cmd->iova;
	struct iommufd_ioas *ioas;
	unsigned int flags = 0;
	int rc;

	if (cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE))
		return -EOPNOTSUPP;

	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
		return -EOVERFLOW;

	if (!(cmd->flags &
	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
		return -EINVAL;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;

	rc = iopt_map_file_pages(ucmd->ictx, &ioas->iopt, &iova, cmd->fd,
				 cmd->start, cmd->length,
				 conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put;

	cmd->iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

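/*
 * Illustrative userspace sketch (not part of this file, field names per
 * my reading of the uAPI header): mapping one page of a memfd into an
 * IOAS with IOMMU_IOAS_MAP_FILE, letting the kernel pick the IOVA.
 * "iommufd", "ioas_id" and "memfd" are assumed:
 *
 *	struct iommu_ioas_map_file cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.ioas_id = ioas_id,
 *		.fd = memfd,
 *		.start = 0,
 *		.length = 4096,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_MAP_FILE, &cmd))
 *		err(1, "IOMMU_IOAS_MAP_FILE");
 *
 * On success cmd.iova holds the allocated IOVA.
 */
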
int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_map *cmd = ucmd->cmd;
	unsigned long iova = cmd->iova;
	struct iommufd_ioas *ioas;
	unsigned int flags = 0;
	int rc;

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
		return -EOVERFLOW;

	if (!(cmd->flags &
	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
		return -EINVAL;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	rc = iopt_map_user_pages(ucmd->ictx, &ioas->iopt, &iova,
				 u64_to_user_ptr(cmd->user_va), cmd->length,
				 conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put;

	cmd->iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

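/*
 * Illustrative userspace sketch (not part of this file): mapping a user
 * buffer with IOMMU_IOAS_MAP and a kernel-chosen IOVA. "iommufd",
 * "ioas_id", "buf" and "len" are assumed:
 *
 *	struct iommu_ioas_map cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.ioas_id = ioas_id,
 *		.user_va = (uintptr_t)buf,
 *		.length = len,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_MAP, &cmd))
 *		err(1, "IOMMU_IOAS_MAP");
 *
 * On success cmd.iova holds the allocated IOVA; setting
 * IOMMU_IOAS_MAP_FIXED_IOVA and cmd.iova maps at a caller-chosen address
 * instead.
 */
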
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_copy *cmd = ucmd->cmd;
	struct iommufd_ioas *src_ioas;
	struct iommufd_ioas *dst_ioas;
	unsigned int flags = 0;
	LIST_HEAD(pages_list);
	unsigned long iova;
	int rc;

	iommufd_test_syz_conv_iova_id(ucmd, cmd->src_ioas_id, &cmd->src_iova,
				      &cmd->flags);

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)))
		return -EOPNOTSUPP;
	if (cmd->length >= ULONG_MAX || cmd->src_iova >= ULONG_MAX ||
	    cmd->dst_iova >= ULONG_MAX)
		return -EOVERFLOW;

	if (!(cmd->flags &
	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
		return -EINVAL;

	src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
	if (IS_ERR(src_ioas))
		return PTR_ERR(src_ioas);
	rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
			    &pages_list);
	iommufd_put_object(ucmd->ictx, &src_ioas->obj);
	if (rc)
		return rc;

	dst_ioas = iommufd_get_ioas(ucmd->ictx, cmd->dst_ioas_id);
	if (IS_ERR(dst_ioas)) {
		rc = PTR_ERR(dst_ioas);
		goto out_pages;
	}

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	iova = cmd->dst_iova;
	rc = iopt_map_pages(&dst_ioas->iopt, &pages_list, cmd->length, &iova,
			    conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put_dst;

	cmd->dst_iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put_dst:
	iommufd_put_object(ucmd->ictx, &dst_ioas->obj);
out_pages:
	iopt_free_pages_list(&pages_list);
	return rc;
}

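/*
 * Illustrative userspace sketch (not part of this file): sharing an
 * existing mapping with a second IOAS via IOMMU_IOAS_COPY, which reuses
 * the already-pinned pages rather than pinning them again. "iommufd",
 * "src_id", "dst_id", "src_iova" and "len" are assumed:
 *
 *	struct iommu_ioas_copy cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.dst_ioas_id = dst_id,
 *		.src_ioas_id = src_id,
 *		.length = len,
 *		.src_iova = src_iova,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_COPY, &cmd))
 *		err(1, "IOMMU_IOAS_COPY");
 *
 * On success cmd.dst_iova holds the IOVA in the destination IOAS.
 */
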
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_unmap *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	unsigned long unmapped = 0;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (cmd->iova == 0 && cmd->length == U64_MAX) {
		rc = iopt_unmap_all(&ioas->iopt, &unmapped);
		if (rc)
			goto out_put;
	} else {
		if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) {
			rc = -EOVERFLOW;
			goto out_put;
		}
		rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length,
				     &unmapped);
		if (rc)
			goto out_put;
		if (!unmapped) {
			rc = -ENOENT;
			goto out_put;
		}
	}

	cmd->length = unmapped;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

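/*
 * Illustrative userspace sketch (not part of this file): unmapping one
 * range, then everything, with IOMMU_IOAS_UNMAP. As the code above shows,
 * iova == 0 with length == UINT64_MAX is the "unmap all" special case.
 * "iommufd", "ioas_id", "iova" and "len" are assumed:
 *
 *	struct iommu_ioas_unmap cmd = {
 *		.size = sizeof(cmd),
 *		.ioas_id = ioas_id,
 *		.iova = iova,
 *		.length = len,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_UNMAP, &cmd))
 *		err(1, "IOMMU_IOAS_UNMAP");
 *
 *	cmd.iova = 0;
 *	cmd.length = UINT64_MAX;
 *	if (ioctl(iommufd, IOMMU_IOAS_UNMAP, &cmd))
 *		err(1, "IOMMU_IOAS_UNMAP (all)");
 *
 * On return cmd.length holds the number of bytes actually unmapped.
 */
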
static void iommufd_release_all_iova_rwsem(struct iommufd_ctx *ictx,
					   struct xarray *ioas_list)
{
	struct iommufd_ioas *ioas;
	unsigned long index;

	xa_for_each(ioas_list, index, ioas) {
		up_write(&ioas->iopt.iova_rwsem);
		refcount_dec(&ioas->obj.users);
	}
	up_write(&ictx->ioas_creation_lock);
	xa_destroy(ioas_list);
}

static int iommufd_take_all_iova_rwsem(struct iommufd_ctx *ictx,
				       struct xarray *ioas_list)
{
	struct iommufd_object *obj;
	unsigned long index;
	int rc;

	/*
	 * This is very ugly. Rather than add a lock around pages->source_mm,
	 * which is a performance path for mdev, we obtain the write side of
	 * all the iova_rwsems, which also protects pages->source_*. Due to
	 * copies we can't know which IOAS could read from the pages, so we
	 * just lock everything. This is the only place locks are nested and
	 * they are uniformly taken in ID order.
	 *
	 * ioas_creation_lock prevents new IOAS from being installed in the
	 * xarray while we do this, and also prevents more than one thread
	 * from holding nested locks.
	 */
	down_write(&ictx->ioas_creation_lock);
	xa_lock(&ictx->objects);
	xa_for_each(&ictx->objects, index, obj) {
		struct iommufd_ioas *ioas;

		if (!obj || obj->type != IOMMUFD_OBJ_IOAS)
			continue;

		if (!refcount_inc_not_zero(&obj->users))
			continue;

		xa_unlock(&ictx->objects);

		ioas = container_of(obj, struct iommufd_ioas, obj);
		down_write_nest_lock(&ioas->iopt.iova_rwsem,
				     &ictx->ioas_creation_lock);

		rc = xa_err(xa_store(ioas_list, index, ioas, GFP_KERNEL));
		if (rc) {
			iommufd_release_all_iova_rwsem(ictx, ioas_list);
			return rc;
		}

		xa_lock(&ictx->objects);
	}
	xa_unlock(&ictx->objects);
	return 0;
}

/*
 * Return true if the pinned page accounting for @pages must be
 * transferred to the current process.
 */
static bool need_charge_update(struct iopt_pages *pages)
{
	switch (pages->account_mode) {
	case IOPT_PAGES_ACCOUNT_NONE:
		return false;
	case IOPT_PAGES_ACCOUNT_MM:
		return pages->source_mm != current->mm;
	case IOPT_PAGES_ACCOUNT_USER:
		/*
		 * Update when mm changes because it also accounts
		 * in mm->pinned_vm.
		 */
		return (pages->source_user != current_user()) ||
		       (pages->source_mm != current->mm);
	}
	return true;
}

/* Charge the pinned page counts to the current process, unwinding on failure */
static int charge_current(unsigned long *npinned)
{
	struct iopt_pages tmp = {
		.source_mm = current->mm,
		.source_task = current->group_leader,
		.source_user = current_user(),
	};
	unsigned int account_mode;
	int rc;

	for (account_mode = 0; account_mode != IOPT_PAGES_ACCOUNT_MODE_NUM;
	     account_mode++) {
		if (!npinned[account_mode])
			continue;

		tmp.account_mode = account_mode;
		rc = iopt_pages_update_pinned(&tmp, npinned[account_mode], true,
					      NULL);
		if (rc)
			goto err_undo;
	}
	return 0;

err_undo:
	while (account_mode != 0) {
		account_mode--;
		if (!npinned[account_mode])
			continue;
		tmp.account_mode = account_mode;
		iopt_pages_update_pinned(&tmp, npinned[account_mode], false,
					 NULL);
	}
	return rc;
}

/* Point @pages at the current task/mm/user, dropping the old references */
static void change_mm(struct iopt_pages *pages)
{
	struct task_struct *old_task = pages->source_task;
	struct user_struct *old_user = pages->source_user;
	struct mm_struct *old_mm = pages->source_mm;

	pages->source_mm = current->mm;
	mmgrab(pages->source_mm);
	mmdrop(old_mm);

	pages->source_task = current->group_leader;
	get_task_struct(pages->source_task);
	put_task_struct(old_task);

	pages->source_user = get_uid(current_user());
	free_uid(old_user);
}

/* Iterate over every area of every IOAS collected in the @_xa xarray */
#define for_each_ioas_area(_xa, _index, _ioas, _area) \
	xa_for_each((_xa), (_index), (_ioas)) \
		for (_area = iopt_area_iter_first(&_ioas->iopt, 0, ULONG_MAX); \
		     _area; \
		     _area = iopt_area_iter_next(_area, 0, ULONG_MAX))

int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_change_process *cmd = ucmd->cmd;
	struct iommufd_ctx *ictx = ucmd->ictx;
	unsigned long all_npinned[IOPT_PAGES_ACCOUNT_MODE_NUM] = {};
	struct iommufd_ioas *ioas;
	struct iopt_area *area;
	struct iopt_pages *pages;
	struct xarray ioas_list;
	unsigned long index;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	xa_init(&ioas_list);
	rc = iommufd_take_all_iova_rwsem(ictx, &ioas_list);
	if (rc)
		return rc;

	for_each_ioas_area(&ioas_list, index, ioas, area) {
		if (area->pages->type != IOPT_ADDRESS_FILE) {
			rc = -EINVAL;
			goto out;
		}
	}

	/*
	 * Count last_pinned pages, then clear it to avoid double counting
	 * if the same iopt_pages is visited multiple times in this loop.
	 * Since we are under all the locks, npinned == last_npinned, so we
	 * can easily restore last_npinned before we return.
	 */
	for_each_ioas_area(&ioas_list, index, ioas, area) {
		pages = area->pages;

		if (need_charge_update(pages)) {
			all_npinned[pages->account_mode] += pages->last_npinned;
			pages->last_npinned = 0;
		}
	}

	rc = charge_current(all_npinned);

	if (rc) {
		/* Charge failed. Fix last_npinned and bail. */
		for_each_ioas_area(&ioas_list, index, ioas, area)
			area->pages->last_npinned = area->pages->npinned;
		goto out;
	}

	for_each_ioas_area(&ioas_list, index, ioas, area) {
		pages = area->pages;

		/* Uncharge the old one (which also restores last_npinned) */
		if (need_charge_update(pages)) {
			int r = iopt_pages_update_pinned(pages, pages->npinned,
							 false, NULL);

			if (WARN_ON(r))
				rc = r;
		}
		change_mm(pages);
	}

out:
	iommufd_release_all_iova_rwsem(ictx, &ioas_list);
	return rc;
}

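/*
 * Illustrative sketch (not part of this file): a process that inherited
 * the iommufd, e.g. across fork/exec, takes over pinned-page accounting
 * with IOMMU_IOAS_CHANGE_PROCESS. Per the type check above, this only
 * works when every mapping was created with IOMMU_IOAS_MAP_FILE.
 * "iommufd" is assumed:
 *
 *	struct iommu_ioas_change_process cmd = {
 *		.size = sizeof(cmd),
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_CHANGE_PROCESS, &cmd))
 *		err(1, "IOMMU_IOAS_CHANGE_PROCESS");
 */
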
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx)
{
	if (cmd->object_id)
		return -EOPNOTSUPP;

	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = ictx->account_mode == IOPT_PAGES_ACCOUNT_MM;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		int rc = 0;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		xa_lock(&ictx->objects);
		if (!xa_empty(&ictx->objects)) {
			rc = -EBUSY;
		} else {
			if (cmd->val64 == 0)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_USER;
			else if (cmd->val64 == 1)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
			else
				rc = -EINVAL;
		}
		xa_unlock(&ictx->objects);

		return rc;
	}
	return -EOPNOTSUPP;
}

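/*
 * Illustrative userspace sketch (not part of this file): switching
 * pinned-page accounting to mm mode (val64 == 1 selects
 * IOPT_PAGES_ACCOUNT_MM) before any objects exist. Requires
 * CAP_SYS_RESOURCE; "iommufd" is assumed:
 *
 *	struct iommu_option cmd = {
 *		.size = sizeof(cmd),
 *		.option_id = IOMMU_OPTION_RLIMIT_MODE,
 *		.op = IOMMU_OPTION_OP_SET,
 *		.object_id = 0,
 *		.val64 = 1,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_OPTION, &cmd))
 *		err(1, "IOMMU_OPTION");
 */
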
static int iommufd_ioas_option_huge_pages(struct iommu_option *cmd,
					  struct iommufd_ioas *ioas)
{
	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = !ioas->iopt.disable_large_pages;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		if (cmd->val64 == 0)
			return iopt_disable_large_pages(&ioas->iopt);
		if (cmd->val64 == 1) {
			iopt_enable_large_pages(&ioas->iopt);
			return 0;
		}
		return -EINVAL;
	}
	return -EOPNOTSUPP;
}

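/*
 * Illustrative userspace sketch (not part of this file): disabling huge
 * page use for one IOAS through the per-object IOMMU_OPTION_HUGE_PAGES
 * option. "iommufd" and "ioas_id" are assumed:
 *
 *	struct iommu_option cmd = {
 *		.size = sizeof(cmd),
 *		.option_id = IOMMU_OPTION_HUGE_PAGES,
 *		.op = IOMMU_OPTION_OP_SET,
 *		.object_id = ioas_id,
 *		.val64 = 0,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_OPTION, &cmd))
 *		err(1, "IOMMU_OPTION");
 */
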
int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
{
	struct iommu_option *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->object_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	switch (cmd->option_id) {
	case IOMMU_OPTION_HUGE_PAGES:
		rc = iommufd_ioas_option_huge_pages(cmd, ioas);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}
664