// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"

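/*
 * Destruction callback for IOMMUFD_OBJ_IOAS objects: tear down anything still
 * mapped in the address space and free the io_pagetable. -ENOENT from
 * iopt_unmap_all() just means nothing was mapped, so it is not a WARN.
 */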
void iommufd_ioas_destroy(struct iommufd_object *obj)
{
	struct iommufd_ioas *ioas = container_of(obj, struct iommufd_ioas, obj);
	int rc;

	rc = iopt_unmap_all(&ioas->iopt, NULL);
	WARN_ON(rc && rc != -ENOENT);
	iopt_destroy_table(&ioas->iopt);
	mutex_destroy(&ioas->mutex);
}

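/*
 * Allocate an empty IOAS. The object is not visible to userspace until the
 * caller finalizes it with iommufd_object_finalize(); on error paths it must
 * be torn down with iommufd_object_abort_and_destroy() instead.
 */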
struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx)
{
	struct iommufd_ioas *ioas;

	ioas = iommufd_object_alloc(ictx, ioas, IOMMUFD_OBJ_IOAS);
	if (IS_ERR(ioas))
		return ioas;

	iopt_init_table(&ioas->iopt);
	INIT_LIST_HEAD(&ioas->hwpt_list);
	mutex_init(&ioas->mutex);
	return ioas;
}

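/*
 * Handler for the IOMMU_IOAS_ALLOC ioctl. flags must be zero; the id of the
 * new IOAS is returned in out_ioas_id. A minimal userspace sketch, assuming
 * the struct iommu_ioas_alloc layout from include/uapi/linux/iommufd.h:
 *
 *	struct iommu_ioas_alloc cmd = { .size = sizeof(cmd) };
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_ALLOC, &cmd))
 *		err(1, "IOMMU_IOAS_ALLOC");
 *	ioas_id = cmd.out_ioas_id;
 */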
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_alloc *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc;

	if (cmd->flags)
		return -EOPNOTSUPP;

	ioas = iommufd_ioas_alloc(ucmd->ictx);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	cmd->out_ioas_id = ioas->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_table;
	iommufd_object_finalize(ucmd->ictx, &ioas->obj);
	return 0;

out_table:
	iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj);
	return rc;
}

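/*
 * Handler for IOMMU_IOAS_IOVA_RANGES: report the IOVA spans that are not
 * reserved, i.e. the holes in reserved_itree. cmd->num_iovas is always
 * updated to the total number of ranges; only as many entries as the caller
 * provided room for are copied out, and -EMSGSIZE signals a too-small buffer.
 * A sketch of the usual two-pass query, assuming the layouts in
 * include/uapi/linux/iommufd.h (the first call fails with EMSGSIZE but still
 * reports the count):
 *
 *	struct iommu_iova_range *ranges;
 *	struct iommu_ioas_iova_ranges cmd = {
 *		.size = sizeof(cmd), .ioas_id = ioas_id,
 *	};
 *
 *	ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd);
 *	ranges = calloc(cmd.num_iovas, sizeof(*ranges));
 *	cmd.allowed_iovas = (uintptr_t)ranges;
 *	ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd);
 */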
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
{
	struct iommu_iova_range __user *ranges;
	struct iommu_ioas_iova_ranges *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	struct interval_tree_span_iter span;
	u32 max_iovas;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	down_read(&ioas->iopt.iova_rwsem);
	max_iovas = cmd->num_iovas;
	ranges = u64_to_user_ptr(cmd->allowed_iovas);
	cmd->num_iovas = 0;
	cmd->out_iova_alignment = ioas->iopt.iova_alignment;
	interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0,
				    ULONG_MAX) {
		if (!span.is_hole)
			continue;
		if (cmd->num_iovas < max_iovas) {
			struct iommu_iova_range elm = {
				.start = span.start_hole,
				.last = span.last_hole,
			};

			if (copy_to_user(&ranges[cmd->num_iovas], &elm,
					 sizeof(elm))) {
				rc = -EFAULT;
				goto out_put;
			}
		}
		cmd->num_iovas++;
	}
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put;
	if (cmd->num_iovas > max_iovas)
		rc = -EMSGSIZE;
out_put:
	up_read(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

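/*
 * Build an interval tree from a userspace array of struct iommu_iova_range.
 * Each range must have start < last (last is inclusive) and must not overlap
 * a range already inserted; allocations are charged to the caller via
 * GFP_KERNEL_ACCOUNT.
 */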
static int iommufd_ioas_load_iovas(struct rb_root_cached *itree,
				   struct iommu_iova_range __user *ranges,
				   u32 num)
{
	u32 i;

	for (i = 0; i != num; i++) {
		struct iommu_iova_range range;
		struct iopt_allowed *allowed;

		if (copy_from_user(&range, ranges + i, sizeof(range)))
			return -EFAULT;

		if (range.start >= range.last)
			return -EINVAL;

		if (interval_tree_iter_first(itree, range.start, range.last))
			return -EINVAL;

		allowed = kzalloc(sizeof(*allowed), GFP_KERNEL_ACCOUNT);
		if (!allowed)
			return -ENOMEM;
		allowed->node.start = range.start;
		allowed->node.last = range.last;

		interval_tree_insert(&allowed->node, itree);
	}
	return 0;
}

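/*
 * Handler for IOMMU_IOAS_ALLOW_IOVAS: restrict which IOVAs future mappings
 * may use. The replacement tree is fully built before iopt_set_allow_iova()
 * swaps it in, so the update is atomic; whichever tree is no longer needed
 * (the old one on success, the new one on failure) is freed at out_free.
 */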
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_allow_iovas *cmd = ucmd->cmd;
	struct rb_root_cached allowed_iova = RB_ROOT_CACHED;
	struct interval_tree_node *node;
	struct iommufd_ioas *ioas;
	struct io_pagetable *iopt;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	iopt = &ioas->iopt;

	rc = iommufd_ioas_load_iovas(&allowed_iova,
				     u64_to_user_ptr(cmd->allowed_iovas),
				     cmd->num_iovas);
	if (rc)
		goto out_free;

	/*
	 * We want the allowed tree update to be atomic, so we have to keep the
	 * original nodes around, and keep track of the new nodes as we allocate
	 * memory for them. The simplest solution is to have a new/old tree and
	 * then swap new for old. On success we free the old tree, on failure we
	 * free the new tree.
	 */
	rc = iopt_set_allow_iova(iopt, &allowed_iova);
out_free:
	while ((node = interval_tree_iter_first(&allowed_iova, 0, ULONG_MAX))) {
		interval_tree_remove(node, &allowed_iova);
		kfree(container_of(node, struct iopt_allowed, node));
	}
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

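/*
 * Translate IOMMU_IOAS_MAP_READABLE/WRITEABLE into IOMMU_READ/IOMMU_WRITE
 * page table permissions. IOMMU_CACHE is unconditionally added, see the
 * comment in the body.
 */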
static int conv_iommu_prot(u32 map_flags)
{
	/*
	 * We provide no manual cache coherency ioctls to userspace and most
	 * architectures make the CPU ops for cache flushing privileged.
	 * Therefore we require the underlying IOMMU to support CPU coherent
	 * operation. Support for IOMMU_CACHE is enforced by the
	 * IOMMU_CAP_CACHE_COHERENCY test during bind.
	 */
	int iommu_prot = IOMMU_CACHE;

	if (map_flags & IOMMU_IOAS_MAP_WRITEABLE)
		iommu_prot |= IOMMU_WRITE;
	if (map_flags & IOMMU_IOAS_MAP_READABLE)
		iommu_prot |= IOMMU_READ;
	return iommu_prot;
}

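/*
 * Handler for IOMMU_IOAS_MAP: pin the user memory at user_va and map it into
 * the IOAS. Without IOMMU_IOAS_MAP_FIXED_IOVA the kernel chooses the IOVA and
 * returns it in cmd->iova. A minimal userspace sketch, assuming the struct
 * iommu_ioas_map layout from include/uapi/linux/iommufd.h:
 *
 *	struct iommu_ioas_map cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.ioas_id = ioas_id,
 *		.user_va = (uintptr_t)buffer,
 *		.length = buffer_len,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_IOAS_MAP, &cmd))
 *		err(1, "IOMMU_IOAS_MAP");
 *	iova = cmd.iova;	// kernel-chosen IOVA
 */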
int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_map *cmd = ucmd->cmd;
	unsigned long iova = cmd->iova;
	struct iommufd_ioas *ioas;
	unsigned int flags = 0;
	int rc;

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
		return -EOVERFLOW;

	if (!(cmd->flags &
	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
		return -EINVAL;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	rc = iopt_map_user_pages(ucmd->ictx, &ioas->iopt, &iova,
				 u64_to_user_ptr(cmd->user_va), cmd->length,
				 conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put;

	cmd->iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

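/*
 * Handler for IOMMU_IOAS_COPY: take references on the pages already backing
 * [src_iova, src_iova + length) in src_ioas_id and map them into dst_ioas_id,
 * so the underlying memory is shared rather than pinned and accounted a
 * second time. The source range must already be mapped in the source IOAS.
 */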
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_copy *cmd = ucmd->cmd;
	struct iommufd_ioas *src_ioas;
	struct iommufd_ioas *dst_ioas;
	unsigned int flags = 0;
	LIST_HEAD(pages_list);
	unsigned long iova;
	int rc;

	iommufd_test_syz_conv_iova_id(ucmd, cmd->src_ioas_id, &cmd->src_iova,
				      &cmd->flags);

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)))
		return -EOPNOTSUPP;
	if (cmd->length >= ULONG_MAX || cmd->src_iova >= ULONG_MAX ||
	    cmd->dst_iova >= ULONG_MAX)
		return -EOVERFLOW;

	if (!(cmd->flags &
	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
		return -EINVAL;

	src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
	if (IS_ERR(src_ioas))
		return PTR_ERR(src_ioas);
	rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
			    &pages_list);
	iommufd_put_object(ucmd->ictx, &src_ioas->obj);
	if (rc)
		return rc;

	dst_ioas = iommufd_get_ioas(ucmd->ictx, cmd->dst_ioas_id);
	if (IS_ERR(dst_ioas)) {
		rc = PTR_ERR(dst_ioas);
		goto out_pages;
	}

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	iova = cmd->dst_iova;
	rc = iopt_map_pages(&dst_ioas->iopt, &pages_list, cmd->length, &iova,
			    conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put_dst;

	cmd->dst_iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put_dst:
	iommufd_put_object(ucmd->ictx, &dst_ioas->obj);
out_pages:
	iopt_free_pages_list(&pages_list);
	return rc;
}

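/*
 * Handler for IOMMU_IOAS_UNMAP. The special combination iova == 0 and
 * length == U64_MAX unmaps everything in the IOAS; any other iova/length pair
 * is unmapped via iopt_unmap_iova(). The number of bytes actually unmapped is
 * returned to userspace in cmd->length.
 */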
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_unmap *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	unsigned long unmapped = 0;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (cmd->iova == 0 && cmd->length == U64_MAX) {
		rc = iopt_unmap_all(&ioas->iopt, &unmapped);
		if (rc)
			goto out_put;
	} else {
		if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) {
			rc = -EOVERFLOW;
			goto out_put;
		}
		rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length,
				     &unmapped);
		if (rc)
			goto out_put;
	}

	cmd->length = unmapped;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

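/*
 * IOMMU_OPTION_RLIMIT_MODE is a per-context option, so object_id must be 0.
 * val64 == 0 selects IOPT_PAGES_ACCOUNT_USER and val64 == 1 selects
 * IOPT_PAGES_ACCOUNT_MM for future pinned-page accounting. Changing the mode
 * requires CAP_SYS_RESOURCE and is only permitted while the context has no
 * objects.
 */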
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx)
{
	if (cmd->object_id)
		return -EOPNOTSUPP;

	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = ictx->account_mode == IOPT_PAGES_ACCOUNT_MM;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		int rc = 0;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		xa_lock(&ictx->objects);
		if (!xa_empty(&ictx->objects)) {
			rc = -EBUSY;
		} else {
			if (cmd->val64 == 0)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_USER;
			else if (cmd->val64 == 1)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
			else
				rc = -EINVAL;
		}
		xa_unlock(&ictx->objects);

		return rc;
	}
	return -EOPNOTSUPP;
}

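/*
 * IOMMU_OPTION_HUGE_PAGES per-IOAS option: val64 == 0 forces PAGE_SIZE
 * granularity for this IOAS, val64 == 1 allows the io_pagetable to use larger
 * IOMMU page sizes where the physical memory is contiguous.
 */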
static int iommufd_ioas_option_huge_pages(struct iommu_option *cmd,
					  struct iommufd_ioas *ioas)
{
	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = !ioas->iopt.disable_large_pages;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		if (cmd->val64 == 0)
			return iopt_disable_large_pages(&ioas->iopt);
		if (cmd->val64 == 1) {
			iopt_enable_large_pages(&ioas->iopt);
			return 0;
		}
		return -EINVAL;
	}
	return -EOPNOTSUPP;
}

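/*
 * Handler for IOMMU_OPTION calls whose object_id names an IOAS. A sketch of
 * turning huge pages off for one IOAS, assuming the struct iommu_option
 * layout from include/uapi/linux/iommufd.h:
 *
 *	struct iommu_option cmd = {
 *		.size = sizeof(cmd),
 *		.option_id = IOMMU_OPTION_HUGE_PAGES,
 *		.op = IOMMU_OPTION_OP_SET,
 *		.object_id = ioas_id,
 *		.val64 = 0,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_OPTION, &cmd))
 *		err(1, "IOMMU_OPTION");
 */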
int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
{
	struct iommu_option *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->object_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	switch (cmd->option_id) {
	case IOMMU_OPTION_HUGE_PAGES:
		rc = iommufd_ioas_option_huge_pages(cmd, ioas);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}