1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <stdlib.h>
4 #include <sys/mman.h>
5 #include <sys/eventfd.h>
6
7 #define __EXPORTED_HEADERS__
8 #include <linux/vfio.h>
9
10 #include "iommufd_utils.h"
11
12 static unsigned long HUGEPAGE_SIZE;
13
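/*
 * Page sizes used with the mock IOMMU: a mock page is half a CPU page, which
 * lets the tests exercise sub-CPU-page granularity, and a mock huge page is
 * 512 mock pages.
 */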
14 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
15 #define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
16
17 static unsigned long get_huge_page_size(void)
18 {
19 char buf[80];
20 int ret;
21 int fd;
22
23 fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
24 O_RDONLY);
25 if (fd < 0)
26 return 2 * 1024 * 1024;
27
28 ret = read(fd, buf, sizeof(buf));
29 close(fd);
30 if (ret <= 0 || ret == sizeof(buf))
31 return 2 * 1024 * 1024;
32 buf[ret] = 0;
33 return strtoul(buf, NULL, 10);
34 }
35
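/*
 * Constructor that runs before the test harness: size the globally shared
 * buffer and replace its hugepage-aligned allocation with a MAP_SHARED |
 * MAP_ANONYMOUS mapping fixed at the same address for use by all tests.
 */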
36 static __attribute__((constructor)) void setup_sizes(void)
37 {
38 void *vrc;
39 int rc;
40
41 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
42 HUGEPAGE_SIZE = get_huge_page_size();
43
44 BUFFER_SIZE = PAGE_SIZE * 16;
45 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
46 assert(!rc);
47 assert(buffer);
48 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
49 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
50 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
51 assert(vrc == buffer);
52 }
53
54 FIXTURE(iommufd)
55 {
56 int fd;
57 };
58
59 FIXTURE_SETUP(iommufd)
60 {
61 self->fd = open("/dev/iommu", O_RDWR);
62 ASSERT_NE(-1, self->fd);
63 }
64
65 FIXTURE_TEARDOWN(iommufd)
66 {
67 teardown_iommufd(self->fd, _metadata);
68 }
69
70 TEST_F(iommufd, simple_close)
71 {
72 }
73
74 TEST_F(iommufd, cmd_fail)
75 {
76 struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
77
78 /* object id is invalid */
79 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
80 /* Bad pointer */
81 EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
82 /* Unknown ioctl */
83 EXPECT_ERRNO(ENOTTY,
84 ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
85 &cmd));
86 }
87
88 TEST_F(iommufd, cmd_length)
89 {
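/*
 * For each ioctl, check the uAPI size handling: a size one byte short of the
 * last field fails with EINVAL, a larger size with a non-zero trailing byte
 * fails with E2BIG, and a larger size whose trailing bytes are zero must
 * behave exactly like the exact structure size.
 */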
90 #define TEST_LENGTH(_struct, _ioctl, _last) \
91 { \
92 size_t min_size = offsetofend(struct _struct, _last); \
93 struct { \
94 struct _struct cmd; \
95 uint8_t extra; \
96 } cmd = { .cmd = { .size = min_size - 1 }, \
97 .extra = UINT8_MAX }; \
98 int old_errno; \
99 int rc; \
100 \
101 EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
102 cmd.cmd.size = sizeof(struct _struct) + 1; \
103 EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
104 cmd.cmd.size = sizeof(struct _struct); \
105 rc = ioctl(self->fd, _ioctl, &cmd); \
106 old_errno = errno; \
107 cmd.cmd.size = sizeof(struct _struct) + 1; \
108 cmd.extra = 0; \
109 if (rc) { \
110 EXPECT_ERRNO(old_errno, \
111 ioctl(self->fd, _ioctl, &cmd)); \
112 } else { \
113 ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
114 } \
115 }
116
117 TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
118 TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
119 TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
120 TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
121 TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
122 TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
123 out_iova_alignment);
124 TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
125 allowed_iovas);
126 TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
127 TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
128 TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
129 TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
130 TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
131 #undef TEST_LENGTH
132 }
133
134 TEST_F(iommufd, cmd_ex_fail)
135 {
136 struct {
137 struct iommu_destroy cmd;
138 __u64 future;
139 } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
140
141 /* object id is invalid and command is longer */
142 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
143 /* future area is non-zero */
144 cmd.future = 1;
145 EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
146 /* Original command "works" */
147 cmd.cmd.size = sizeof(cmd.cmd);
148 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
149 /* Short command fails */
150 cmd.cmd.size = sizeof(cmd.cmd) - 1;
151 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
152 }
153
154 TEST_F(iommufd, global_options)
155 {
156 struct iommu_option cmd = {
157 .size = sizeof(cmd),
158 .option_id = IOMMU_OPTION_RLIMIT_MODE,
159 .op = IOMMU_OPTION_OP_GET,
160 .val64 = 1,
161 };
162
163 cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
164 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
165 ASSERT_EQ(0, cmd.val64);
166
167 /* This requires root */
168 cmd.op = IOMMU_OPTION_OP_SET;
169 cmd.val64 = 1;
170 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
171 cmd.val64 = 2;
172 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
173
174 cmd.op = IOMMU_OPTION_OP_GET;
175 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
176 ASSERT_EQ(1, cmd.val64);
177
178 cmd.op = IOMMU_OPTION_OP_SET;
179 cmd.val64 = 0;
180 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
181
182 cmd.op = IOMMU_OPTION_OP_GET;
183 cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
184 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
185 cmd.op = IOMMU_OPTION_OP_SET;
186 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
187 }
188
189 FIXTURE(iommufd_ioas)
190 {
191 int fd;
192 uint32_t ioas_id;
193 uint32_t stdev_id;
194 uint32_t hwpt_id;
195 uint32_t device_id;
196 uint64_t base_iova;
197 };
198
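/*
 * Variants control how many mock domains get attached to the IOAS during
 * setup and, optionally, a temporary memory limit used by the limit tests.
 */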
199 FIXTURE_VARIANT(iommufd_ioas)
200 {
201 unsigned int mock_domains;
202 unsigned int memory_limit;
203 };
204
205 FIXTURE_SETUP(iommufd_ioas)
206 {
207 unsigned int i;
208
209
210 self->fd = open("/dev/iommu", O_RDWR);
211 ASSERT_NE(-1, self->fd);
212 test_ioctl_ioas_alloc(&self->ioas_id);
213
214 if (!variant->memory_limit) {
215 test_ioctl_set_default_memory_limit();
216 } else {
217 test_ioctl_set_temp_memory_limit(variant->memory_limit);
218 }
219
220 for (i = 0; i != variant->mock_domains; i++) {
221 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
222 &self->hwpt_id, &self->device_id);
223 self->base_iova = MOCK_APERTURE_START;
224 }
225 }
226
227 FIXTURE_TEARDOWN(iommufd_ioas)
228 {
229 test_ioctl_set_default_memory_limit();
230 teardown_iommufd(self->fd, _metadata);
231 }
232
233 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
234 {
235 };
236
237 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
238 {
239 .mock_domains = 1,
240 };
241
242 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
243 {
244 .mock_domains = 2,
245 };
246
247 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
248 {
249 .mock_domains = 1,
250 .memory_limit = 16,
251 };
252
253 TEST_F(iommufd_ioas, ioas_auto_destroy)
254 {
255 }
256
257 TEST_F(iommufd_ioas, ioas_destroy)
258 {
259 if (self->stdev_id) {
260 /* IOAS cannot be freed while a device has a HWPT using it */
261 EXPECT_ERRNO(EBUSY,
262 _test_ioctl_destroy(self->fd, self->ioas_id));
263 } else {
264 /* Can allocate and manually free an IOAS table */
265 test_ioctl_destroy(self->ioas_id);
266 }
267 }
268
269 TEST_F(iommufd_ioas, alloc_hwpt_nested)
270 {
271 const uint32_t min_data_len =
272 offsetofend(struct iommu_hwpt_selftest, iotlb);
273 struct iommu_hwpt_selftest data = {
274 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
275 };
276 struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
277 uint32_t nested_hwpt_id[2] = {};
278 uint32_t num_inv;
279 uint32_t parent_hwpt_id = 0;
280 uint32_t parent_hwpt_id_not_work = 0;
281 uint32_t test_hwpt_id = 0;
282 uint32_t iopf_hwpt_id;
283 uint32_t fault_id;
284 uint32_t fault_fd;
285
286 if (self->device_id) {
287 /* Negative tests */
288 test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
289 &test_hwpt_id);
290 test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
291 &test_hwpt_id);
292
293 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
294 IOMMU_HWPT_ALLOC_NEST_PARENT,
295 &parent_hwpt_id);
296
297 test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
298 &parent_hwpt_id_not_work);
299
300 /* Negative nested tests */
301 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
302 parent_hwpt_id, 0,
303 &nested_hwpt_id[0],
304 IOMMU_HWPT_DATA_NONE, &data,
305 sizeof(data));
306 test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
307 parent_hwpt_id, 0,
308 &nested_hwpt_id[0],
309 IOMMU_HWPT_DATA_SELFTEST + 1, &data,
310 sizeof(data));
311 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
312 parent_hwpt_id, 0,
313 &nested_hwpt_id[0],
314 IOMMU_HWPT_DATA_SELFTEST, &data,
315 min_data_len - 1);
316 test_err_hwpt_alloc_nested(EFAULT, self->device_id,
317 parent_hwpt_id, 0,
318 &nested_hwpt_id[0],
319 IOMMU_HWPT_DATA_SELFTEST, NULL,
320 sizeof(data));
321 test_err_hwpt_alloc_nested(
322 EOPNOTSUPP, self->device_id, parent_hwpt_id,
323 IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
324 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
325 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
326 parent_hwpt_id_not_work, 0,
327 &nested_hwpt_id[0],
328 IOMMU_HWPT_DATA_SELFTEST, &data,
329 sizeof(data));
330
331 /* Allocate two nested hwpts sharing one common parent hwpt */
332 test_ioctl_fault_alloc(&fault_id, &fault_fd);
333 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
334 &nested_hwpt_id[0],
335 IOMMU_HWPT_DATA_SELFTEST, &data,
336 sizeof(data));
337 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
338 &nested_hwpt_id[1],
339 IOMMU_HWPT_DATA_SELFTEST, &data,
340 sizeof(data));
341 test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
342 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
343 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
344 &data, sizeof(data));
345 test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
346 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
347 IOMMU_HWPT_DATA_SELFTEST, &data,
348 sizeof(data));
349 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
350 IOMMU_TEST_IOTLB_DEFAULT);
351 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
352 IOMMU_TEST_IOTLB_DEFAULT);
353
354 /* Negative test: a nested hwpt on top of a nested hwpt */
355 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
356 nested_hwpt_id[0], 0, &test_hwpt_id,
357 IOMMU_HWPT_DATA_SELFTEST, &data,
358 sizeof(data));
359 /* Negative test: parent hwpt now cannot be freed */
360 EXPECT_ERRNO(EBUSY,
361 _test_ioctl_destroy(self->fd, parent_hwpt_id));
362
363 /* hwpt_invalidate only supports a user-managed hwpt (nested) */
364 num_inv = 1;
365 test_err_hwpt_invalidate(ENOENT, parent_hwpt_id, inv_reqs,
366 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
367 sizeof(*inv_reqs), &num_inv);
368 assert(!num_inv);
369
370 /* Check data_type by passing zero-length array */
371 num_inv = 0;
372 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
373 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
374 sizeof(*inv_reqs), &num_inv);
375 assert(!num_inv);
376
377 /* Negative test: Invalid data_type */
378 num_inv = 1;
379 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
380 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
381 sizeof(*inv_reqs), &num_inv);
382 assert(!num_inv);
383
384 /* Negative test: structure size sanity */
385 num_inv = 1;
386 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
387 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
388 sizeof(*inv_reqs) + 1, &num_inv);
389 assert(!num_inv);
390
391 num_inv = 1;
392 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
393 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
394 1, &num_inv);
395 assert(!num_inv);
396
397 /* Negative test: invalid flag is passed */
398 num_inv = 1;
399 inv_reqs[0].flags = 0xffffffff;
400 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
401 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
402 sizeof(*inv_reqs), &num_inv);
403 assert(!num_inv);
404
405 /* Negative test: invalid data_uptr when array is not empty */
406 num_inv = 1;
407 inv_reqs[0].flags = 0;
408 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
409 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
410 sizeof(*inv_reqs), &num_inv);
411 assert(!num_inv);
412
413 /* Negative test: invalid entry_len when array is not empty */
414 num_inv = 1;
415 inv_reqs[0].flags = 0;
416 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
417 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
418 0, &num_inv);
419 assert(!num_inv);
420
421 /* Negative test: invalid iotlb_id */
422 num_inv = 1;
423 inv_reqs[0].flags = 0;
424 inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
425 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
426 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
427 sizeof(*inv_reqs), &num_inv);
428 assert(!num_inv);
429
430 /*
431 * Invalidate the 1st iotlb entry but fail the 2nd request
432 * due to invalid flags configuration in the 2nd request.
433 */
434 num_inv = 2;
435 inv_reqs[0].flags = 0;
436 inv_reqs[0].iotlb_id = 0;
437 inv_reqs[1].flags = 0xffffffff;
438 inv_reqs[1].iotlb_id = 1;
439 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
440 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
441 sizeof(*inv_reqs), &num_inv);
442 assert(num_inv == 1);
443 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
444 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
445 IOMMU_TEST_IOTLB_DEFAULT);
446 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
447 IOMMU_TEST_IOTLB_DEFAULT);
448 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
449 IOMMU_TEST_IOTLB_DEFAULT);
450
451 /*
452 * Invalidate the 1st iotlb entry but fail the 2nd request
453 * due to invalid iotlb_id configuration in the 2nd request.
454 */
455 num_inv = 2;
456 inv_reqs[0].flags = 0;
457 inv_reqs[0].iotlb_id = 0;
458 inv_reqs[1].flags = 0;
459 inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
460 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
461 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
462 sizeof(*inv_reqs), &num_inv);
463 assert(num_inv == 1);
464 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
465 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
466 IOMMU_TEST_IOTLB_DEFAULT);
467 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
468 IOMMU_TEST_IOTLB_DEFAULT);
469 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
470 IOMMU_TEST_IOTLB_DEFAULT);
471
472 /* Invalidate the 2nd iotlb entry and verify */
473 num_inv = 1;
474 inv_reqs[0].flags = 0;
475 inv_reqs[0].iotlb_id = 1;
476 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
477 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
478 sizeof(*inv_reqs), &num_inv);
479 assert(num_inv == 1);
480 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
481 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
482 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
483 IOMMU_TEST_IOTLB_DEFAULT);
484 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
485 IOMMU_TEST_IOTLB_DEFAULT);
486
487 /* Invalidate the 3rd and 4th iotlb entries and verify */
488 num_inv = 2;
489 inv_reqs[0].flags = 0;
490 inv_reqs[0].iotlb_id = 2;
491 inv_reqs[1].flags = 0;
492 inv_reqs[1].iotlb_id = 3;
493 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
494 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
495 sizeof(*inv_reqs), &num_inv);
496 assert(num_inv == 2);
497 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
498
499 /* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
500 num_inv = 1;
501 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
502 test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
503 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
504 sizeof(*inv_reqs), &num_inv);
505 assert(num_inv == 1);
506 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
507
508 /* Attach device to nested_hwpt_id[0] that then will be busy */
509 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
510 EXPECT_ERRNO(EBUSY,
511 _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
512
513 /* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
514 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
515 EXPECT_ERRNO(EBUSY,
516 _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
517 test_ioctl_destroy(nested_hwpt_id[0]);
518
519 /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
520 test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
521 EXPECT_ERRNO(EBUSY,
522 _test_ioctl_destroy(self->fd, iopf_hwpt_id));
523 /* Trigger an IOPF on the device */
524 test_cmd_trigger_iopf(self->device_id, fault_fd);
525
526 /* Detach from nested_hwpt_id[1] and destroy it */
527 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
528 test_ioctl_destroy(nested_hwpt_id[1]);
529 test_ioctl_destroy(iopf_hwpt_id);
530
531 /* Detach from the parent hw_pagetable and destroy it */
532 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
533 test_ioctl_destroy(parent_hwpt_id);
534 test_ioctl_destroy(parent_hwpt_id_not_work);
535 close(fault_fd);
536 test_ioctl_destroy(fault_id);
537 } else {
538 test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
539 &parent_hwpt_id);
540 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
541 parent_hwpt_id, 0,
542 &nested_hwpt_id[0],
543 IOMMU_HWPT_DATA_SELFTEST, &data,
544 sizeof(data));
545 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
546 parent_hwpt_id, 0,
547 &nested_hwpt_id[1],
548 IOMMU_HWPT_DATA_SELFTEST, &data,
549 sizeof(data));
550 test_err_mock_domain_replace(ENOENT, self->stdev_id,
551 nested_hwpt_id[0]);
552 test_err_mock_domain_replace(ENOENT, self->stdev_id,
553 nested_hwpt_id[1]);
554 }
555 }
556
557 TEST_F(iommufd_ioas, hwpt_attach)
558 {
559 /* Create a device attached directly to a hwpt */
560 if (self->stdev_id) {
561 test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
562 } else {
563 test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
564 }
565 }
566
567 TEST_F(iommufd_ioas, ioas_area_destroy)
568 {
569 /* Adding an area does not change ability to destroy */
570 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
571 if (self->stdev_id)
572 EXPECT_ERRNO(EBUSY,
573 _test_ioctl_destroy(self->fd, self->ioas_id));
574 else
575 test_ioctl_destroy(self->ioas_id);
576 }
577
578 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
579 {
580 int i;
581
582 /* Can allocate and automatically free an IOAS table with many areas */
583 for (i = 0; i != 10; i++) {
584 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
585 self->base_iova + i * PAGE_SIZE);
586 }
587 }
588
589 TEST_F(iommufd_ioas, get_hw_info)
590 {
591 struct iommu_test_hw_info buffer_exact;
592 struct iommu_test_hw_info_buffer_larger {
593 struct iommu_test_hw_info info;
594 uint64_t trailing_bytes;
595 } buffer_larger;
596 struct iommu_test_hw_info_buffer_smaller {
597 __u32 flags;
598 } buffer_smaller;
599
600 if (self->device_id) {
601 /* Provide a zero-size user_buffer */
602 test_cmd_get_hw_info(self->device_id, NULL, 0);
603 /* Provide a user_buffer with exact size */
604 test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
605 /*
606 * Provide a user_buffer with size larger than the exact size to check if
607 * the kernel zeroes the trailing bytes.
608 */
609 test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
610 /*
611 * Provide a user_buffer with size smaller than the exact size to check if
612 * the fields within the size range still get updated.
613 */
614 test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
615 } else {
616 test_err_get_hw_info(ENOENT, self->device_id,
617 &buffer_exact, sizeof(buffer_exact));
618 test_err_get_hw_info(ENOENT, self->device_id,
619 &buffer_larger, sizeof(buffer_larger));
620 }
621 }
622
623 TEST_F(iommufd_ioas, area)
624 {
625 int i;
626
627 /* Unmap fails if nothing is mapped */
628 for (i = 0; i != 10; i++)
629 test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
630
631 /* Unmap works */
632 for (i = 0; i != 10; i++)
633 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
634 self->base_iova + i * PAGE_SIZE);
635 for (i = 0; i != 10; i++)
636 test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
637 PAGE_SIZE);
638
639 /* Split fails */
640 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
641 self->base_iova + 16 * PAGE_SIZE);
642 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
643 PAGE_SIZE);
644 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
645 PAGE_SIZE);
646
647 /* Over map fails */
648 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
649 self->base_iova + 16 * PAGE_SIZE);
650 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
651 self->base_iova + 16 * PAGE_SIZE);
652 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
653 self->base_iova + 17 * PAGE_SIZE);
654 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
655 self->base_iova + 15 * PAGE_SIZE);
656 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
657 self->base_iova + 15 * PAGE_SIZE);
658
659 /* unmap all works */
660 test_ioctl_ioas_unmap(0, UINT64_MAX);
661
662 /* Unmap all succeeds on an empty IOAS */
663 test_ioctl_ioas_unmap(0, UINT64_MAX);
664 }
665
666 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
667 {
668 uint64_t unmap_len;
669 int i;
670
671 /* Give no_domain some space to rewind base_iova */
672 self->base_iova += 4 * PAGE_SIZE;
673
674 for (i = 0; i != 4; i++)
675 test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
676 self->base_iova + i * 16 * PAGE_SIZE);
677
678 /* Unmap not fully contained area doesn't work */
679 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
680 8 * PAGE_SIZE);
681 test_err_ioctl_ioas_unmap(ENOENT,
682 self->base_iova + 3 * 16 * PAGE_SIZE +
683 8 * PAGE_SIZE - 4 * PAGE_SIZE,
684 8 * PAGE_SIZE);
685
686 /* Unmap fully contained areas works */
687 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
688 self->base_iova - 4 * PAGE_SIZE,
689 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
690 4 * PAGE_SIZE,
691 &unmap_len));
692 ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
693 }
694
695 TEST_F(iommufd_ioas, area_auto_iova)
696 {
697 struct iommu_test_cmd test_cmd = {
698 .size = sizeof(test_cmd),
699 .op = IOMMU_TEST_OP_ADD_RESERVED,
700 .id = self->ioas_id,
701 .add_reserved = { .start = PAGE_SIZE * 4,
702 .length = PAGE_SIZE * 100 },
703 };
704 struct iommu_iova_range ranges[1] = {};
705 struct iommu_ioas_allow_iovas allow_cmd = {
706 .size = sizeof(allow_cmd),
707 .ioas_id = self->ioas_id,
708 .num_iovas = 1,
709 .allowed_iovas = (uintptr_t)ranges,
710 };
711 __u64 iovas[10];
712 int i;
713
714 /* Simple 4k pages */
715 for (i = 0; i != 10; i++)
716 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
717 for (i = 0; i != 10; i++)
718 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
719
720 /* Kernel automatically aligns IOVAs properly */
721 for (i = 0; i != 10; i++) {
722 size_t length = PAGE_SIZE * (i + 1);
723
724 if (self->stdev_id) {
725 test_ioctl_ioas_map(buffer, length, &iovas[i]);
726 } else {
727 test_ioctl_ioas_map((void *)(1UL << 31), length,
728 &iovas[i]);
729 }
730 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
731 }
732 for (i = 0; i != 10; i++)
733 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
734
735 /* Avoids a reserved region */
736 ASSERT_EQ(0,
737 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
738 &test_cmd));
739 for (i = 0; i != 10; i++) {
740 size_t length = PAGE_SIZE * (i + 1);
741
742 test_ioctl_ioas_map(buffer, length, &iovas[i]);
743 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
744 EXPECT_EQ(false,
745 iovas[i] > test_cmd.add_reserved.start &&
746 iovas[i] <
747 test_cmd.add_reserved.start +
748 test_cmd.add_reserved.length);
749 }
750 for (i = 0; i != 10; i++)
751 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
752
753 /* Allowed region intersects with a reserved region */
754 ranges[0].start = PAGE_SIZE;
755 ranges[0].last = PAGE_SIZE * 600;
756 EXPECT_ERRNO(EADDRINUSE,
757 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
758
759 /* Allocate from an allowed region */
760 if (self->stdev_id) {
761 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
762 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
763 } else {
764 ranges[0].start = PAGE_SIZE * 200;
765 ranges[0].last = PAGE_SIZE * 600 - 1;
766 }
767 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
768 for (i = 0; i != 10; i++) {
769 size_t length = PAGE_SIZE * (i + 1);
770
771 test_ioctl_ioas_map(buffer, length, &iovas[i]);
772 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
773 EXPECT_EQ(true, iovas[i] >= ranges[0].start);
774 EXPECT_EQ(true, iovas[i] <= ranges[0].last);
775 EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
776 EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
777 }
778 for (i = 0; i != 10; i++)
779 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
780 }
781
782 TEST_F(iommufd_ioas, area_allowed)
783 {
784 struct iommu_test_cmd test_cmd = {
785 .size = sizeof(test_cmd),
786 .op = IOMMU_TEST_OP_ADD_RESERVED,
787 .id = self->ioas_id,
788 .add_reserved = { .start = PAGE_SIZE * 4,
789 .length = PAGE_SIZE * 100 },
790 };
791 struct iommu_iova_range ranges[1] = {};
792 struct iommu_ioas_allow_iovas allow_cmd = {
793 .size = sizeof(allow_cmd),
794 .ioas_id = self->ioas_id,
795 .num_iovas = 1,
796 .allowed_iovas = (uintptr_t)ranges,
797 };
798
799 /* Reserved intersects an allowed */
800 allow_cmd.num_iovas = 1;
801 ranges[0].start = self->base_iova;
802 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
803 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
804 test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
805 test_cmd.add_reserved.length = PAGE_SIZE;
806 EXPECT_ERRNO(EADDRINUSE,
807 ioctl(self->fd,
808 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
809 &test_cmd));
810 allow_cmd.num_iovas = 0;
811 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
812
813 /* Allowed intersects a reserved */
814 ASSERT_EQ(0,
815 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
816 &test_cmd));
817 allow_cmd.num_iovas = 1;
818 ranges[0].start = self->base_iova;
819 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
820 EXPECT_ERRNO(EADDRINUSE,
821 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
822 }
823
824 TEST_F(iommufd_ioas, copy_area)
825 {
826 struct iommu_ioas_copy copy_cmd = {
827 .size = sizeof(copy_cmd),
828 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
829 .dst_ioas_id = self->ioas_id,
830 .src_ioas_id = self->ioas_id,
831 .length = PAGE_SIZE,
832 };
833
834 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
835
836 /* Copy inside a single IOAS */
837 copy_cmd.src_iova = self->base_iova;
838 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
839 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
840
841 /* Copy between IOAS's */
842 copy_cmd.src_iova = self->base_iova;
843 copy_cmd.dst_iova = 0;
844 test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
845 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
846 }
847
848 TEST_F(iommufd_ioas, iova_ranges)
849 {
850 struct iommu_test_cmd test_cmd = {
851 .size = sizeof(test_cmd),
852 .op = IOMMU_TEST_OP_ADD_RESERVED,
853 .id = self->ioas_id,
854 .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
855 };
856 struct iommu_iova_range *ranges = buffer;
857 struct iommu_ioas_iova_ranges ranges_cmd = {
858 .size = sizeof(ranges_cmd),
859 .ioas_id = self->ioas_id,
860 .num_iovas = BUFFER_SIZE / sizeof(*ranges),
861 .allowed_iovas = (uintptr_t)ranges,
862 };
863
864 /* Range can be read */
865 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
866 EXPECT_EQ(1, ranges_cmd.num_iovas);
867 if (!self->stdev_id) {
868 EXPECT_EQ(0, ranges[0].start);
869 EXPECT_EQ(SIZE_MAX, ranges[0].last);
870 EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
871 } else {
872 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
873 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
874 EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
875 }
876
877 /* Buffer too small */
878 memset(ranges, 0, BUFFER_SIZE);
879 ranges_cmd.num_iovas = 0;
880 EXPECT_ERRNO(EMSGSIZE,
881 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
882 EXPECT_EQ(1, ranges_cmd.num_iovas);
883 EXPECT_EQ(0, ranges[0].start);
884 EXPECT_EQ(0, ranges[0].last);
885
886 /* 2 ranges */
887 ASSERT_EQ(0,
888 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
889 &test_cmd));
890 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
891 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
892 if (!self->stdev_id) {
893 EXPECT_EQ(2, ranges_cmd.num_iovas);
894 EXPECT_EQ(0, ranges[0].start);
895 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
896 EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
897 EXPECT_EQ(SIZE_MAX, ranges[1].last);
898 } else {
899 EXPECT_EQ(1, ranges_cmd.num_iovas);
900 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
901 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
902 }
903
904 /* Buffer too small */
905 memset(ranges, 0, BUFFER_SIZE);
906 ranges_cmd.num_iovas = 1;
907 if (!self->stdev_id) {
908 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
909 &ranges_cmd));
910 EXPECT_EQ(2, ranges_cmd.num_iovas);
911 EXPECT_EQ(0, ranges[0].start);
912 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
913 } else {
914 ASSERT_EQ(0,
915 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
916 EXPECT_EQ(1, ranges_cmd.num_iovas);
917 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
918 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
919 }
920 EXPECT_EQ(0, ranges[1].start);
921 EXPECT_EQ(0, ranges[1].last);
922 }
923
924 TEST_F(iommufd_ioas, access_domain_destory)
925 {
926 struct iommu_test_cmd access_cmd = {
927 .size = sizeof(access_cmd),
928 .op = IOMMU_TEST_OP_ACCESS_PAGES,
929 .access_pages = { .iova = self->base_iova + PAGE_SIZE,
930 .length = PAGE_SIZE},
931 };
932 size_t buf_size = 2 * HUGEPAGE_SIZE;
933 uint8_t *buf;
934
935 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
936 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
937 0);
938 ASSERT_NE(MAP_FAILED, buf);
939 test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
940
941 test_cmd_create_access(self->ioas_id, &access_cmd.id,
942 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
943 access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
944 ASSERT_EQ(0,
945 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
946 &access_cmd));
947
948 /* Causes a complicated unpin across a huge page boundary */
949 if (self->stdev_id)
950 test_ioctl_destroy(self->stdev_id);
951
952 test_cmd_destroy_access_pages(
953 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
954 test_cmd_destroy_access(access_cmd.id);
955 ASSERT_EQ(0, munmap(buf, buf_size));
956 }
957
958 TEST_F(iommufd_ioas, access_pin)
959 {
960 struct iommu_test_cmd access_cmd = {
961 .size = sizeof(access_cmd),
962 .op = IOMMU_TEST_OP_ACCESS_PAGES,
963 .access_pages = { .iova = MOCK_APERTURE_START,
964 .length = BUFFER_SIZE,
965 .uptr = (uintptr_t)buffer },
966 };
967 struct iommu_test_cmd check_map_cmd = {
968 .size = sizeof(check_map_cmd),
969 .op = IOMMU_TEST_OP_MD_CHECK_MAP,
970 .check_map = { .iova = MOCK_APERTURE_START,
971 .length = BUFFER_SIZE,
972 .uptr = (uintptr_t)buffer },
973 };
974 uint32_t access_pages_id;
975 unsigned int npages;
976
977 test_cmd_create_access(self->ioas_id, &access_cmd.id,
978 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
979
980 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
981 uint32_t mock_stdev_id;
982 uint32_t mock_hwpt_id;
983
984 access_cmd.access_pages.length = npages * PAGE_SIZE;
985
986 /* Single map/unmap */
987 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
988 MOCK_APERTURE_START);
989 ASSERT_EQ(0, ioctl(self->fd,
990 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
991 &access_cmd));
992 test_cmd_destroy_access_pages(
993 access_cmd.id,
994 access_cmd.access_pages.out_access_pages_id);
995
996 /* Double user */
997 ASSERT_EQ(0, ioctl(self->fd,
998 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
999 &access_cmd));
1000 access_pages_id = access_cmd.access_pages.out_access_pages_id;
1001 ASSERT_EQ(0, ioctl(self->fd,
1002 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1003 &access_cmd));
1004 test_cmd_destroy_access_pages(
1005 access_cmd.id,
1006 access_cmd.access_pages.out_access_pages_id);
1007 test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
1008
1009 /* Add/remove a domain with a user */
1010 ASSERT_EQ(0, ioctl(self->fd,
1011 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1012 &access_cmd));
1013 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1014 &mock_hwpt_id, NULL);
1015 check_map_cmd.id = mock_hwpt_id;
1016 ASSERT_EQ(0, ioctl(self->fd,
1017 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
1018 &check_map_cmd));
1019
1020 test_ioctl_destroy(mock_stdev_id);
1021 test_cmd_destroy_access_pages(
1022 access_cmd.id,
1023 access_cmd.access_pages.out_access_pages_id);
1024
1025 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1026 }
1027 test_cmd_destroy_access(access_cmd.id);
1028 }
1029
1030 TEST_F(iommufd_ioas, access_pin_unmap)
1031 {
1032 struct iommu_test_cmd access_pages_cmd = {
1033 .size = sizeof(access_pages_cmd),
1034 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1035 .access_pages = { .iova = MOCK_APERTURE_START,
1036 .length = BUFFER_SIZE,
1037 .uptr = (uintptr_t)buffer },
1038 };
1039
1040 test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1041 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1042 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1043 ASSERT_EQ(0,
1044 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1045 &access_pages_cmd));
1046
1047 /* Trigger the unmap op */
1048 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1049
1050 /* kernel removed the item for us */
1051 test_err_destroy_access_pages(
1052 ENOENT, access_pages_cmd.id,
1053 access_pages_cmd.access_pages.out_access_pages_id);
1054 }
1055
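/*
 * Sweep emulated reads and writes of 1..sizeof(tmp)-1 bytes at IOVAs
 * straddling a page boundary, checking the data against the shared buffer,
 * then do a single BUFFER_SIZE read spanning multiple pages.
 */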
1056 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1057 unsigned int access_id, uint64_t iova,
1058 unsigned int def_flags)
1059 {
1060 uint16_t tmp[32];
1061 struct iommu_test_cmd access_cmd = {
1062 .size = sizeof(access_cmd),
1063 .op = IOMMU_TEST_OP_ACCESS_RW,
1064 .id = access_id,
1065 .access_rw = { .uptr = (uintptr_t)tmp },
1066 };
1067 uint16_t *buffer16 = buffer;
1068 unsigned int i;
1069 void *tmp2;
1070
1071 for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1072 buffer16[i] = rand();
1073
1074 for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1075 access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1076 access_cmd.access_rw.iova++) {
1077 for (access_cmd.access_rw.length = 1;
1078 access_cmd.access_rw.length < sizeof(tmp);
1079 access_cmd.access_rw.length++) {
1080 access_cmd.access_rw.flags = def_flags;
1081 ASSERT_EQ(0, ioctl(fd,
1082 _IOMMU_TEST_CMD(
1083 IOMMU_TEST_OP_ACCESS_RW),
1084 &access_cmd));
1085 ASSERT_EQ(0,
1086 memcmp(buffer + (access_cmd.access_rw.iova -
1087 iova),
1088 tmp, access_cmd.access_rw.length));
1089
1090 for (i = 0; i != ARRAY_SIZE(tmp); i++)
1091 tmp[i] = rand();
1092 access_cmd.access_rw.flags = def_flags |
1093 MOCK_ACCESS_RW_WRITE;
1094 ASSERT_EQ(0, ioctl(fd,
1095 _IOMMU_TEST_CMD(
1096 IOMMU_TEST_OP_ACCESS_RW),
1097 &access_cmd));
1098 ASSERT_EQ(0,
1099 memcmp(buffer + (access_cmd.access_rw.iova -
1100 iova),
1101 tmp, access_cmd.access_rw.length));
1102 }
1103 }
1104
1105 /* Multi-page test */
1106 tmp2 = malloc(BUFFER_SIZE);
1107 ASSERT_NE(NULL, tmp2);
1108 access_cmd.access_rw.iova = iova;
1109 access_cmd.access_rw.length = BUFFER_SIZE;
1110 access_cmd.access_rw.flags = def_flags;
1111 access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1112 ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1113 &access_cmd));
1114 ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1115 free(tmp2);
1116 }
1117
1118 TEST_F(iommufd_ioas, access_rw)
1119 {
1120 __u32 access_id;
1121 __u64 iova;
1122
1123 test_cmd_create_access(self->ioas_id, &access_id, 0);
1124 test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1125 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1126 check_access_rw(_metadata, self->fd, access_id, iova,
1127 MOCK_ACCESS_RW_SLOW_PATH);
1128 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1129 test_cmd_destroy_access(access_id);
1130 }
1131
1132 TEST_F(iommufd_ioas, access_rw_unaligned)
1133 {
1134 __u32 access_id;
1135 __u64 iova;
1136
1137 test_cmd_create_access(self->ioas_id, &access_id, 0);
1138
1139 /* Unaligned pages */
1140 iova = self->base_iova + MOCK_PAGE_SIZE;
1141 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1142 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1143 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1144 test_cmd_destroy_access(access_id);
1145 }
1146
1147 TEST_F(iommufd_ioas, fork_gone)
1148 {
1149 __u32 access_id;
1150 pid_t child;
1151
1152 test_cmd_create_access(self->ioas_id, &access_id, 0);
1153
1154 /* Create a mapping with a different mm */
1155 child = fork();
1156 if (!child) {
1157 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1158 MOCK_APERTURE_START);
1159 exit(0);
1160 }
1161 ASSERT_NE(-1, child);
1162 ASSERT_EQ(child, waitpid(child, NULL, 0));
1163
1164 if (self->stdev_id) {
1165 /*
1166 * If a domain already existed then everything was pinned within
1167 * the fork, so this copies from one domain to another.
1168 */
1169 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1170 check_access_rw(_metadata, self->fd, access_id,
1171 MOCK_APERTURE_START, 0);
1172
1173 } else {
1174 /*
1175 * Otherwise we need to actually pin pages which can't happen
1176 * since the fork is gone.
1177 */
1178 test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1179 }
1180
1181 test_cmd_destroy_access(access_id);
1182 }
1183
1184 TEST_F(iommufd_ioas, fork_present)
1185 {
1186 __u32 access_id;
1187 int pipefds[2];
1188 uint64_t tmp;
1189 pid_t child;
1190 int efd;
1191
1192 test_cmd_create_access(self->ioas_id, &access_id, 0);
1193
1194 ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1195 efd = eventfd(0, EFD_CLOEXEC);
1196 ASSERT_NE(-1, efd);
1197
1198 /* Create a mapping with a different mm */
1199 child = fork();
1200 if (!child) {
1201 __u64 iova;
1202 uint64_t one = 1;
1203
1204 close(pipefds[1]);
1205 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1206 MOCK_APERTURE_START);
1207 if (write(efd, &one, sizeof(one)) != sizeof(one))
1208 exit(100);
1209 if (read(pipefds[0], &iova, 1) != 1)
1210 exit(100);
1211 exit(0);
1212 }
1213 close(pipefds[0]);
1214 ASSERT_NE(-1, child);
1215 ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1216
1217 /* Read pages from the remote process */
1218 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1219 check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1220
1221 ASSERT_EQ(0, close(pipefds[1]));
1222 ASSERT_EQ(child, waitpid(child, NULL, 0));
1223
1224 test_cmd_destroy_access(access_id);
1225 }
1226
1227 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1228 {
1229 struct iommu_option cmd = {
1230 .size = sizeof(cmd),
1231 .option_id = IOMMU_OPTION_HUGE_PAGES,
1232 .op = IOMMU_OPTION_OP_GET,
1233 .val64 = 3,
1234 .object_id = self->ioas_id,
1235 };
1236
1237 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1238 ASSERT_EQ(1, cmd.val64);
1239
1240 cmd.op = IOMMU_OPTION_OP_SET;
1241 cmd.val64 = 0;
1242 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1243
1244 cmd.op = IOMMU_OPTION_OP_GET;
1245 cmd.val64 = 3;
1246 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1247 ASSERT_EQ(0, cmd.val64);
1248
1249 cmd.op = IOMMU_OPTION_OP_SET;
1250 cmd.val64 = 2;
1251 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1252
1253 cmd.op = IOMMU_OPTION_OP_SET;
1254 cmd.val64 = 1;
1255 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1256 }
1257
1258 TEST_F(iommufd_ioas, ioas_iova_alloc)
1259 {
1260 unsigned int length;
1261 __u64 iova;
1262
1263 for (length = 1; length != PAGE_SIZE * 2; length++) {
1264 if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1265 test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1266 } else {
1267 test_ioctl_ioas_map(buffer, length, &iova);
1268 test_ioctl_ioas_unmap(iova, length);
1269 }
1270 }
1271 }
1272
1273 TEST_F(iommufd_ioas, ioas_align_change)
1274 {
1275 struct iommu_option cmd = {
1276 .size = sizeof(cmd),
1277 .option_id = IOMMU_OPTION_HUGE_PAGES,
1278 .op = IOMMU_OPTION_OP_SET,
1279 .object_id = self->ioas_id,
1280 /* 0 means everything must be aligned to PAGE_SIZE */
1281 .val64 = 0,
1282 };
1283
1284 /*
1285 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1286 * and map are present.
1287 */
1288 if (variant->mock_domains)
1289 return;
1290
1291 /*
1292 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1293 */
1294 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1295 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1296
1297 /* Misalignment is rejected at map time */
1298 test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1299 PAGE_SIZE,
1300 MOCK_APERTURE_START + PAGE_SIZE);
1301 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1302
1303 /* Reduce alignment */
1304 cmd.val64 = 1;
1305 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1306
1307 /* Confirm misalignment is rejected during alignment upgrade */
1308 test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1309 MOCK_APERTURE_START + PAGE_SIZE);
1310 cmd.val64 = 0;
1311 EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1312
1313 test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1314 test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1315 }
1316
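/*
 * Slide a MOCK_PAGE_SIZE source window across the mapped range in 511 byte
 * steps and expect IOMMU_IOAS_COPY to fail with ENOENT whenever the window is
 * not fully contained in the source mapping.
 */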
1317 TEST_F(iommufd_ioas, copy_sweep)
1318 {
1319 struct iommu_ioas_copy copy_cmd = {
1320 .size = sizeof(copy_cmd),
1321 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1322 .src_ioas_id = self->ioas_id,
1323 .dst_iova = MOCK_APERTURE_START,
1324 .length = MOCK_PAGE_SIZE,
1325 };
1326 unsigned int dst_ioas_id;
1327 uint64_t last_iova;
1328 uint64_t iova;
1329
1330 test_ioctl_ioas_alloc(&dst_ioas_id);
1331 copy_cmd.dst_ioas_id = dst_ioas_id;
1332
1333 if (variant->mock_domains)
1334 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1335 else
1336 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1337
1338 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1339 MOCK_APERTURE_START);
1340
1341 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1342 iova += 511) {
1343 copy_cmd.src_iova = iova;
1344 if (iova < MOCK_APERTURE_START ||
1345 iova + copy_cmd.length - 1 > last_iova) {
1346 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1347 &copy_cmd));
1348 } else {
1349 ASSERT_EQ(0,
1350 ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1351 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1352 copy_cmd.length);
1353 }
1354 }
1355
1356 test_ioctl_destroy(dst_ioas_id);
1357 }
1358
1359 FIXTURE(iommufd_mock_domain)
1360 {
1361 int fd;
1362 uint32_t ioas_id;
1363 uint32_t hwpt_id;
1364 uint32_t hwpt_ids[2];
1365 uint32_t stdev_ids[2];
1366 uint32_t idev_ids[2];
1367 int mmap_flags;
1368 size_t mmap_buf_size;
1369 };
1370
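/*
 * Variants select one or two mock domains and whether the mmap()ed test
 * buffer is backed by huge pages.
 */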
1371 FIXTURE_VARIANT(iommufd_mock_domain)
1372 {
1373 unsigned int mock_domains;
1374 bool hugepages;
1375 };
1376
1377 FIXTURE_SETUP(iommufd_mock_domain)
1378 {
1379 unsigned int i;
1380
1381 self->fd = open("/dev/iommu", O_RDWR);
1382 ASSERT_NE(-1, self->fd);
1383 test_ioctl_ioas_alloc(&self->ioas_id);
1384
1385 ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1386
1387 for (i = 0; i != variant->mock_domains; i++)
1388 test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1389 &self->hwpt_ids[i], &self->idev_ids[i]);
1390 self->hwpt_id = self->hwpt_ids[0];
1391
1392 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1393 self->mmap_buf_size = PAGE_SIZE * 8;
1394 if (variant->hugepages) {
1395 /*
1396 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1397 * not available.
1398 */
1399 self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1400 self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1401 }
1402 }
1403
1404 FIXTURE_TEARDOWN(iommufd_mock_domain)
1405 {
1406 teardown_iommufd(self->fd, _metadata);
1407 }
1408
1409 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1410 {
1411 .mock_domains = 1,
1412 .hugepages = false,
1413 };
1414
1415 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1416 {
1417 .mock_domains = 2,
1418 .hugepages = false,
1419 };
1420
1421 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1422 {
1423 .mock_domains = 1,
1424 .hugepages = true,
1425 };
1426
1427 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1428 {
1429 .mock_domains = 2,
1430 .hugepages = true,
1431 };
1432
1433 /* Have the kernel check that the user pages made it to the iommu_domain */
1434 #define check_mock_iova(_ptr, _iova, _length) \
1435 ({ \
1436 struct iommu_test_cmd check_map_cmd = { \
1437 .size = sizeof(check_map_cmd), \
1438 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \
1439 .id = self->hwpt_id, \
1440 .check_map = { .iova = _iova, \
1441 .length = _length, \
1442 .uptr = (uintptr_t)(_ptr) }, \
1443 }; \
1444 ASSERT_EQ(0, \
1445 ioctl(self->fd, \
1446 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1447 &check_map_cmd)); \
1448 if (self->hwpt_ids[1]) { \
1449 check_map_cmd.id = self->hwpt_ids[1]; \
1450 ASSERT_EQ(0, \
1451 ioctl(self->fd, \
1452 _IOMMU_TEST_CMD( \
1453 IOMMU_TEST_OP_MD_CHECK_MAP), \
1454 &check_map_cmd)); \
1455 } \
1456 })
1457
1458 TEST_F(iommufd_mock_domain, basic)
1459 {
1460 size_t buf_size = self->mmap_buf_size;
1461 uint8_t *buf;
1462 __u64 iova;
1463
1464 /* Simple one page map */
1465 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1466 check_mock_iova(buffer, iova, PAGE_SIZE);
1467
1468 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1469 0);
1470 ASSERT_NE(MAP_FAILED, buf);
1471
1472 /* EFAULT half way through mapping */
1473 ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1474 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1475
1476 /* EFAULT on first page */
1477 ASSERT_EQ(0, munmap(buf, buf_size / 2));
1478 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1479 }
1480
1481 TEST_F(iommufd_mock_domain, ro_unshare)
1482 {
1483 uint8_t *buf;
1484 __u64 iova;
1485 int fd;
1486
1487 fd = open("/proc/self/exe", O_RDONLY);
1488 ASSERT_NE(-1, fd);
1489
1490 buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1491 ASSERT_NE(MAP_FAILED, buf);
1492 close(fd);
1493
1494 /*
1495 * There have been lots of changes to the "unshare" mechanism in
1496 * get_user_pages(); make sure it works right. The write to the page
1497 * after we map it for reading should not change the assigned PFN.
1498 */
1499 ASSERT_EQ(0,
1500 _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1501 &iova, IOMMU_IOAS_MAP_READABLE));
1502 check_mock_iova(buf, iova, PAGE_SIZE);
1503 memset(buf, 1, PAGE_SIZE);
1504 check_mock_iova(buf, iova, PAGE_SIZE);
1505 ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1506 }
1507
1508 TEST_F(iommufd_mock_domain, all_aligns)
1509 {
1510 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1511 MOCK_PAGE_SIZE;
1512 size_t buf_size = self->mmap_buf_size;
1513 unsigned int start;
1514 unsigned int end;
1515 uint8_t *buf;
1516
1517 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1518 0);
1519 ASSERT_NE(MAP_FAILED, buf);
1520 check_refs(buf, buf_size, 0);
1521
1522 /*
1523 * Map every combination of page size and alignment within a big region,
1524 * using a coarser step for the hugepage case as it takes so long to finish.
1525 */
1526 for (start = 0; start < buf_size; start += test_step) {
1527 if (variant->hugepages)
1528 end = buf_size;
1529 else
1530 end = start + MOCK_PAGE_SIZE;
1531 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1532 size_t length = end - start;
1533 __u64 iova;
1534
1535 test_ioctl_ioas_map(buf + start, length, &iova);
1536 check_mock_iova(buf + start, iova, length);
1537 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1538 end / PAGE_SIZE * PAGE_SIZE -
1539 start / PAGE_SIZE * PAGE_SIZE,
1540 1);
1541
1542 test_ioctl_ioas_unmap(iova, length);
1543 }
1544 }
1545 check_refs(buf, buf_size, 0);
1546 ASSERT_EQ(0, munmap(buf, buf_size));
1547 }
1548
1549 TEST_F(iommufd_mock_domain, all_aligns_copy)
1550 {
1551 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1552 MOCK_PAGE_SIZE;
1553 size_t buf_size = self->mmap_buf_size;
1554 unsigned int start;
1555 unsigned int end;
1556 uint8_t *buf;
1557
1558 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1559 0);
1560 ASSERT_NE(MAP_FAILED, buf);
1561 check_refs(buf, buf_size, 0);
1562
1563 /*
1564 * Map every combination of page size and alignment within a big region,
1565 * using a coarser step for the hugepage case as it takes so long to finish.
1566 */
1567 for (start = 0; start < buf_size; start += test_step) {
1568 if (variant->hugepages)
1569 end = buf_size;
1570 else
1571 end = start + MOCK_PAGE_SIZE;
1572 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1573 size_t length = end - start;
1574 unsigned int old_id;
1575 uint32_t mock_stdev_id;
1576 __u64 iova;
1577
1578 test_ioctl_ioas_map(buf + start, length, &iova);
1579
1580 /* Add and destroy a domain while the area exists */
1581 old_id = self->hwpt_ids[1];
1582 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1583 &self->hwpt_ids[1], NULL);
1584
1585 check_mock_iova(buf + start, iova, length);
1586 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1587 end / PAGE_SIZE * PAGE_SIZE -
1588 start / PAGE_SIZE * PAGE_SIZE,
1589 1);
1590
1591 test_ioctl_destroy(mock_stdev_id);
1592 self->hwpt_ids[1] = old_id;
1593
1594 test_ioctl_ioas_unmap(iova, length);
1595 }
1596 }
1597 check_refs(buf, buf_size, 0);
1598 ASSERT_EQ(0, munmap(buf, buf_size));
1599 }
1600
1601 TEST_F(iommufd_mock_domain, user_copy)
1602 {
1603 struct iommu_test_cmd access_cmd = {
1604 .size = sizeof(access_cmd),
1605 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1606 .access_pages = { .length = BUFFER_SIZE,
1607 .uptr = (uintptr_t)buffer },
1608 };
1609 struct iommu_ioas_copy copy_cmd = {
1610 .size = sizeof(copy_cmd),
1611 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1612 .dst_ioas_id = self->ioas_id,
1613 .dst_iova = MOCK_APERTURE_START,
1614 .length = BUFFER_SIZE,
1615 };
1616 struct iommu_ioas_unmap unmap_cmd = {
1617 .size = sizeof(unmap_cmd),
1618 .ioas_id = self->ioas_id,
1619 .iova = MOCK_APERTURE_START,
1620 .length = BUFFER_SIZE,
1621 };
1622 unsigned int new_ioas_id, ioas_id;
1623
1624 /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1625 test_ioctl_ioas_alloc(&ioas_id);
1626 test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
1627 &copy_cmd.src_iova);
1628
1629 test_cmd_create_access(ioas_id, &access_cmd.id,
1630 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1631
1632 access_cmd.access_pages.iova = copy_cmd.src_iova;
1633 ASSERT_EQ(0,
1634 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1635 &access_cmd));
1636 copy_cmd.src_ioas_id = ioas_id;
1637 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1638 check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1639
1640 /* Now replace the ioas with a new one */
1641 test_ioctl_ioas_alloc(&new_ioas_id);
1642 test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
1643 &copy_cmd.src_iova);
1644 test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1645
1646 /* Destroy the old ioas and cleanup copied mapping */
1647 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1648 test_ioctl_destroy(ioas_id);
1649
1650 /* Then run the same test again with the new ioas */
1651 access_cmd.access_pages.iova = copy_cmd.src_iova;
1652 ASSERT_EQ(0,
1653 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1654 &access_cmd));
1655 copy_cmd.src_ioas_id = new_ioas_id;
1656 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1657 check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1658
1659 test_cmd_destroy_access_pages(
1660 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1661 test_cmd_destroy_access(access_cmd.id);
1662
1663 test_ioctl_destroy(new_ioas_id);
1664 }
1665
1666 TEST_F(iommufd_mock_domain, replace)
1667 {
1668 uint32_t ioas_id;
1669
1670 test_ioctl_ioas_alloc(&ioas_id);
1671
1672 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1673
1674 /*
1675 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
1676 * should get ENOENT when we try to use it.
1677 */
1678 if (variant->mock_domains == 1)
1679 test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1680 self->hwpt_ids[0]);
1681
1682 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1683 if (variant->mock_domains >= 2) {
1684 test_cmd_mock_domain_replace(self->stdev_ids[0],
1685 self->hwpt_ids[1]);
1686 test_cmd_mock_domain_replace(self->stdev_ids[0],
1687 self->hwpt_ids[1]);
1688 test_cmd_mock_domain_replace(self->stdev_ids[0],
1689 self->hwpt_ids[0]);
1690 }
1691
1692 test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
1693 test_ioctl_destroy(ioas_id);
1694 }
1695
1696 TEST_F(iommufd_mock_domain, alloc_hwpt)
1697 {
1698 int i;
1699
1700 for (i = 0; i != variant->mock_domains; i++) {
1701 uint32_t hwpt_id[2];
1702 uint32_t stddev_id;
1703
1704 test_err_hwpt_alloc(EOPNOTSUPP,
1705 self->idev_ids[i], self->ioas_id,
1706 ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
1707 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1708 0, &hwpt_id[0]);
1709 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1710 IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
1711
1712 /* Do a hw_pagetable rotation test */
1713 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
1714 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
1715 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
1716 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
1717 test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
1718 test_ioctl_destroy(hwpt_id[1]);
1719
1720 test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
1721 test_ioctl_destroy(stddev_id);
1722 test_ioctl_destroy(hwpt_id[0]);
1723 }
1724 }
1725
1726 FIXTURE(iommufd_dirty_tracking)
1727 {
1728 int fd;
1729 uint32_t ioas_id;
1730 uint32_t hwpt_id;
1731 uint32_t stdev_id;
1732 uint32_t idev_id;
1733 unsigned long page_size;
1734 unsigned long bitmap_size;
1735 void *bitmap;
1736 void *buffer;
1737 };
1738
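/*
 * Variants sweep buffer sizes chosen so the dirty bitmap covers different
 * index word sizes (u8/u32/u64) and trailing bits, with and without huge
 * pages.
 */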
1739 FIXTURE_VARIANT(iommufd_dirty_tracking)
1740 {
1741 unsigned long buffer_size;
1742 bool hugepages;
1743 };
1744
1745 FIXTURE_SETUP(iommufd_dirty_tracking)
1746 {
1747 unsigned long size;
1748 int mmap_flags;
1749 void *vrc;
1750 int rc;
1751
1752 if (variant->buffer_size < MOCK_PAGE_SIZE) {
1753 SKIP(return,
1754 "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
1755 variant->buffer_size, MOCK_PAGE_SIZE);
1756 }
1757
1758 self->fd = open("/dev/iommu", O_RDWR);
1759 ASSERT_NE(-1, self->fd);
1760
1761 rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
1762 if (rc || !self->buffer) {
1763 SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
1764 variant->buffer_size, rc);
1765 }
1766
1767 mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
1768 if (variant->hugepages) {
1769 /*
1770 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1771 * not available.
1772 */
1773 mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1774 }
1775 assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
1776 vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
1777 mmap_flags, -1, 0);
1778 assert(vrc == self->buffer);
1779
1780 self->page_size = MOCK_PAGE_SIZE;
1781 self->bitmap_size = variant->buffer_size / self->page_size;
1782
1783 /* Provision with an extra (PAGE_SIZE) for the unaligned case */
1784 size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
1785 rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
1786 assert(!rc);
1787 assert(self->bitmap);
1788 assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
1789
1790 test_ioctl_ioas_alloc(&self->ioas_id);
1791 /* Enable 1M mock IOMMU hugepages */
1792 if (variant->hugepages) {
1793 test_cmd_mock_domain_flags(self->ioas_id,
1794 MOCK_FLAGS_DEVICE_HUGE_IOVA,
1795 &self->stdev_id, &self->hwpt_id,
1796 &self->idev_id);
1797 } else {
1798 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
1799 &self->hwpt_id, &self->idev_id);
1800 }
1801 }
1802
1803 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
1804 {
1805 munmap(self->buffer, variant->buffer_size);
1806 munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
1807 teardown_iommufd(self->fd, _metadata);
1808 }
1809
1810 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
1811 {
1812 	/* half of a u8 index bitmap */
1813 .buffer_size = 8UL * 1024UL,
1814 };
1815
1816 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
1817 {
1818 /* one u8 index bitmap */
1819 .buffer_size = 16UL * 1024UL,
1820 };
1821
1822 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
1823 {
1824 /* one u32 index bitmap */
1825 .buffer_size = 64UL * 1024UL,
1826 };
1827
1828 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
1829 {
1830 /* one u64 index bitmap */
1831 .buffer_size = 128UL * 1024UL,
1832 };
1833
1834 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
1835 {
1836 	/* two full u64 words plus a trailing partial bitmap word */
1837 .buffer_size = 320UL * 1024UL,
1838 };
1839
1840 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
1841 {
1842 /* 4K bitmap (64M IOVA range) */
1843 .buffer_size = 64UL * 1024UL * 1024UL,
1844 };
1845
1846 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
1847 {
1848 /* 4K bitmap (64M IOVA range) */
1849 .buffer_size = 64UL * 1024UL * 1024UL,
1850 .hugepages = true,
1851 };
1852
1853 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
1854 {
1855 /* 8K bitmap (128M IOVA range) */
1856 .buffer_size = 128UL * 1024UL * 1024UL,
1857 };
1858
1859 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
1860 {
1861 /* 8K bitmap (128M IOVA range) */
1862 .buffer_size = 128UL * 1024UL * 1024UL,
1863 .hugepages = true,
1864 };
1865
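/*
 * Enforced dirty tracking: a mock device created with
 * MOCK_FLAGS_DEVICE_NO_DIRTY cannot attach to a dirty-tracking HWPT (EINVAL)
 * and cannot have a dirty-tracking HWPT allocated against it (EOPNOTSUPP).
 */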
1866 TEST_F(iommufd_dirty_tracking, enforce_dirty)
1867 {
1868 uint32_t ioas_id, stddev_id, idev_id;
1869 uint32_t hwpt_id, _hwpt_id;
1870 uint32_t dev_flags;
1871
1872 /* Regular case */
1873 dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
1874 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1875 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1876 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1877 test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
1878 NULL);
1879 test_ioctl_destroy(stddev_id);
1880 test_ioctl_destroy(hwpt_id);
1881
1882 /* IOMMU device does not support dirty tracking */
1883 test_ioctl_ioas_alloc(&ioas_id);
1884 test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
1885 &idev_id);
1886 test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
1887 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1888 test_ioctl_destroy(stddev_id);
1889 }
1890
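/*
 * Dirty tracking can be enabled and then disabled again on a dirty-capable
 * HWPT via test_cmd_set_dirty_tracking().
 */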
1891 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
1892 {
1893 uint32_t stddev_id;
1894 uint32_t hwpt_id;
1895
1896 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1897 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1898 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1899 test_cmd_set_dirty_tracking(hwpt_id, true);
1900 test_cmd_set_dirty_tracking(hwpt_id, false);
1901
1902 test_ioctl_destroy(stddev_id);
1903 test_ioctl_destroy(hwpt_id);
1904 }
1905
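/*
 * The mock device must advertise IOMMU_HW_CAP_DIRTY_TRACKING in its hw_info
 * capabilities, queried here through test_cmd_get_hw_capabilities().
 */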
1906 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
1907 {
1908 uint32_t caps = 0;
1909 uint32_t stddev_id;
1910 uint32_t hwpt_id;
1911
1912 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
1913 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1914 test_cmd_get_hw_capabilities(self->idev_id, caps,
1915 IOMMU_HW_CAP_DIRTY_TRACKING);
1916 ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
1917 caps & IOMMU_HW_CAP_DIRTY_TRACKING);
1918
1919 test_ioctl_destroy(stddev_id);
1920 test_ioctl_destroy(hwpt_id);
1921 }
1922
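/*
 * Map the whole buffer at MOCK_APERTURE_START, enable dirty tracking and
 * read the dirty bitmap back through an aligned, a PAGE_SIZE-unaligned and a
 * u64-unaligned user pointer.
 */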
1923 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
1924 {
1925 uint32_t page_size = MOCK_PAGE_SIZE;
1926 uint32_t hwpt_id;
1927 uint32_t ioas_id;
1928
1929 if (variant->hugepages)
1930 page_size = MOCK_HUGE_PAGE_SIZE;
1931
1932 test_ioctl_ioas_alloc(&ioas_id);
1933 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1934 variant->buffer_size, MOCK_APERTURE_START);
1935
1936 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1937 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1938
1939 test_cmd_set_dirty_tracking(hwpt_id, true);
1940
1941 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1942 MOCK_APERTURE_START, self->page_size, page_size,
1943 self->bitmap, self->bitmap_size, 0, _metadata);
1944
1945 /* PAGE_SIZE unaligned bitmap */
1946 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1947 MOCK_APERTURE_START, self->page_size, page_size,
1948 self->bitmap + MOCK_PAGE_SIZE,
1949 self->bitmap_size, 0, _metadata);
1950
1951 /* u64 unaligned bitmap */
1952 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1953 MOCK_APERTURE_START, self->page_size, page_size,
1954 self->bitmap + 0xff1, self->bitmap_size, 0,
1955 _metadata);
1956
1957 test_ioctl_destroy(hwpt_id);
1958 }
1959
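/*
 * Same as get_dirty_bitmap, but with IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR so
 * reading the bitmap must not clear the recorded dirty state.
 */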
1960 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
1961 {
1962 uint32_t page_size = MOCK_PAGE_SIZE;
1963 uint32_t hwpt_id;
1964 uint32_t ioas_id;
1965
1966 if (variant->hugepages)
1967 page_size = MOCK_HUGE_PAGE_SIZE;
1968
1969 test_ioctl_ioas_alloc(&ioas_id);
1970 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1971 variant->buffer_size, MOCK_APERTURE_START);
1972
1973 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1974 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1975
1976 test_cmd_set_dirty_tracking(hwpt_id, true);
1977
1978 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1979 MOCK_APERTURE_START, self->page_size, page_size,
1980 self->bitmap, self->bitmap_size,
1981 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1982 _metadata);
1983
1984 /* Unaligned bitmap */
1985 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1986 MOCK_APERTURE_START, self->page_size, page_size,
1987 self->bitmap + MOCK_PAGE_SIZE,
1988 self->bitmap_size,
1989 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1990 _metadata);
1991
1992 /* u64 unaligned bitmap */
1993 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1994 MOCK_APERTURE_START, self->page_size, page_size,
1995 self->bitmap + 0xff1, self->bitmap_size,
1996 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1997 _metadata);
1998
1999 test_ioctl_destroy(hwpt_id);
2000 }
2001
2002 /* VFIO compatibility IOCTLs */
2003
2004 TEST_F(iommufd, simple_ioctls)
2005 {
2006 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
2007 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
2008 }
2009
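/*
 * VFIO_IOMMU_UNMAP_DMA argument validation: a bad argsz and unknown flags
 * return EINVAL, and the call fails with ENODEV before any compat domain is
 * attached.
 */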
2010 TEST_F(iommufd, unmap_cmd)
2011 {
2012 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2013 .iova = MOCK_APERTURE_START,
2014 .size = PAGE_SIZE,
2015 };
2016
2017 unmap_cmd.argsz = 1;
2018 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2019
2020 unmap_cmd.argsz = sizeof(unmap_cmd);
2021 unmap_cmd.flags = 1 << 31;
2022 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2023
2024 unmap_cmd.flags = 0;
2025 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2026 }
2027
2028 TEST_F(iommufd, map_cmd)
2029 {
2030 struct vfio_iommu_type1_dma_map map_cmd = {
2031 .iova = MOCK_APERTURE_START,
2032 .size = PAGE_SIZE,
2033 .vaddr = (__u64)buffer,
2034 };
2035
2036 map_cmd.argsz = 1;
2037 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2038
2039 map_cmd.argsz = sizeof(map_cmd);
2040 map_cmd.flags = 1 << 31;
2041 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2042
2043 /* Requires a domain to be attached */
2044 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2045 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2046 }
2047
2048 TEST_F(iommufd, info_cmd)
2049 {
2050 struct vfio_iommu_type1_info info_cmd = {};
2051
2052 /* Invalid argsz */
2053 info_cmd.argsz = 1;
2054 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2055
2056 info_cmd.argsz = sizeof(info_cmd);
2057 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2058 }
2059
2060 TEST_F(iommufd, set_iommu_cmd)
2061 {
2062 /* Requires a domain to be attached */
2063 EXPECT_ERRNO(ENODEV,
2064 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2065 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2066 }
2067
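/*
 * IOMMU_VFIO_IOAS: GET fails with ENODEV until a compat IOAS is set, SET
 * rejects an unknown id, GET then returns the id that was set, and CLEAR
 * removes it again.
 */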
2068 TEST_F(iommufd, vfio_ioas)
2069 {
2070 struct iommu_vfio_ioas vfio_ioas_cmd = {
2071 .size = sizeof(vfio_ioas_cmd),
2072 .op = IOMMU_VFIO_IOAS_GET,
2073 };
2074 __u32 ioas_id;
2075
2076 /* ENODEV if there is no compat ioas */
2077 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2078
2079 /* Invalid id for set */
2080 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2081 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2082
2083 	/* Valid id for set */
2084 test_ioctl_ioas_alloc(&ioas_id);
2085 vfio_ioas_cmd.ioas_id = ioas_id;
2086 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2087
2088 /* Same id comes back from get */
2089 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2090 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2091 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2092
2093 /* Clear works */
2094 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2095 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2096 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2097 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2098 }
2099
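/*
 * VFIO compatibility fixture: an IOAS with a mock domain attached is made
 * the compat IOAS and the container is then set to either VFIO_TYPE1_IOMMU
 * or VFIO_TYPE1v2_IOMMU depending on the variant.
 */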
2100 FIXTURE(vfio_compat_mock_domain)
2101 {
2102 int fd;
2103 uint32_t ioas_id;
2104 };
2105
2106 FIXTURE_VARIANT(vfio_compat_mock_domain)
2107 {
2108 unsigned int version;
2109 };
2110
2111 FIXTURE_SETUP(vfio_compat_mock_domain)
2112 {
2113 struct iommu_vfio_ioas vfio_ioas_cmd = {
2114 .size = sizeof(vfio_ioas_cmd),
2115 .op = IOMMU_VFIO_IOAS_SET,
2116 };
2117
2118 self->fd = open("/dev/iommu", O_RDWR);
2119 ASSERT_NE(-1, self->fd);
2120
2121 /* Create what VFIO would consider a group */
2122 test_ioctl_ioas_alloc(&self->ioas_id);
2123 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2124
2125 /* Attach it to the vfio compat */
2126 vfio_ioas_cmd.ioas_id = self->ioas_id;
2127 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2128 ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2129 }
2130
2131 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2132 {
2133 teardown_iommufd(self->fd, _metadata);
2134 }
2135
2136 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2137 {
2138 .version = VFIO_TYPE1v2_IOMMU,
2139 };
2140
2141 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2142 {
2143 .version = VFIO_TYPE1_IOMMU,
2144 };
2145
2146 TEST_F(vfio_compat_mock_domain, simple_close)
2147 {
2148 }
2149
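/*
 * IOMMU_OPTION_HUGE_PAGES on the compat IOAS is expected to read back as 0
 * in VFIO_TYPE1_IOMMU mode and 1 in VFIO_TYPE1v2_IOMMU mode.
 */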
2150 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2151 {
2152 struct iommu_option cmd = {
2153 .size = sizeof(cmd),
2154 .option_id = IOMMU_OPTION_HUGE_PAGES,
2155 .op = IOMMU_OPTION_OP_GET,
2156 .val64 = 3,
2157 .object_id = self->ioas_id,
2158 };
2159
2160 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2161 if (variant->version == VFIO_TYPE1_IOMMU) {
2162 ASSERT_EQ(0, cmd.val64);
2163 } else {
2164 ASSERT_EQ(1, cmd.val64);
2165 }
2166 }
2167
2168 /*
2169  * Execute an ioctl command stored in buffer (see ioctl_check_buf() below) and
2170  * check that the kernel does not write past the size the command declared.
2171 */
2172 static bool is_filled(const void *buf, uint8_t c, size_t len)
2173 {
2174 const uint8_t *cbuf = buf;
2175
2176 for (; len; cbuf++, len--)
2177 if (*cbuf != c)
2178 return false;
2179 return true;
2180 }
2181
2182 #define ioctl_check_buf(fd, cmd) \
2183 ({ \
2184 size_t _cmd_len = *(__u32 *)buffer; \
2185 \
2186 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2187 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
2188 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
2189 BUFFER_SIZE - _cmd_len)); \
2190 })
2191
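/*
 * Walk the capability chain returned by VFIO_IOMMU_GET_INFO and validate the
 * IOVA range and dma_avail capabilities; any other capability id is a
 * failure.
 */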
2192 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2193 struct vfio_iommu_type1_info *info_cmd)
2194 {
2195 const struct vfio_info_cap_header *cap;
2196
2197 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2198 cap = buffer + info_cmd->cap_offset;
2199 while (true) {
2200 size_t cap_size;
2201
2202 if (cap->next)
2203 cap_size = (buffer + cap->next) - (void *)cap;
2204 else
2205 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2206
2207 switch (cap->id) {
2208 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2209 struct vfio_iommu_type1_info_cap_iova_range *data =
2210 (void *)cap;
2211
2212 ASSERT_EQ(1, data->header.version);
2213 ASSERT_EQ(1, data->nr_iovas);
2214 EXPECT_EQ(MOCK_APERTURE_START,
2215 data->iova_ranges[0].start);
2216 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2217 break;
2218 }
2219 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2220 struct vfio_iommu_type1_info_dma_avail *data =
2221 (void *)cap;
2222
2223 ASSERT_EQ(1, data->header.version);
2224 ASSERT_EQ(sizeof(*data), cap_size);
2225 break;
2226 }
2227 default:
2228 ASSERT_EQ(false, true);
2229 break;
2230 }
2231 if (!cap->next)
2232 break;
2233
2234 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2235 ASSERT_GE(buffer + cap->next, (void *)cap);
2236 cap = buffer + cap->next;
2237 }
2238 }
2239
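/*
 * VFIO_IOMMU_GET_INFO: check the pre-capability ABI layout, then the full
 * layout, then every possible argsz up to the reported chain size to make
 * sure a truncated buffer never produces a corrupted capability chain.
 */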
2240 TEST_F(vfio_compat_mock_domain, get_info)
2241 {
2242 struct vfio_iommu_type1_info *info_cmd = buffer;
2243 unsigned int i;
2244 size_t caplen;
2245
2246 /* Pre-cap ABI */
2247 *info_cmd = (struct vfio_iommu_type1_info){
2248 .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2249 };
2250 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2251 ASSERT_NE(0, info_cmd->iova_pgsizes);
2252 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2253 info_cmd->flags);
2254
2255 /* Read the cap chain size */
2256 *info_cmd = (struct vfio_iommu_type1_info){
2257 .argsz = sizeof(*info_cmd),
2258 };
2259 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2260 ASSERT_NE(0, info_cmd->iova_pgsizes);
2261 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2262 info_cmd->flags);
2263 ASSERT_EQ(0, info_cmd->cap_offset);
2264 ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2265
2266 	/* Read the caps; the kernel should never produce a corrupted cap chain */
2267 caplen = info_cmd->argsz;
2268 for (i = sizeof(*info_cmd); i < caplen; i++) {
2269 *info_cmd = (struct vfio_iommu_type1_info){
2270 .argsz = i,
2271 };
2272 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2273 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2274 info_cmd->flags);
2275 if (!info_cmd->cap_offset)
2276 continue;
2277 check_vfio_info_cap_chain(_metadata, info_cmd);
2278 }
2279 }
2280
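/* Used by the map tests to unmap IOVAs in a randomized order. */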
2281 static void shuffle_array(unsigned long *array, size_t nelms)
2282 {
2283 unsigned int i;
2284
2285 /* Shuffle */
2286 for (i = 0; i != nelms; i++) {
2287 unsigned long tmp = array[i];
2288 unsigned int other = rand() % (nelms - i);
2289
2290 array[i] = array[other];
2291 array[other] = tmp;
2292 }
2293 }
2294
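/*
 * VFIO_IOMMU_MAP_DMA/UNMAP_DMA round trips: one BUFFER_SIZE mapping,
 * VFIO_DMA_UNMAP_FLAG_ALL (which requires iova/size of 0), and per-page
 * mappings torn down in a shuffled order.
 */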
2295 TEST_F(vfio_compat_mock_domain, map)
2296 {
2297 struct vfio_iommu_type1_dma_map map_cmd = {
2298 .argsz = sizeof(map_cmd),
2299 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2300 .vaddr = (uintptr_t)buffer,
2301 .size = BUFFER_SIZE,
2302 .iova = MOCK_APERTURE_START,
2303 };
2304 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2305 .argsz = sizeof(unmap_cmd),
2306 .size = BUFFER_SIZE,
2307 .iova = MOCK_APERTURE_START,
2308 };
2309 unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2310 unsigned int i;
2311
2312 /* Simple map/unmap */
2313 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2314 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2315 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2316
2317 /* UNMAP_FLAG_ALL requires 0 iova/size */
2318 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2319 unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2320 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2321
2322 unmap_cmd.iova = 0;
2323 unmap_cmd.size = 0;
2324 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2325 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2326
2327 /* Small pages */
2328 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2329 map_cmd.iova = pages_iova[i] =
2330 MOCK_APERTURE_START + i * PAGE_SIZE;
2331 map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2332 map_cmd.size = PAGE_SIZE;
2333 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2334 }
2335 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2336
2337 unmap_cmd.flags = 0;
2338 unmap_cmd.size = PAGE_SIZE;
2339 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2340 unmap_cmd.iova = pages_iova[i];
2341 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2342 }
2343 }
2344
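/*
 * Map two hugepages and unmap them in 1/16th sized chunks: type1 mode may
 * split a larger mapping, while type1v2 must fail each partial unmap with
 * ENOENT.
 */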
2345 TEST_F(vfio_compat_mock_domain, huge_map)
2346 {
2347 size_t buf_size = HUGEPAGE_SIZE * 2;
2348 struct vfio_iommu_type1_dma_map map_cmd = {
2349 .argsz = sizeof(map_cmd),
2350 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2351 .size = buf_size,
2352 .iova = MOCK_APERTURE_START,
2353 };
2354 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2355 .argsz = sizeof(unmap_cmd),
2356 };
2357 unsigned long pages_iova[16];
2358 unsigned int i;
2359 void *buf;
2360
2361 /* Test huge pages and splitting */
2362 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2363 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2364 0);
2365 ASSERT_NE(MAP_FAILED, buf);
2366 map_cmd.vaddr = (uintptr_t)buf;
2367 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2368
2369 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2370 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2371 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2372 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2373
2374 /* type1 mode can cut up larger mappings, type1v2 always fails */
2375 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2376 unmap_cmd.iova = pages_iova[i];
2377 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2378 if (variant->version == VFIO_TYPE1_IOMMU) {
2379 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2380 &unmap_cmd));
2381 } else {
2382 EXPECT_ERRNO(ENOENT,
2383 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2384 &unmap_cmd));
2385 }
2386 }
2387 }
2388
2389 TEST_HARNESS_MAIN
2390