1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <asm/unistd.h>
4 #include <stdlib.h>
5 #include <sys/capability.h>
6 #include <sys/mman.h>
7 #include <sys/eventfd.h>
8
9 #define __EXPORTED_HEADERS__
10 #include <linux/vfio.h>
11
12 #include "iommufd_utils.h"
13
14 static unsigned long HUGEPAGE_SIZE;
15
16 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
17 #define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
18
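/* Read the THP PMD size from sysfs, falling back to 2MiB if it is unavailable */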
19 static unsigned long get_huge_page_size(void)
20 {
21 char buf[80];
22 int ret;
23 int fd;
24
25 fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
26 O_RDONLY);
27 if (fd < 0)
28 return 2 * 1024 * 1024;
29
30 ret = read(fd, buf, sizeof(buf));
31 close(fd);
32 if (ret <= 0 || ret == sizeof(buf))
33 return 2 * 1024 * 1024;
34 buf[ret] = 0;
35 return strtoul(buf, NULL, 10);
36 }
37
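/*
 * Constructor runs before main(): pick up PAGE_SIZE and the hugepage size,
 * back "buffer" with a hugepage-aligned MAP_SHARED anonymous mapping, and
 * create the memfd-backed buffer used by the file-mapping tests.
 */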
38 static __attribute__((constructor)) void setup_sizes(void)
39 {
40 void *vrc;
41 int rc;
42
43 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
44 HUGEPAGE_SIZE = get_huge_page_size();
45
46 BUFFER_SIZE = PAGE_SIZE * 16;
47 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
48 assert(!rc);
49 assert(buffer);
50 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
51 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
52 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
53 assert(vrc == buffer);
54
55 mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
56 &mfd);
57 assert(mfd_buffer != MAP_FAILED);
58 assert(mfd > 0);
59 }
60
61 FIXTURE(iommufd)
62 {
63 int fd;
64 };
65
66 FIXTURE_SETUP(iommufd)
67 {
68 self->fd = open("/dev/iommu", O_RDWR);
69 ASSERT_NE(-1, self->fd);
70 }
71
72 FIXTURE_TEARDOWN(iommufd)
73 {
74 teardown_iommufd(self->fd, _metadata);
75 }
76
77 TEST_F(iommufd, simple_close)
78 {
79 }
80
81 TEST_F(iommufd, cmd_fail)
82 {
83 struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
84
85 /* object id is invalid */
86 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
87 /* Bad pointer */
88 EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
89 /* Unknown ioctl */
90 EXPECT_ERRNO(ENOTTY,
91 ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
92 &cmd));
93 }
94
95 TEST_F(iommufd, cmd_length)
96 {
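/*
 * For each ioctl: a size below the minimum must fail with EINVAL, a size past
 * the structure with non-zero trailing bytes must fail with E2BIG, and a
 * larger size with zeroed trailing bytes must behave like the exact size.
 */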
97 #define TEST_LENGTH(_struct, _ioctl, _last) \
98 { \
99 size_t min_size = offsetofend(struct _struct, _last); \
100 struct { \
101 struct _struct cmd; \
102 uint8_t extra; \
103 } cmd = { .cmd = { .size = min_size - 1 }, \
104 .extra = UINT8_MAX }; \
105 int old_errno; \
106 int rc; \
107 \
108 EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
109 cmd.cmd.size = sizeof(struct _struct) + 1; \
110 EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
111 cmd.cmd.size = sizeof(struct _struct); \
112 rc = ioctl(self->fd, _ioctl, &cmd); \
113 old_errno = errno; \
114 cmd.cmd.size = sizeof(struct _struct) + 1; \
115 cmd.extra = 0; \
116 if (rc) { \
117 EXPECT_ERRNO(old_errno, \
118 ioctl(self->fd, _ioctl, &cmd)); \
119 } else { \
120 ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
121 } \
122 }
123
124 TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
125 TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
126 TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
127 TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
128 TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
129 TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
130 out_iova_alignment);
131 TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
132 allowed_iovas);
133 TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
134 TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
135 TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
136 TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
137 TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
138 TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
139 TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
140 TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
141 TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
142 __reserved);
143 #undef TEST_LENGTH
144 }
145
146 TEST_F(iommufd, cmd_ex_fail)
147 {
148 struct {
149 struct iommu_destroy cmd;
150 __u64 future;
151 } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
152
153 /* object id is invalid and command is longer */
154 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
155 /* future area is non-zero */
156 cmd.future = 1;
157 EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
158 /* Original command "works" */
159 cmd.cmd.size = sizeof(cmd.cmd);
160 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
161 /* Short command fails */
162 cmd.cmd.size = sizeof(cmd.cmd) - 1;
163 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
164 }
165
166 TEST_F(iommufd, global_options)
167 {
168 struct iommu_option cmd = {
169 .size = sizeof(cmd),
170 .option_id = IOMMU_OPTION_RLIMIT_MODE,
171 .op = IOMMU_OPTION_OP_GET,
172 .val64 = 1,
173 };
174
175 cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
176 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
177 ASSERT_EQ(0, cmd.val64);
178
179 /* This requires root */
180 cmd.op = IOMMU_OPTION_OP_SET;
181 cmd.val64 = 1;
182 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
183 cmd.val64 = 2;
184 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
185
186 cmd.op = IOMMU_OPTION_OP_GET;
187 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
188 ASSERT_EQ(1, cmd.val64);
189
190 cmd.op = IOMMU_OPTION_OP_SET;
191 cmd.val64 = 0;
192 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
193
194 cmd.op = IOMMU_OPTION_OP_GET;
195 cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
196 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
197 cmd.op = IOMMU_OPTION_OP_SET;
198 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
199 }
200
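/* Clear CAP_IPC_LOCK from the effective capability set of the current process */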
201 static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
202 {
203 cap_t caps;
204 cap_value_t cap_list[1] = { CAP_IPC_LOCK };
205
206 caps = cap_get_proc();
207 ASSERT_NE(caps, NULL);
208 ASSERT_NE(-1,
209 cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
210 ASSERT_NE(-1, cap_set_proc(caps));
211 cap_free(caps);
212 }
213
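/* Look up a numeric field (e.g. "VmPin:") in /proc/<pid>/status, -1 on failure */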
214 static long get_proc_status_value(pid_t pid, const char *var)
215 {
216 FILE *fp;
217 char buf[80], tag[80];
218 long val = -1;
219
220 snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
221 fp = fopen(buf, "r");
222 if (!fp)
223 return val;
224
225 while (fgets(buf, sizeof(buf), fp))
226 if (fscanf(fp, "%s %ld\n", tag, &val) == 2 && !strcmp(tag, var))
227 break;
228
229 fclose(fp);
230 return val;
231 }
232
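/* VmPin: and VmLck: are reported by /proc/<pid>/status in kB */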
233 static long get_vm_pinned(pid_t pid)
234 {
235 return get_proc_status_value(pid, "VmPin:");
236 }
237
238 static long get_vm_locked(pid_t pid)
239 {
240 return get_proc_status_value(pid, "VmLck:");
241 }
242
243 FIXTURE(change_process)
244 {
245 int fd;
246 uint32_t ioas_id;
247 };
248
249 FIXTURE_VARIANT(change_process)
250 {
251 int accounting;
252 };
253
254 FIXTURE_SETUP(change_process)
255 {
256 self->fd = open("/dev/iommu", O_RDWR);
257 ASSERT_NE(-1, self->fd);
258
259 drop_cap_ipc_lock(_metadata);
260 if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
261 struct iommu_option set_limit_cmd = {
262 .size = sizeof(set_limit_cmd),
263 .option_id = IOMMU_OPTION_RLIMIT_MODE,
264 .op = IOMMU_OPTION_OP_SET,
265 .val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
266 };
267 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
268 }
269
270 test_ioctl_ioas_alloc(&self->ioas_id);
271 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
272 }
273
274 FIXTURE_TEARDOWN(change_process)
275 {
276 teardown_iommufd(self->fd, _metadata);
277 }
278
279 FIXTURE_VARIANT_ADD(change_process, account_none)
280 {
281 .accounting = IOPT_PAGES_ACCOUNT_NONE,
282 };
283
284 FIXTURE_VARIANT_ADD(change_process, account_user)
285 {
286 .accounting = IOPT_PAGES_ACCOUNT_USER,
287 };
288
289 FIXTURE_VARIANT_ADD(change_process, account_mm)
290 {
291 .accounting = IOPT_PAGES_ACCOUNT_MM,
292 };
293
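/*
 * IOMMU_IOAS_CHANGE_PROCESS moves pinned/locked memory accounting from the
 * parent to the calling process; it is rejected with EINVAL while non-file
 * mappings are present in the IOAS.
 */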
294 TEST_F(change_process, basic)
295 {
296 pid_t parent = getpid();
297 pid_t child;
298 __u64 iova;
299 struct iommu_ioas_change_process cmd = {
300 .size = sizeof(cmd),
301 };
302
303 /* Expect failure if non-file maps exist */
304 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
305 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
306 test_ioctl_ioas_unmap(iova, PAGE_SIZE);
307
308 /* Change process works in current process. */
309 test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
310 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
311
312 /* Change process works in another process */
313 child = fork();
314 if (!child) {
315 int nlock = PAGE_SIZE / 1024;
316
317 /* Parent accounts for locked memory before */
318 ASSERT_EQ(nlock, get_vm_pinned(parent));
319 if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
320 ASSERT_EQ(nlock, get_vm_locked(parent));
321 ASSERT_EQ(0, get_vm_pinned(getpid()));
322 ASSERT_EQ(0, get_vm_locked(getpid()));
323
324 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
325
326 /* Child accounts for locked memory after */
327 ASSERT_EQ(0, get_vm_pinned(parent));
328 ASSERT_EQ(0, get_vm_locked(parent));
329 ASSERT_EQ(nlock, get_vm_pinned(getpid()));
330 if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
331 ASSERT_EQ(nlock, get_vm_locked(getpid()));
332
333 exit(0);
334 }
335 ASSERT_NE(-1, child);
336 ASSERT_EQ(child, waitpid(child, NULL, 0));
337 }
338
339 FIXTURE(iommufd_ioas)
340 {
341 int fd;
342 uint32_t ioas_id;
343 uint32_t stdev_id;
344 uint32_t hwpt_id;
345 uint32_t device_id;
346 uint64_t base_iova;
347 uint32_t device_pasid_id;
348 };
349
350 FIXTURE_VARIANT(iommufd_ioas)
351 {
352 unsigned int mock_domains;
353 unsigned int memory_limit;
354 bool pasid_capable;
355 };
356
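/*
 * Attach the number of mock domains requested by the variant, apply any
 * temporary memory limit, and optionally add a PASID-capable mock device.
 */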
357 FIXTURE_SETUP(iommufd_ioas)
358 {
359 unsigned int i;
360
361
362 self->fd = open("/dev/iommu", O_RDWR);
363 ASSERT_NE(-1, self->fd);
364 test_ioctl_ioas_alloc(&self->ioas_id);
365
366 if (!variant->memory_limit) {
367 test_ioctl_set_default_memory_limit();
368 } else {
369 test_ioctl_set_temp_memory_limit(variant->memory_limit);
370 }
371
372 for (i = 0; i != variant->mock_domains; i++) {
373 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
374 &self->hwpt_id, &self->device_id);
375 test_cmd_dev_check_cache_all(self->device_id,
376 IOMMU_TEST_DEV_CACHE_DEFAULT);
377 self->base_iova = MOCK_APERTURE_START;
378 }
379
380 if (variant->pasid_capable)
381 test_cmd_mock_domain_flags(self->ioas_id,
382 MOCK_FLAGS_DEVICE_PASID,
383 NULL, NULL,
384 &self->device_pasid_id);
385 }
386
387 FIXTURE_TEARDOWN(iommufd_ioas)
388 {
389 test_ioctl_set_default_memory_limit();
390 teardown_iommufd(self->fd, _metadata);
391 }
392
393 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
394 {
395 };
396
397 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
398 {
399 .mock_domains = 1,
400 .pasid_capable = true,
401 };
402
403 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
404 {
405 .mock_domains = 2,
406 };
407
408 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
409 {
410 .mock_domains = 1,
411 .memory_limit = 16,
412 };
413
414 TEST_F(iommufd_ioas, ioas_auto_destroy)
415 {
416 }
417
418 TEST_F(iommufd_ioas, ioas_destroy)
419 {
420 if (self->stdev_id) {
421 /* IOAS cannot be freed while a device has a HWPT using it */
422 EXPECT_ERRNO(EBUSY,
423 _test_ioctl_destroy(self->fd, self->ioas_id));
424 } else {
425 /* Can allocate and manually free an IOAS table */
426 test_ioctl_destroy(self->ioas_id);
427 }
428 }
429
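/*
 * Exercise nested HWPT allocation: negative flag/data checks, two nested
 * domains sharing one parent, IOTLB invalidation, and IOPF fault delivery.
 */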
430 TEST_F(iommufd_ioas, alloc_hwpt_nested)
431 {
432 const uint32_t min_data_len =
433 offsetofend(struct iommu_hwpt_selftest, iotlb);
434 struct iommu_hwpt_selftest data = {
435 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
436 };
437 struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
438 uint32_t nested_hwpt_id[2] = {};
439 uint32_t num_inv;
440 uint32_t parent_hwpt_id = 0;
441 uint32_t parent_hwpt_id_not_work = 0;
442 uint32_t test_hwpt_id = 0;
443 uint32_t iopf_hwpt_id;
444 uint32_t fault_id;
445 uint32_t fault_fd;
446
447 if (self->device_id) {
448 /* Negative tests */
449 test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
450 &test_hwpt_id);
451 test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
452 &test_hwpt_id);
453 test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
454 IOMMU_HWPT_ALLOC_NEST_PARENT |
455 IOMMU_HWPT_FAULT_ID_VALID,
456 &test_hwpt_id);
457
458 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
459 IOMMU_HWPT_ALLOC_NEST_PARENT,
460 &parent_hwpt_id);
461
462 test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
463 &parent_hwpt_id_not_work);
464
465 /* Negative nested tests */
466 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
467 parent_hwpt_id, 0,
468 &nested_hwpt_id[0],
469 IOMMU_HWPT_DATA_NONE, &data,
470 sizeof(data));
471 test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
472 parent_hwpt_id, 0,
473 &nested_hwpt_id[0],
474 IOMMU_HWPT_DATA_SELFTEST + 1, &data,
475 sizeof(data));
476 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
477 parent_hwpt_id, 0,
478 &nested_hwpt_id[0],
479 IOMMU_HWPT_DATA_SELFTEST, &data,
480 min_data_len - 1);
481 test_err_hwpt_alloc_nested(EFAULT, self->device_id,
482 parent_hwpt_id, 0,
483 &nested_hwpt_id[0],
484 IOMMU_HWPT_DATA_SELFTEST, NULL,
485 sizeof(data));
486 test_err_hwpt_alloc_nested(
487 EOPNOTSUPP, self->device_id, parent_hwpt_id,
488 IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
489 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
490 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
491 parent_hwpt_id_not_work, 0,
492 &nested_hwpt_id[0],
493 IOMMU_HWPT_DATA_SELFTEST, &data,
494 sizeof(data));
495
496 /* Allocate two nested hwpts sharing one common parent hwpt */
497 test_ioctl_fault_alloc(&fault_id, &fault_fd);
498 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
499 &nested_hwpt_id[0],
500 IOMMU_HWPT_DATA_SELFTEST, &data,
501 sizeof(data));
502 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
503 &nested_hwpt_id[1],
504 IOMMU_HWPT_DATA_SELFTEST, &data,
505 sizeof(data));
506 test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
507 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
508 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
509 &data, sizeof(data));
510 test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
511 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
512 IOMMU_HWPT_DATA_SELFTEST, &data,
513 sizeof(data));
514 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
515 IOMMU_TEST_IOTLB_DEFAULT);
516 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
517 IOMMU_TEST_IOTLB_DEFAULT);
518
519 /* Negative test: a nested hwpt on top of a nested hwpt */
520 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
521 nested_hwpt_id[0], 0, &test_hwpt_id,
522 IOMMU_HWPT_DATA_SELFTEST, &data,
523 sizeof(data));
524 /* Negative test: parent hwpt now cannot be freed */
525 EXPECT_ERRNO(EBUSY,
526 _test_ioctl_destroy(self->fd, parent_hwpt_id));
527
528 /* hwpt_invalidate does not support a parent hwpt */
529 num_inv = 1;
530 test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
531 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
532 sizeof(*inv_reqs), &num_inv);
533 assert(!num_inv);
534
535 /* Check data_type by passing zero-length array */
536 num_inv = 0;
537 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
538 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
539 sizeof(*inv_reqs), &num_inv);
540 assert(!num_inv);
541
542 /* Negative test: Invalid data_type */
543 num_inv = 1;
544 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
545 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
546 sizeof(*inv_reqs), &num_inv);
547 assert(!num_inv);
548
549 /* Negative test: structure size sanity */
550 num_inv = 1;
551 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
552 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
553 sizeof(*inv_reqs) + 1, &num_inv);
554 assert(!num_inv);
555
556 num_inv = 1;
557 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
558 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
559 1, &num_inv);
560 assert(!num_inv);
561
562 /* Negative test: invalid flag is passed */
563 num_inv = 1;
564 inv_reqs[0].flags = 0xffffffff;
565 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
566 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
567 sizeof(*inv_reqs), &num_inv);
568 assert(!num_inv);
569
570 /* Negative test: invalid data_uptr when array is not empty */
571 num_inv = 1;
572 inv_reqs[0].flags = 0;
573 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
574 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
575 sizeof(*inv_reqs), &num_inv);
576 assert(!num_inv);
577
578 /* Negative test: invalid entry_len when array is not empty */
579 num_inv = 1;
580 inv_reqs[0].flags = 0;
581 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
582 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
583 0, &num_inv);
584 assert(!num_inv);
585
586 /* Negative test: invalid iotlb_id */
587 num_inv = 1;
588 inv_reqs[0].flags = 0;
589 inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
590 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
591 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
592 sizeof(*inv_reqs), &num_inv);
593 assert(!num_inv);
594
595 /*
596 * Invalidate the 1st iotlb entry but fail the 2nd request
597 * due to invalid flags configuration in the 2nd request.
598 */
599 num_inv = 2;
600 inv_reqs[0].flags = 0;
601 inv_reqs[0].iotlb_id = 0;
602 inv_reqs[1].flags = 0xffffffff;
603 inv_reqs[1].iotlb_id = 1;
604 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
605 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
606 sizeof(*inv_reqs), &num_inv);
607 assert(num_inv == 1);
608 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
609 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
610 IOMMU_TEST_IOTLB_DEFAULT);
611 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
612 IOMMU_TEST_IOTLB_DEFAULT);
613 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
614 IOMMU_TEST_IOTLB_DEFAULT);
615
616 /*
617 * Invalidate the 1st iotlb entry but fail the 2nd request
618 * due to invalid iotlb_id configuration in the 2nd request.
619 */
620 num_inv = 2;
621 inv_reqs[0].flags = 0;
622 inv_reqs[0].iotlb_id = 0;
623 inv_reqs[1].flags = 0;
624 inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
625 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
626 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
627 sizeof(*inv_reqs), &num_inv);
628 assert(num_inv == 1);
629 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
630 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
631 IOMMU_TEST_IOTLB_DEFAULT);
632 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
633 IOMMU_TEST_IOTLB_DEFAULT);
634 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
635 IOMMU_TEST_IOTLB_DEFAULT);
636
637 /* Invalidate the 2nd iotlb entry and verify */
638 num_inv = 1;
639 inv_reqs[0].flags = 0;
640 inv_reqs[0].iotlb_id = 1;
641 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
642 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
643 sizeof(*inv_reqs), &num_inv);
644 assert(num_inv == 1);
645 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
646 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
647 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
648 IOMMU_TEST_IOTLB_DEFAULT);
649 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
650 IOMMU_TEST_IOTLB_DEFAULT);
651
652 /* Invalidate the 3rd and 4th iotlb entries and verify */
653 num_inv = 2;
654 inv_reqs[0].flags = 0;
655 inv_reqs[0].iotlb_id = 2;
656 inv_reqs[1].flags = 0;
657 inv_reqs[1].iotlb_id = 3;
658 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
659 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
660 sizeof(*inv_reqs), &num_inv);
661 assert(num_inv == 2);
662 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
663
664 /* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
665 num_inv = 1;
666 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
667 test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
668 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
669 sizeof(*inv_reqs), &num_inv);
670 assert(num_inv == 1);
671 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
672
673 /* Attach device to nested_hwpt_id[0] that then will be busy */
674 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
675 EXPECT_ERRNO(EBUSY,
676 _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
677
678 /* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
679 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
680 EXPECT_ERRNO(EBUSY,
681 _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
682 test_ioctl_destroy(nested_hwpt_id[0]);
683
684 /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
685 test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
686 EXPECT_ERRNO(EBUSY,
687 _test_ioctl_destroy(self->fd, iopf_hwpt_id));
688 /* Trigger an IOPF on the device */
689 test_cmd_trigger_iopf(self->device_id, fault_fd);
690
691 /* Detach from nested_hwpt_id[1] and destroy it */
692 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
693 test_ioctl_destroy(nested_hwpt_id[1]);
694 test_ioctl_destroy(iopf_hwpt_id);
695
696 /* Detach from the parent hw_pagetable and destroy it */
697 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
698 test_ioctl_destroy(parent_hwpt_id);
699 test_ioctl_destroy(parent_hwpt_id_not_work);
700 close(fault_fd);
701 test_ioctl_destroy(fault_id);
702 } else {
703 test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
704 &parent_hwpt_id);
705 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
706 parent_hwpt_id, 0,
707 &nested_hwpt_id[0],
708 IOMMU_HWPT_DATA_SELFTEST, &data,
709 sizeof(data));
710 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
711 parent_hwpt_id, 0,
712 &nested_hwpt_id[1],
713 IOMMU_HWPT_DATA_SELFTEST, &data,
714 sizeof(data));
715 test_err_mock_domain_replace(ENOENT, self->stdev_id,
716 nested_hwpt_id[0]);
717 test_err_mock_domain_replace(ENOENT, self->stdev_id,
718 nested_hwpt_id[1]);
719 }
720 }
721
722 TEST_F(iommufd_ioas, hwpt_attach)
723 {
724 /* Create a device attached directly to a hwpt */
725 if (self->stdev_id) {
726 test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
727 } else {
728 test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
729 }
730 }
731
732 TEST_F(iommufd_ioas, ioas_area_destroy)
733 {
734 /* Adding an area does not change ability to destroy */
735 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
736 if (self->stdev_id)
737 EXPECT_ERRNO(EBUSY,
738 _test_ioctl_destroy(self->fd, self->ioas_id));
739 else
740 test_ioctl_destroy(self->ioas_id);
741 }
742
743 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
744 {
745 int i;
746
747 /* Can allocate and automatically free an IOAS table with many areas */
748 for (i = 0; i != 10; i++) {
749 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
750 self->base_iova + i * PAGE_SIZE);
751 }
752 }
753
754 TEST_F(iommufd_ioas, get_hw_info)
755 {
756 struct iommu_test_hw_info buffer_exact;
757 struct iommu_test_hw_info_buffer_larger {
758 struct iommu_test_hw_info info;
759 uint64_t trailing_bytes;
760 } buffer_larger;
761 struct iommu_test_hw_info_buffer_smaller {
762 __u32 flags;
763 } buffer_smaller;
764
765 if (self->device_id) {
766 uint8_t max_pasid = 0;
767
768 /* Provide a zero-size user_buffer */
769 test_cmd_get_hw_info(self->device_id,
770 IOMMU_HW_INFO_TYPE_DEFAULT, NULL, 0);
771 /* Provide a user_buffer with exact size */
772 test_cmd_get_hw_info(self->device_id,
773 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
774 sizeof(buffer_exact));
775
776 /* Request for a wrong data_type, and a correct one */
777 test_err_get_hw_info(EOPNOTSUPP, self->device_id,
778 IOMMU_HW_INFO_TYPE_SELFTEST + 1,
779 &buffer_exact, sizeof(buffer_exact));
780 test_cmd_get_hw_info(self->device_id,
781 IOMMU_HW_INFO_TYPE_SELFTEST, &buffer_exact,
782 sizeof(buffer_exact));
783 /*
784 * Provide a user_buffer with size larger than the exact size to check
785 * that the kernel zeroes the trailing bytes.
786 */
787 test_cmd_get_hw_info(self->device_id,
788 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
789 sizeof(buffer_larger));
790 /*
791 * Provide a user_buffer with size smaller than the exact size to check
792 * that the fields within the size range still get updated.
793 */
794 test_cmd_get_hw_info(self->device_id,
795 IOMMU_HW_INFO_TYPE_DEFAULT,
796 &buffer_smaller, sizeof(buffer_smaller));
797 test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
798 ASSERT_EQ(0, max_pasid);
799 if (variant->pasid_capable) {
800 test_cmd_get_hw_info_pasid(self->device_pasid_id,
801 &max_pasid);
802 ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid);
803 }
804 } else {
805 test_err_get_hw_info(ENOENT, self->device_id,
806 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
807 sizeof(buffer_exact));
808 test_err_get_hw_info(ENOENT, self->device_id,
809 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
810 sizeof(buffer_larger));
811 }
812 }
813
814 TEST_F(iommufd_ioas, area)
815 {
816 int i;
817
818 /* Unmap fails if nothing is mapped */
819 for (i = 0; i != 10; i++)
820 test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
821
822 /* Unmap works */
823 for (i = 0; i != 10; i++)
824 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
825 self->base_iova + i * PAGE_SIZE);
826 for (i = 0; i != 10; i++)
827 test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
828 PAGE_SIZE);
829
830 /* Split fails */
831 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
832 self->base_iova + 16 * PAGE_SIZE);
833 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
834 PAGE_SIZE);
835 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
836 PAGE_SIZE);
837
838 /* Over map fails */
839 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
840 self->base_iova + 16 * PAGE_SIZE);
841 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
842 self->base_iova + 16 * PAGE_SIZE);
843 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
844 self->base_iova + 17 * PAGE_SIZE);
845 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
846 self->base_iova + 15 * PAGE_SIZE);
847 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
848 self->base_iova + 15 * PAGE_SIZE);
849
850 /* unmap all works */
851 test_ioctl_ioas_unmap(0, UINT64_MAX);
852
853 /* Unmap all succeeds on an empty IOAS */
854 test_ioctl_ioas_unmap(0, UINT64_MAX);
855 }
856
857 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
858 {
859 uint64_t unmap_len;
860 int i;
861
862 /* Give no_domain some space to rewind base_iova */
863 self->base_iova += 4 * PAGE_SIZE;
864
865 for (i = 0; i != 4; i++)
866 test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
867 self->base_iova + i * 16 * PAGE_SIZE);
868
869 /* Unmap not fully contained area doesn't work */
870 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
871 8 * PAGE_SIZE);
872 test_err_ioctl_ioas_unmap(ENOENT,
873 self->base_iova + 3 * 16 * PAGE_SIZE +
874 8 * PAGE_SIZE - 4 * PAGE_SIZE,
875 8 * PAGE_SIZE);
876
877 /* Unmap fully contained areas works */
878 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
879 self->base_iova - 4 * PAGE_SIZE,
880 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
881 4 * PAGE_SIZE,
882 &unmap_len));
883 ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
884 }
885
886 TEST_F(iommufd_ioas, area_auto_iova)
887 {
888 struct iommu_test_cmd test_cmd = {
889 .size = sizeof(test_cmd),
890 .op = IOMMU_TEST_OP_ADD_RESERVED,
891 .id = self->ioas_id,
892 .add_reserved = { .start = PAGE_SIZE * 4,
893 .length = PAGE_SIZE * 100 },
894 };
895 struct iommu_iova_range ranges[1] = {};
896 struct iommu_ioas_allow_iovas allow_cmd = {
897 .size = sizeof(allow_cmd),
898 .ioas_id = self->ioas_id,
899 .num_iovas = 1,
900 .allowed_iovas = (uintptr_t)ranges,
901 };
902 __u64 iovas[10];
903 int i;
904
905 /* Simple 4k pages */
906 for (i = 0; i != 10; i++)
907 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
908 for (i = 0; i != 10; i++)
909 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
910
911 /* Kernel automatically aligns IOVAs properly */
912 for (i = 0; i != 10; i++) {
913 size_t length = PAGE_SIZE * (i + 1);
914
915 if (self->stdev_id) {
916 test_ioctl_ioas_map(buffer, length, &iovas[i]);
917 } else {
918 test_ioctl_ioas_map((void *)(1UL << 31), length,
919 &iovas[i]);
920 }
921 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
922 }
923 for (i = 0; i != 10; i++)
924 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
925
926 /* Avoids a reserved region */
927 ASSERT_EQ(0,
928 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
929 &test_cmd));
930 for (i = 0; i != 10; i++) {
931 size_t length = PAGE_SIZE * (i + 1);
932
933 test_ioctl_ioas_map(buffer, length, &iovas[i]);
934 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
935 EXPECT_EQ(false,
936 iovas[i] > test_cmd.add_reserved.start &&
937 iovas[i] <
938 test_cmd.add_reserved.start +
939 test_cmd.add_reserved.length);
940 }
941 for (i = 0; i != 10; i++)
942 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
943
944 /* Allowed region intersects with a reserved region */
945 ranges[0].start = PAGE_SIZE;
946 ranges[0].last = PAGE_SIZE * 600;
947 EXPECT_ERRNO(EADDRINUSE,
948 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
949
950 /* Allocate from an allowed region */
951 if (self->stdev_id) {
952 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
953 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
954 } else {
955 ranges[0].start = PAGE_SIZE * 200;
956 ranges[0].last = PAGE_SIZE * 600 - 1;
957 }
958 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
959 for (i = 0; i != 10; i++) {
960 size_t length = PAGE_SIZE * (i + 1);
961
962 test_ioctl_ioas_map(buffer, length, &iovas[i]);
963 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
964 EXPECT_EQ(true, iovas[i] >= ranges[0].start);
965 EXPECT_EQ(true, iovas[i] <= ranges[0].last);
966 EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
967 EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
968 }
969 for (i = 0; i != 10; i++)
970 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
971 }
972
973 /* https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com */
974 TEST_F(iommufd_ioas, reserved_overflow)
975 {
976 struct iommu_test_cmd test_cmd = {
977 .size = sizeof(test_cmd),
978 .op = IOMMU_TEST_OP_ADD_RESERVED,
979 .id = self->ioas_id,
980 .add_reserved.start = 6,
981 };
982 unsigned int map_len;
983 __u64 iova;
984
985 if (PAGE_SIZE == 4096) {
986 test_cmd.add_reserved.length = 0xffffffffffff8001;
987 map_len = 0x5000;
988 } else {
989 test_cmd.add_reserved.length =
990 0xffffffffffffffff - MOCK_PAGE_SIZE * 16;
991 map_len = MOCK_PAGE_SIZE * 10;
992 }
993
994 ASSERT_EQ(0,
995 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
996 &test_cmd));
997 test_err_ioctl_ioas_map(ENOSPC, buffer, map_len, &iova);
998 }
999
1000 TEST_F(iommufd_ioas, area_allowed)
1001 {
1002 struct iommu_test_cmd test_cmd = {
1003 .size = sizeof(test_cmd),
1004 .op = IOMMU_TEST_OP_ADD_RESERVED,
1005 .id = self->ioas_id,
1006 .add_reserved = { .start = PAGE_SIZE * 4,
1007 .length = PAGE_SIZE * 100 },
1008 };
1009 struct iommu_iova_range ranges[1] = {};
1010 struct iommu_ioas_allow_iovas allow_cmd = {
1011 .size = sizeof(allow_cmd),
1012 .ioas_id = self->ioas_id,
1013 .num_iovas = 1,
1014 .allowed_iovas = (uintptr_t)ranges,
1015 };
1016
1017 /* Reserved intersects an allowed */
1018 allow_cmd.num_iovas = 1;
1019 ranges[0].start = self->base_iova;
1020 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
1021 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1022 test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
1023 test_cmd.add_reserved.length = PAGE_SIZE;
1024 EXPECT_ERRNO(EADDRINUSE,
1025 ioctl(self->fd,
1026 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1027 &test_cmd));
1028 allow_cmd.num_iovas = 0;
1029 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1030
1031 /* Allowed intersects a reserved */
1032 ASSERT_EQ(0,
1033 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1034 &test_cmd));
1035 allow_cmd.num_iovas = 1;
1036 ranges[0].start = self->base_iova;
1037 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
1038 EXPECT_ERRNO(EADDRINUSE,
1039 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1040 }
1041
1042 TEST_F(iommufd_ioas, copy_area)
1043 {
1044 struct iommu_ioas_copy copy_cmd = {
1045 .size = sizeof(copy_cmd),
1046 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1047 .dst_ioas_id = self->ioas_id,
1048 .src_ioas_id = self->ioas_id,
1049 .length = PAGE_SIZE,
1050 };
1051
1052 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
1053
1054 /* Copy inside a single IOAS */
1055 copy_cmd.src_iova = self->base_iova;
1056 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
1057 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1058
1059 /* Copy between IOAS's */
1060 copy_cmd.src_iova = self->base_iova;
1061 copy_cmd.dst_iova = 0;
1062 test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
1063 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1064 }
1065
1066 TEST_F(iommufd_ioas, iova_ranges)
1067 {
1068 struct iommu_test_cmd test_cmd = {
1069 .size = sizeof(test_cmd),
1070 .op = IOMMU_TEST_OP_ADD_RESERVED,
1071 .id = self->ioas_id,
1072 .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
1073 };
1074 struct iommu_iova_range *ranges = buffer;
1075 struct iommu_ioas_iova_ranges ranges_cmd = {
1076 .size = sizeof(ranges_cmd),
1077 .ioas_id = self->ioas_id,
1078 .num_iovas = BUFFER_SIZE / sizeof(*ranges),
1079 .allowed_iovas = (uintptr_t)ranges,
1080 };
1081
1082 /* Range can be read */
1083 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1084 EXPECT_EQ(1, ranges_cmd.num_iovas);
1085 if (!self->stdev_id) {
1086 EXPECT_EQ(0, ranges[0].start);
1087 EXPECT_EQ(SIZE_MAX, ranges[0].last);
1088 EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
1089 } else {
1090 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1091 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1092 EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
1093 }
1094
1095 /* Buffer too small */
1096 memset(ranges, 0, BUFFER_SIZE);
1097 ranges_cmd.num_iovas = 0;
1098 EXPECT_ERRNO(EMSGSIZE,
1099 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1100 EXPECT_EQ(1, ranges_cmd.num_iovas);
1101 EXPECT_EQ(0, ranges[0].start);
1102 EXPECT_EQ(0, ranges[0].last);
1103
1104 /* 2 ranges */
1105 ASSERT_EQ(0,
1106 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1107 &test_cmd));
1108 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
1109 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1110 if (!self->stdev_id) {
1111 EXPECT_EQ(2, ranges_cmd.num_iovas);
1112 EXPECT_EQ(0, ranges[0].start);
1113 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1114 EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
1115 EXPECT_EQ(SIZE_MAX, ranges[1].last);
1116 } else {
1117 EXPECT_EQ(1, ranges_cmd.num_iovas);
1118 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1119 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1120 }
1121
1122 /* Buffer too small */
1123 memset(ranges, 0, BUFFER_SIZE);
1124 ranges_cmd.num_iovas = 1;
1125 if (!self->stdev_id) {
1126 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
1127 &ranges_cmd));
1128 EXPECT_EQ(2, ranges_cmd.num_iovas);
1129 EXPECT_EQ(0, ranges[0].start);
1130 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1131 } else {
1132 ASSERT_EQ(0,
1133 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1134 EXPECT_EQ(1, ranges_cmd.num_iovas);
1135 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1136 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1137 }
1138 EXPECT_EQ(0, ranges[1].start);
1139 EXPECT_EQ(0, ranges[1].last);
1140 }
1141
1142 TEST_F(iommufd_ioas, access_domain_destory)
1143 {
1144 struct iommu_test_cmd access_cmd = {
1145 .size = sizeof(access_cmd),
1146 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1147 .access_pages = { .iova = self->base_iova + PAGE_SIZE,
1148 .length = PAGE_SIZE},
1149 };
1150 size_t buf_size = 2 * HUGEPAGE_SIZE;
1151 uint8_t *buf;
1152
1153 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
1154 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
1155 0);
1156 ASSERT_NE(MAP_FAILED, buf);
1157 test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
1158
1159 test_cmd_create_access(self->ioas_id, &access_cmd.id,
1160 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1161 access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
1162 ASSERT_EQ(0,
1163 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1164 &access_cmd));
1165
1166 /* Causes a complicated unpin across a huge page boundary */
1167 if (self->stdev_id)
1168 test_ioctl_destroy(self->stdev_id);
1169
1170 test_cmd_destroy_access_pages(
1171 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1172 test_cmd_destroy_access(access_cmd.id);
1173 ASSERT_EQ(0, munmap(buf, buf_size));
1174 }
1175
1176 TEST_F(iommufd_ioas, access_pin)
1177 {
1178 struct iommu_test_cmd access_cmd = {
1179 .size = sizeof(access_cmd),
1180 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1181 .access_pages = { .iova = MOCK_APERTURE_START,
1182 .length = BUFFER_SIZE,
1183 .uptr = (uintptr_t)buffer },
1184 };
1185 struct iommu_test_cmd check_map_cmd = {
1186 .size = sizeof(check_map_cmd),
1187 .op = IOMMU_TEST_OP_MD_CHECK_MAP,
1188 .check_map = { .iova = MOCK_APERTURE_START,
1189 .length = BUFFER_SIZE,
1190 .uptr = (uintptr_t)buffer },
1191 };
1192 uint32_t access_pages_id;
1193 unsigned int npages;
1194
1195 test_cmd_create_access(self->ioas_id, &access_cmd.id,
1196 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1197
1198 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
1199 uint32_t mock_stdev_id;
1200 uint32_t mock_hwpt_id;
1201
1202 access_cmd.access_pages.length = npages * PAGE_SIZE;
1203
1204 /* Single map/unmap */
1205 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1206 MOCK_APERTURE_START);
1207 ASSERT_EQ(0, ioctl(self->fd,
1208 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1209 &access_cmd));
1210 test_cmd_destroy_access_pages(
1211 access_cmd.id,
1212 access_cmd.access_pages.out_access_pages_id);
1213
1214 /* Double user */
1215 ASSERT_EQ(0, ioctl(self->fd,
1216 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1217 &access_cmd));
1218 access_pages_id = access_cmd.access_pages.out_access_pages_id;
1219 ASSERT_EQ(0, ioctl(self->fd,
1220 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1221 &access_cmd));
1222 test_cmd_destroy_access_pages(
1223 access_cmd.id,
1224 access_cmd.access_pages.out_access_pages_id);
1225 test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
1226
1227 /* Add/remove a domain with a user */
1228 ASSERT_EQ(0, ioctl(self->fd,
1229 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1230 &access_cmd));
1231 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1232 &mock_hwpt_id, NULL);
1233 check_map_cmd.id = mock_hwpt_id;
1234 ASSERT_EQ(0, ioctl(self->fd,
1235 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
1236 &check_map_cmd));
1237
1238 test_ioctl_destroy(mock_stdev_id);
1239 test_cmd_destroy_access_pages(
1240 access_cmd.id,
1241 access_cmd.access_pages.out_access_pages_id);
1242
1243 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1244 }
1245 test_cmd_destroy_access(access_cmd.id);
1246 }
1247
1248 TEST_F(iommufd_ioas, access_pin_unmap)
1249 {
1250 struct iommu_test_cmd access_pages_cmd = {
1251 .size = sizeof(access_pages_cmd),
1252 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1253 .access_pages = { .iova = MOCK_APERTURE_START,
1254 .length = BUFFER_SIZE,
1255 .uptr = (uintptr_t)buffer },
1256 };
1257
1258 test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1259 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1260 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1261 ASSERT_EQ(0,
1262 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1263 &access_pages_cmd));
1264
1265 /* Trigger the unmap op */
1266 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1267
1268 /* kernel removed the item for us */
1269 test_err_destroy_access_pages(
1270 ENOENT, access_pages_cmd.id,
1271 access_pages_cmd.access_pages.out_access_pages_id);
1272 }
1273
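/*
 * Drive IOMMU_TEST_OP_ACCESS_RW across a page boundary at every small length,
 * reading then writing through the access and verifying the data round-trips
 * against the shared buffer, followed by one multi-page transfer.
 */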
1274 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1275 unsigned int access_id, uint64_t iova,
1276 unsigned int def_flags)
1277 {
1278 uint16_t tmp[32];
1279 struct iommu_test_cmd access_cmd = {
1280 .size = sizeof(access_cmd),
1281 .op = IOMMU_TEST_OP_ACCESS_RW,
1282 .id = access_id,
1283 .access_rw = { .uptr = (uintptr_t)tmp },
1284 };
1285 uint16_t *buffer16 = buffer;
1286 unsigned int i;
1287 void *tmp2;
1288
1289 for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1290 buffer16[i] = rand();
1291
1292 for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1293 access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1294 access_cmd.access_rw.iova++) {
1295 for (access_cmd.access_rw.length = 1;
1296 access_cmd.access_rw.length < sizeof(tmp);
1297 access_cmd.access_rw.length++) {
1298 access_cmd.access_rw.flags = def_flags;
1299 ASSERT_EQ(0, ioctl(fd,
1300 _IOMMU_TEST_CMD(
1301 IOMMU_TEST_OP_ACCESS_RW),
1302 &access_cmd));
1303 ASSERT_EQ(0,
1304 memcmp(buffer + (access_cmd.access_rw.iova -
1305 iova),
1306 tmp, access_cmd.access_rw.length));
1307
1308 for (i = 0; i != ARRAY_SIZE(tmp); i++)
1309 tmp[i] = rand();
1310 access_cmd.access_rw.flags = def_flags |
1311 MOCK_ACCESS_RW_WRITE;
1312 ASSERT_EQ(0, ioctl(fd,
1313 _IOMMU_TEST_CMD(
1314 IOMMU_TEST_OP_ACCESS_RW),
1315 &access_cmd));
1316 ASSERT_EQ(0,
1317 memcmp(buffer + (access_cmd.access_rw.iova -
1318 iova),
1319 tmp, access_cmd.access_rw.length));
1320 }
1321 }
1322
1323 /* Multi-page test */
1324 tmp2 = malloc(BUFFER_SIZE);
1325 ASSERT_NE(NULL, tmp2);
1326 access_cmd.access_rw.iova = iova;
1327 access_cmd.access_rw.length = BUFFER_SIZE;
1328 access_cmd.access_rw.flags = def_flags;
1329 access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1330 ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1331 &access_cmd));
1332 ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1333 free(tmp2);
1334 }
1335
1336 TEST_F(iommufd_ioas, access_rw)
1337 {
1338 __u32 access_id;
1339 __u64 iova;
1340
1341 test_cmd_create_access(self->ioas_id, &access_id, 0);
1342 test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1343 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1344 check_access_rw(_metadata, self->fd, access_id, iova,
1345 MOCK_ACCESS_RW_SLOW_PATH);
1346 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1347 test_cmd_destroy_access(access_id);
1348 }
1349
1350 TEST_F(iommufd_ioas, access_rw_unaligned)
1351 {
1352 __u32 access_id;
1353 __u64 iova;
1354
1355 test_cmd_create_access(self->ioas_id, &access_id, 0);
1356
1357 /* Unaligned pages */
1358 iova = self->base_iova + MOCK_PAGE_SIZE;
1359 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1360 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1361 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1362 test_cmd_destroy_access(access_id);
1363 }
1364
1365 TEST_F(iommufd_ioas, fork_gone)
1366 {
1367 __u32 access_id;
1368 pid_t child;
1369
1370 test_cmd_create_access(self->ioas_id, &access_id, 0);
1371
1372 /* Create a mapping with a different mm */
1373 child = fork();
1374 if (!child) {
1375 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1376 MOCK_APERTURE_START);
1377 exit(0);
1378 }
1379 ASSERT_NE(-1, child);
1380 ASSERT_EQ(child, waitpid(child, NULL, 0));
1381
1382 if (self->stdev_id) {
1383 /*
1384 * If a domain already existed then everything was pinned within
1385 * the fork, so this copies from one domain to another.
1386 */
1387 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1388 check_access_rw(_metadata, self->fd, access_id,
1389 MOCK_APERTURE_START, 0);
1390
1391 } else {
1392 /*
1393 * Otherwise we need to actually pin pages which can't happen
1394 * since the fork is gone.
1395 */
1396 test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1397 }
1398
1399 test_cmd_destroy_access(access_id);
1400 }
1401
1402 TEST_F(iommufd_ioas, fork_present)
1403 {
1404 __u32 access_id;
1405 int pipefds[2];
1406 uint64_t tmp;
1407 pid_t child;
1408 int efd;
1409
1410 test_cmd_create_access(self->ioas_id, &access_id, 0);
1411
1412 ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1413 efd = eventfd(0, EFD_CLOEXEC);
1414 ASSERT_NE(-1, efd);
1415
1416 /* Create a mapping with a different mm */
1417 child = fork();
1418 if (!child) {
1419 __u64 iova;
1420 uint64_t one = 1;
1421
1422 close(pipefds[1]);
1423 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1424 MOCK_APERTURE_START);
1425 if (write(efd, &one, sizeof(one)) != sizeof(one))
1426 exit(100);
1427 if (read(pipefds[0], &iova, 1) != 1)
1428 exit(100);
1429 exit(0);
1430 }
1431 close(pipefds[0]);
1432 ASSERT_NE(-1, child);
1433 ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1434
1435 /* Read pages from the remote process */
1436 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1437 check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1438
1439 ASSERT_EQ(0, close(pipefds[1]));
1440 ASSERT_EQ(child, waitpid(child, NULL, 0));
1441
1442 test_cmd_destroy_access(access_id);
1443 }
1444
1445 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1446 {
1447 struct iommu_option cmd = {
1448 .size = sizeof(cmd),
1449 .option_id = IOMMU_OPTION_HUGE_PAGES,
1450 .op = IOMMU_OPTION_OP_GET,
1451 .val64 = 3,
1452 .object_id = self->ioas_id,
1453 };
1454
1455 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1456 ASSERT_EQ(1, cmd.val64);
1457
1458 cmd.op = IOMMU_OPTION_OP_SET;
1459 cmd.val64 = 0;
1460 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1461
1462 cmd.op = IOMMU_OPTION_OP_GET;
1463 cmd.val64 = 3;
1464 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1465 ASSERT_EQ(0, cmd.val64);
1466
1467 cmd.op = IOMMU_OPTION_OP_SET;
1468 cmd.val64 = 2;
1469 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1470
1471 cmd.op = IOMMU_OPTION_OP_SET;
1472 cmd.val64 = 1;
1473 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1474 }
1475
1476 TEST_F(iommufd_ioas, ioas_iova_alloc)
1477 {
1478 unsigned int length;
1479 __u64 iova;
1480
1481 for (length = 1; length != PAGE_SIZE * 2; length++) {
1482 if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1483 test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1484 } else {
1485 test_ioctl_ioas_map(buffer, length, &iova);
1486 test_ioctl_ioas_unmap(iova, length);
1487 }
1488 }
1489 }
1490
1491 TEST_F(iommufd_ioas, ioas_align_change)
1492 {
1493 struct iommu_option cmd = {
1494 .size = sizeof(cmd),
1495 .option_id = IOMMU_OPTION_HUGE_PAGES,
1496 .op = IOMMU_OPTION_OP_SET,
1497 .object_id = self->ioas_id,
1498 /* 0 means everything must be aligned to PAGE_SIZE */
1499 .val64 = 0,
1500 };
1501
1502 /*
1503 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1504 * and map are present.
1505 */
1506 if (variant->mock_domains)
1507 return;
1508
1509 /*
1510 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1511 */
1512 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1513 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1514
1515 /* Misalignment is rejected at map time */
1516 test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1517 PAGE_SIZE,
1518 MOCK_APERTURE_START + PAGE_SIZE);
1519 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1520
1521 /* Reduce alignment */
1522 cmd.val64 = 1;
1523 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1524
1525 /* Confirm misalignment is rejected during alignment upgrade */
1526 test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1527 MOCK_APERTURE_START + PAGE_SIZE);
1528 cmd.val64 = 0;
1529 EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1530
1531 test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1532 test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1533 }
1534
1535 TEST_F(iommufd_ioas, copy_sweep)
1536 {
1537 struct iommu_ioas_copy copy_cmd = {
1538 .size = sizeof(copy_cmd),
1539 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1540 .src_ioas_id = self->ioas_id,
1541 .dst_iova = MOCK_APERTURE_START,
1542 .length = MOCK_PAGE_SIZE,
1543 };
1544 unsigned int dst_ioas_id;
1545 uint64_t last_iova;
1546 uint64_t iova;
1547
1548 test_ioctl_ioas_alloc(&dst_ioas_id);
1549 copy_cmd.dst_ioas_id = dst_ioas_id;
1550
1551 if (variant->mock_domains)
1552 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1553 else
1554 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1555
1556 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1557 MOCK_APERTURE_START);
1558
1559 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1560 iova += 511) {
1561 copy_cmd.src_iova = iova;
1562 if (iova < MOCK_APERTURE_START ||
1563 iova + copy_cmd.length - 1 > last_iova) {
1564 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1565 &copy_cmd));
1566 } else {
1567 ASSERT_EQ(0,
1568 ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1569 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1570 copy_cmd.length);
1571 }
1572 }
1573
1574 test_ioctl_destroy(dst_ioas_id);
1575 }
1576
1577 FIXTURE(iommufd_mock_domain)
1578 {
1579 int fd;
1580 uint32_t ioas_id;
1581 uint32_t hwpt_id;
1582 uint32_t hwpt_ids[2];
1583 uint32_t stdev_ids[2];
1584 uint32_t idev_ids[2];
1585 int mmap_flags;
1586 size_t mmap_buf_size;
1587 };
1588
1589 FIXTURE_VARIANT(iommufd_mock_domain)
1590 {
1591 unsigned int mock_domains;
1592 bool hugepages;
1593 bool file;
1594 };
1595
1596 FIXTURE_SETUP(iommufd_mock_domain)
1597 {
1598 unsigned int i;
1599
1600 self->fd = open("/dev/iommu", O_RDWR);
1601 ASSERT_NE(-1, self->fd);
1602 test_ioctl_ioas_alloc(&self->ioas_id);
1603
1604 ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1605
1606 for (i = 0; i != variant->mock_domains; i++) {
1607 test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1608 &self->hwpt_ids[i], &self->idev_ids[i]);
1609 test_cmd_dev_check_cache_all(self->idev_ids[0],
1610 IOMMU_TEST_DEV_CACHE_DEFAULT);
1611 }
1612 self->hwpt_id = self->hwpt_ids[0];
1613
1614 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1615 self->mmap_buf_size = PAGE_SIZE * 8;
1616 if (variant->hugepages) {
1617 /*
1618 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1619 * not available.
1620 */
1621 self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1622 self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1623 }
1624 }
1625
1626 FIXTURE_TEARDOWN(iommufd_mock_domain)
1627 {
1628 teardown_iommufd(self->fd, _metadata);
1629 }
1630
1631 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1632 {
1633 .mock_domains = 1,
1634 .hugepages = false,
1635 .file = false,
1636 };
1637
1638 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1639 {
1640 .mock_domains = 2,
1641 .hugepages = false,
1642 .file = false,
1643 };
1644
1645 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1646 {
1647 .mock_domains = 1,
1648 .hugepages = true,
1649 .file = false,
1650 };
1651
1652 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1653 {
1654 .mock_domains = 2,
1655 .hugepages = true,
1656 .file = false,
1657 };
1658
1659 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
1660 {
1661 .mock_domains = 1,
1662 .hugepages = false,
1663 .file = true,
1664 };
1665
1666 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
1667 {
1668 .mock_domains = 1,
1669 .hugepages = true,
1670 .file = true,
1671 };
1672
1673
1674 /* Have the kernel check that the user pages made it to the iommu_domain */
1675 #define check_mock_iova(_ptr, _iova, _length) \
1676 ({ \
1677 struct iommu_test_cmd check_map_cmd = { \
1678 .size = sizeof(check_map_cmd), \
1679 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \
1680 .id = self->hwpt_id, \
1681 .check_map = { .iova = _iova, \
1682 .length = _length, \
1683 .uptr = (uintptr_t)(_ptr) }, \
1684 }; \
1685 ASSERT_EQ(0, \
1686 ioctl(self->fd, \
1687 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1688 &check_map_cmd)); \
1689 if (self->hwpt_ids[1]) { \
1690 check_map_cmd.id = self->hwpt_ids[1]; \
1691 ASSERT_EQ(0, \
1692 ioctl(self->fd, \
1693 _IOMMU_TEST_CMD( \
1694 IOMMU_TEST_OP_MD_CHECK_MAP), \
1695 &check_map_cmd)); \
1696 } \
1697 })
1698
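/*
 * Map a single page and verify it reached the mock domain, then punch holes
 * in an mmap()ed buffer and check that mapping it fails with EFAULT.
 */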
1699 static void
1700 test_basic_mmap(struct __test_metadata *_metadata,
1701 struct _test_data_iommufd_mock_domain *self,
1702 const struct _fixture_variant_iommufd_mock_domain *variant)
1703 {
1704 size_t buf_size = self->mmap_buf_size;
1705 uint8_t *buf;
1706 __u64 iova;
1707
1708 /* Simple one page map */
1709 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1710 check_mock_iova(buffer, iova, PAGE_SIZE);
1711
1712 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1713 0);
1714 ASSERT_NE(MAP_FAILED, buf);
1715
1716 /* EFAULT half way through mapping */
1717 ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1718 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1719
1720 /* EFAULT on first page */
1721 ASSERT_EQ(0, munmap(buf, buf_size / 2));
1722 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1723 }
1724
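/*
 * Same idea via a memfd: map one page through the file path, then check that
 * an out-of-bounds length and a truncated memfd are rejected with EINVAL.
 */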
1725 static void
1726 test_basic_file(struct __test_metadata *_metadata,
1727 struct _test_data_iommufd_mock_domain *self,
1728 const struct _fixture_variant_iommufd_mock_domain *variant)
1729 {
1730 size_t buf_size = self->mmap_buf_size;
1731 uint8_t *buf;
1732 __u64 iova;
1733 int mfd_tmp;
1734 int prot = PROT_READ | PROT_WRITE;
1735
1736 /* Simple one page map */
1737 test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
1738 check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
1739
1740 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
1741 ASSERT_NE(MAP_FAILED, buf);
1742
1743 test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);
1744
1745 ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
1746 test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);
1747
1748 close(mfd_tmp);
1749 }
1750
1751 TEST_F(iommufd_mock_domain, basic)
1752 {
1753 if (variant->file)
1754 test_basic_file(_metadata, self, variant);
1755 else
1756 test_basic_mmap(_metadata, self, variant);
1757 }
1758
1759 TEST_F(iommufd_mock_domain, ro_unshare)
1760 {
1761 uint8_t *buf;
1762 __u64 iova;
1763 int fd;
1764
1765 fd = open("/proc/self/exe", O_RDONLY);
1766 ASSERT_NE(-1, fd);
1767
1768 buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1769 ASSERT_NE(MAP_FAILED, buf);
1770 close(fd);
1771
1772 /*
1773 * There have been lots of changes to the "unshare" mechanism in
1774 * get_user_pages(), make sure it works right. The write to the page
1775 * after we map it for reading should not change the assigned PFN.
1776 */
1777 ASSERT_EQ(0,
1778 _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1779 &iova, IOMMU_IOAS_MAP_READABLE));
1780 check_mock_iova(buf, iova, PAGE_SIZE);
1781 memset(buf, 1, PAGE_SIZE);
1782 check_mock_iova(buf, iova, PAGE_SIZE);
1783 ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1784 }
1785
1786 TEST_F(iommufd_mock_domain, all_aligns)
1787 {
1788 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1789 MOCK_PAGE_SIZE;
1790 size_t buf_size = self->mmap_buf_size;
1791 unsigned int start;
1792 unsigned int end;
1793 uint8_t *buf;
1794 int prot = PROT_READ | PROT_WRITE;
1795 int mfd = -1;
1796
1797 if (variant->file)
1798 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1799 else
1800 buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1801 ASSERT_NE(MAP_FAILED, buf);
1802 if (variant->file)
1803 ASSERT_GT(mfd, 0);
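	/*
	 * check_refs() verifies the expected pin count of every page in the
	 * range: 0 before and after the loop below, 1 while a sub-range is
	 * mapped into the IOAS.
	 */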
1804 check_refs(buf, buf_size, 0);
1805
1806 /*
1807 * Map every combination of page size and alignment within a big region,
1808 * with fewer combinations for the hugepage case since it takes so long to finish.
1809 */
1810 for (start = 0; start < buf_size; start += test_step) {
1811 if (variant->hugepages)
1812 end = buf_size;
1813 else
1814 end = start + MOCK_PAGE_SIZE;
1815 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1816 size_t length = end - start;
1817 __u64 iova;
1818
1819 if (variant->file) {
1820 test_ioctl_ioas_map_file(mfd, start, length,
1821 &iova);
1822 } else {
1823 test_ioctl_ioas_map(buf + start, length, &iova);
1824 }
1825 check_mock_iova(buf + start, iova, length);
1826 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1827 end / PAGE_SIZE * PAGE_SIZE -
1828 start / PAGE_SIZE * PAGE_SIZE,
1829 1);
1830
1831 test_ioctl_ioas_unmap(iova, length);
1832 }
1833 }
1834 check_refs(buf, buf_size, 0);
1835 ASSERT_EQ(0, munmap(buf, buf_size));
1836 if (variant->file)
1837 close(mfd);
1838 }
1839
1840 TEST_F(iommufd_mock_domain, all_aligns_copy)
1841 {
1842 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1843 MOCK_PAGE_SIZE;
1844 size_t buf_size = self->mmap_buf_size;
1845 unsigned int start;
1846 unsigned int end;
1847 uint8_t *buf;
1848 int prot = PROT_READ | PROT_WRITE;
1849 int mfd = -1;
1850
1851 if (variant->file)
1852 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1853 else
1854 buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1855 ASSERT_NE(MAP_FAILED, buf);
1856 if (variant->file)
1857 ASSERT_GT(mfd, 0);
1858 check_refs(buf, buf_size, 0);
1859
1860 /*
1861 * Map every combination of page size and alignment within a big region,
1862 * with fewer combinations for the hugepage case since it takes so long to finish.
1863 */
1864 for (start = 0; start < buf_size; start += test_step) {
1865 if (variant->hugepages)
1866 end = buf_size;
1867 else
1868 end = start + MOCK_PAGE_SIZE;
1869 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1870 size_t length = end - start;
1871 unsigned int old_id;
1872 uint32_t mock_stdev_id;
1873 __u64 iova;
1874
1875 if (variant->file) {
1876 test_ioctl_ioas_map_file(mfd, start, length,
1877 &iova);
1878 } else {
1879 test_ioctl_ioas_map(buf + start, length, &iova);
1880 }
1881
1882 /* Add and destroy a domain while the area exists */
1883 old_id = self->hwpt_ids[1];
1884 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1885 &self->hwpt_ids[1], NULL);
1886
1887 check_mock_iova(buf + start, iova, length);
1888 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1889 end / PAGE_SIZE * PAGE_SIZE -
1890 start / PAGE_SIZE * PAGE_SIZE,
1891 1);
1892
1893 test_ioctl_destroy(mock_stdev_id);
1894 self->hwpt_ids[1] = old_id;
1895
1896 test_ioctl_ioas_unmap(iova, length);
1897 }
1898 }
1899 check_refs(buf, buf_size, 0);
1900 ASSERT_EQ(0, munmap(buf, buf_size));
1901 if (variant->file)
1902 close(mfd);
1903 }
1904
1905 TEST_F(iommufd_mock_domain, user_copy)
1906 {
1907 void *buf = variant->file ? mfd_buffer : buffer;
1908 struct iommu_test_cmd access_cmd = {
1909 .size = sizeof(access_cmd),
1910 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1911 .access_pages = { .length = BUFFER_SIZE,
1912 .uptr = (uintptr_t)buf },
1913 };
1914 struct iommu_ioas_copy copy_cmd = {
1915 .size = sizeof(copy_cmd),
1916 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1917 .dst_ioas_id = self->ioas_id,
1918 .dst_iova = MOCK_APERTURE_START,
1919 .length = BUFFER_SIZE,
1920 };
1921 struct iommu_ioas_unmap unmap_cmd = {
1922 .size = sizeof(unmap_cmd),
1923 .ioas_id = self->ioas_id,
1924 .iova = MOCK_APERTURE_START,
1925 .length = BUFFER_SIZE,
1926 };
1927 unsigned int new_ioas_id, ioas_id;
1928
1929 /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
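	/*
	 * IOMMU_IOAS_COPY is expected to reuse the pages already pinned in the
	 * source IOAS, so copying into the domain-backed IOAS should not pin
	 * the pages a second time.
	 */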
1930 test_ioctl_ioas_alloc(&ioas_id);
1931 if (variant->file) {
1932 test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
1933 &copy_cmd.src_iova);
1934 } else {
1935 test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
1936 &copy_cmd.src_iova);
1937 }
1938 test_cmd_create_access(ioas_id, &access_cmd.id,
1939 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1940
1941 access_cmd.access_pages.iova = copy_cmd.src_iova;
1942 ASSERT_EQ(0,
1943 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1944 &access_cmd));
1945 copy_cmd.src_ioas_id = ioas_id;
1946 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1947 check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1948
1949 /* Now replace the ioas with a new one */
1950 test_ioctl_ioas_alloc(&new_ioas_id);
1951 if (variant->file) {
1952 test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
1953 &copy_cmd.src_iova);
1954 } else {
1955 test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
1956 &copy_cmd.src_iova);
1957 }
1958 test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1959
1960 /* Destroy the old ioas and cleanup copied mapping */
1961 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1962 test_ioctl_destroy(ioas_id);
1963
1964 /* Then run the same test again with the new ioas */
1965 access_cmd.access_pages.iova = copy_cmd.src_iova;
1966 ASSERT_EQ(0,
1967 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1968 &access_cmd));
1969 copy_cmd.src_ioas_id = new_ioas_id;
1970 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1971 check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1972
1973 test_cmd_destroy_access_pages(
1974 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1975 test_cmd_destroy_access(access_cmd.id);
1976
1977 test_ioctl_destroy(new_ioas_id);
1978 }
1979
1980 TEST_F(iommufd_mock_domain, replace)
1981 {
1982 uint32_t ioas_id;
1983
1984 test_ioctl_ioas_alloc(&ioas_id);
1985
1986 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1987
1988 /*
1989 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
1990 * should get ENOENT when we try to use it.
1991 */
1992 if (variant->mock_domains == 1)
1993 test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1994 self->hwpt_ids[0]);
1995
1996 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1997 if (variant->mock_domains >= 2) {
1998 test_cmd_mock_domain_replace(self->stdev_ids[0],
1999 self->hwpt_ids[1]);
2000 test_cmd_mock_domain_replace(self->stdev_ids[0],
2001 self->hwpt_ids[1]);
2002 test_cmd_mock_domain_replace(self->stdev_ids[0],
2003 self->hwpt_ids[0]);
2004 }
2005
2006 test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
2007 test_ioctl_destroy(ioas_id);
2008 }
2009
2010 TEST_F(iommufd_mock_domain, alloc_hwpt)
2011 {
2012 int i;
2013
2014 for (i = 0; i != variant->mock_domains; i++) {
2015 uint32_t hwpt_id[2];
2016 uint32_t stddev_id;
2017
2018 test_err_hwpt_alloc(EOPNOTSUPP,
2019 self->idev_ids[i], self->ioas_id,
2020 ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
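		/*
		 * The EOPNOTSUPP case above passes every flag bit except
		 * NEST_PARENT, checking that undefined allocation flags are
		 * rejected.
		 */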
2021 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
2022 0, &hwpt_id[0]);
2023 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
2024 IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
2025
2026 /* Do a hw_pagetable rotation test */
2027 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
2028 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
2029 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
2030 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
2031 test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
2032 test_ioctl_destroy(hwpt_id[1]);
2033
2034 test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
2035 test_ioctl_destroy(stddev_id);
2036 test_ioctl_destroy(hwpt_id[0]);
2037 }
2038 }
2039
2040 FIXTURE(iommufd_dirty_tracking)
2041 {
2042 int fd;
2043 uint32_t ioas_id;
2044 uint32_t hwpt_id;
2045 uint32_t stdev_id;
2046 uint32_t idev_id;
2047 unsigned long page_size;
2048 unsigned long bitmap_size;
2049 void *bitmap;
2050 void *buffer;
2051 };
2052
2053 FIXTURE_VARIANT(iommufd_dirty_tracking)
2054 {
2055 unsigned long buffer_size;
2056 bool hugepages;
2057 };
2058
2059 FIXTURE_SETUP(iommufd_dirty_tracking)
2060 {
2061 size_t mmap_buffer_size;
2062 unsigned long size;
2063 int mmap_flags;
2064 void *vrc;
2065 int rc;
2066
2067 if (variant->buffer_size < MOCK_PAGE_SIZE) {
2068 SKIP(return,
2069 "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
2070 variant->buffer_size, MOCK_PAGE_SIZE);
2071 }
2072
2073 self->fd = open("/dev/iommu", O_RDWR);
2074 ASSERT_NE(-1, self->fd);
2075
2076 mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
2077 mmap_buffer_size = variant->buffer_size;
2078 if (variant->hugepages) {
2079 /*
2080 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
2081 * not available.
2082 */
2083 mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
2084
2085 /*
2086 * Allocation must be aligned to the HUGEPAGE_SIZE, because the
2087 * following mmap() will automatically align the length to be a
2088 * multiple of the underlying huge page size. Failing to do the
2089 * same at this allocation will result in a memory overwrite by
2090 * the mmap().
2091 */
2092 if (mmap_buffer_size < HUGEPAGE_SIZE)
2093 mmap_buffer_size = HUGEPAGE_SIZE;
2094 }
2095
2096 rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
2097 if (rc || !self->buffer) {
2098 SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
2099 mmap_buffer_size, rc);
2100 }
2101 assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
2102 vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
2103 mmap_flags, -1, 0);
2104 assert(vrc == self->buffer);
2105
2106 self->page_size = MOCK_PAGE_SIZE;
2107 self->bitmap_size = variant->buffer_size / self->page_size;
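	/*
	 * A worked example, assuming a 4KiB PAGE_SIZE (so MOCK_PAGE_SIZE is
	 * 2KiB): the 64M variant needs 64M / 2K = 32768 dirty bits, i.e. the
	 * "4K bitmap" its comment refers to once DIV_ROUND_UP() below converts
	 * bits into bytes.
	 */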
2108
2109 /* Provision with an extra (PAGE_SIZE) for the unaligned case */
2110 size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
2111 rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
2112 assert(!rc);
2113 assert(self->bitmap);
2114 assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
2115
2116 test_ioctl_ioas_alloc(&self->ioas_id);
2117 /* Enable 1M mock IOMMU hugepages */
2118 if (variant->hugepages) {
2119 test_cmd_mock_domain_flags(self->ioas_id,
2120 MOCK_FLAGS_DEVICE_HUGE_IOVA,
2121 &self->stdev_id, &self->hwpt_id,
2122 &self->idev_id);
2123 } else {
2124 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
2125 &self->hwpt_id, &self->idev_id);
2126 }
2127 }
2128
2129 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
2130 {
2131 free(self->buffer);
2132 free(self->bitmap);
2133 teardown_iommufd(self->fd, _metadata);
2134 }
2135
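/*
 * The variant names below describe the dirty bitmap each buffer size needs,
 * assuming a 4KiB PAGE_SIZE: one dirty bit per MOCK_PAGE_SIZE (2KiB), so 8k
 * of buffer is 4 bits (half a u8), 16k is 8 bits (a u8), 64k is 32 bits (a
 * u32), 128k is 64 bits (a u64), and 320k is 160 bits (two u64s plus a
 * trailing partial word).
 */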
2136 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
2137 {
2138 /* half of a u8 index bitmap */
2139 .buffer_size = 8UL * 1024UL,
2140 };
2141
2142 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
2143 {
2144 /* one u8 index bitmap */
2145 .buffer_size = 16UL * 1024UL,
2146 };
2147
2148 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
2149 {
2150 /* one u32 index bitmap */
2151 .buffer_size = 64UL * 1024UL,
2152 };
2153
2154 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
2155 {
2156 /* one u64 index bitmap */
2157 .buffer_size = 128UL * 1024UL,
2158 };
2159
2160 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
2161 {
2162 /* two u64 index and trailing end bitmap */
2163 .buffer_size = 320UL * 1024UL,
2164 };
2165
2166 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
2167 {
2168 /* 4K bitmap (64M IOVA range) */
2169 .buffer_size = 64UL * 1024UL * 1024UL,
2170 };
2171
2172 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
2173 {
2174 /* 4K bitmap (64M IOVA range) */
2175 .buffer_size = 64UL * 1024UL * 1024UL,
2176 .hugepages = true,
2177 };
2178
2179 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
2180 {
2181 /* 8K bitmap (128M IOVA range) */
2182 .buffer_size = 128UL * 1024UL * 1024UL,
2183 };
2184
2185 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
2186 {
2187 /* 8K bitmap (128M IOVA range) */
2188 .buffer_size = 128UL * 1024UL * 1024UL,
2189 .hugepages = true,
2190 };
2191
2192 TEST_F(iommufd_dirty_tracking, enforce_dirty)
2193 {
2194 uint32_t ioas_id, stddev_id, idev_id;
2195 uint32_t hwpt_id, _hwpt_id;
2196 uint32_t dev_flags;
2197
2198 /* Regular case */
2199 dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
2200 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2201 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2202 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2203 test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
2204 NULL);
2205 test_ioctl_destroy(stddev_id);
2206 test_ioctl_destroy(hwpt_id);
2207
2208 /* IOMMU device does not support dirty tracking */
2209 test_ioctl_ioas_alloc(&ioas_id);
2210 test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
2211 &idev_id);
2212 test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
2213 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2214 test_ioctl_destroy(stddev_id);
2215 }
2216
2217 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
2218 {
2219 uint32_t stddev_id;
2220 uint32_t hwpt_id;
2221
2222 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2223 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2224 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2225 test_cmd_set_dirty_tracking(hwpt_id, true);
2226 test_cmd_set_dirty_tracking(hwpt_id, false);
2227
2228 test_ioctl_destroy(stddev_id);
2229 test_ioctl_destroy(hwpt_id);
2230 }
2231
2232 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
2233 {
2234 uint32_t caps = 0;
2235 uint32_t stddev_id;
2236 uint32_t hwpt_id;
2237
2238 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
2239 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2240 test_cmd_get_hw_capabilities(self->idev_id, caps);
2241 ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
2242 caps & IOMMU_HW_CAP_DIRTY_TRACKING);
2243
2244 test_ioctl_destroy(stddev_id);
2245 test_ioctl_destroy(hwpt_id);
2246 }
2247
2248 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
2249 {
2250 uint32_t page_size = MOCK_PAGE_SIZE;
2251 uint32_t hwpt_id;
2252 uint32_t ioas_id;
2253
2254 if (variant->hugepages)
2255 page_size = MOCK_HUGE_PAGE_SIZE;
2256
2257 test_ioctl_ioas_alloc(&ioas_id);
2258 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2259 variant->buffer_size, MOCK_APERTURE_START);
2260
2261 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
2262 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2263
2264 test_cmd_set_dirty_tracking(hwpt_id, true);
2265
2266 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2267 MOCK_APERTURE_START, self->page_size, page_size,
2268 self->bitmap, self->bitmap_size, 0, _metadata);
2269
2270 /* PAGE_SIZE unaligned bitmap */
2271 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2272 MOCK_APERTURE_START, self->page_size, page_size,
2273 self->bitmap + MOCK_PAGE_SIZE,
2274 self->bitmap_size, 0, _metadata);
2275
2276 /* u64 unaligned bitmap */
2277 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2278 MOCK_APERTURE_START, self->page_size, page_size,
2279 self->bitmap + 0xff1, self->bitmap_size, 0,
2280 _metadata);
2281
2282 test_ioctl_destroy(hwpt_id);
2283 }
2284
2285 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
2286 {
2287 uint32_t page_size = MOCK_PAGE_SIZE;
2288 uint32_t hwpt_id;
2289 uint32_t ioas_id;
2290
2291 if (variant->hugepages)
2292 page_size = MOCK_HUGE_PAGE_SIZE;
2293
2294 test_ioctl_ioas_alloc(&ioas_id);
2295 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2296 variant->buffer_size, MOCK_APERTURE_START);
2297
2298 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
2299 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2300
2301 test_cmd_set_dirty_tracking(hwpt_id, true);
2302
2303 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2304 MOCK_APERTURE_START, self->page_size, page_size,
2305 self->bitmap, self->bitmap_size,
2306 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2307 _metadata);
2308
2309 /* Unaligned bitmap */
2310 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2311 MOCK_APERTURE_START, self->page_size, page_size,
2312 self->bitmap + MOCK_PAGE_SIZE,
2313 self->bitmap_size,
2314 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2315 _metadata);
2316
2317 /* u64 unaligned bitmap */
2318 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2319 MOCK_APERTURE_START, self->page_size, page_size,
2320 self->bitmap + 0xff1, self->bitmap_size,
2321 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2322 _metadata);
2323
2324 test_ioctl_destroy(hwpt_id);
2325 }
2326
2327 /* VFIO compatibility IOCTLs */
2328
2329 TEST_F(iommufd, simple_ioctls)
2330 {
2331 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
2332 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
2333 }
2334
2335 TEST_F(iommufd, unmap_cmd)
2336 {
2337 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2338 .iova = MOCK_APERTURE_START,
2339 .size = PAGE_SIZE,
2340 };
2341
2342 unmap_cmd.argsz = 1;
2343 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2344
2345 unmap_cmd.argsz = sizeof(unmap_cmd);
2346 unmap_cmd.flags = 1 << 31;
2347 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2348
2349 unmap_cmd.flags = 0;
2350 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2351 }
2352
2353 TEST_F(iommufd, map_cmd)
2354 {
2355 struct vfio_iommu_type1_dma_map map_cmd = {
2356 .iova = MOCK_APERTURE_START,
2357 .size = PAGE_SIZE,
2358 .vaddr = (__u64)buffer,
2359 };
2360
2361 map_cmd.argsz = 1;
2362 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2363
2364 map_cmd.argsz = sizeof(map_cmd);
2365 map_cmd.flags = 1 << 31;
2366 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2367
2368 /* Requires a domain to be attached */
2369 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2370 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2371 }
2372
2373 TEST_F(iommufd, info_cmd)
2374 {
2375 struct vfio_iommu_type1_info info_cmd = {};
2376
2377 /* Invalid argsz */
2378 info_cmd.argsz = 1;
2379 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2380
2381 info_cmd.argsz = sizeof(info_cmd);
2382 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2383 }
2384
2385 TEST_F(iommufd, set_iommu_cmd)
2386 {
2387 /* Requires a domain to be attached */
2388 EXPECT_ERRNO(ENODEV,
2389 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2390 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2391 }
2392
2393 TEST_F(iommufd, vfio_ioas)
2394 {
2395 struct iommu_vfio_ioas vfio_ioas_cmd = {
2396 .size = sizeof(vfio_ioas_cmd),
2397 .op = IOMMU_VFIO_IOAS_GET,
2398 };
2399 __u32 ioas_id;
2400
2401 /* ENODEV if there is no compat ioas */
2402 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2403
2404 /* Invalid id for set */
2405 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2406 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2407
2408 /* Valid id for set */
2409 test_ioctl_ioas_alloc(&ioas_id);
2410 vfio_ioas_cmd.ioas_id = ioas_id;
2411 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2412
2413 /* Same id comes back from get */
2414 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2415 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2416 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2417
2418 /* Clear works */
2419 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2420 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2421 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2422 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2423 }
2424
2425 FIXTURE(vfio_compat_mock_domain)
2426 {
2427 int fd;
2428 uint32_t ioas_id;
2429 };
2430
2431 FIXTURE_VARIANT(vfio_compat_mock_domain)
2432 {
2433 unsigned int version;
2434 };
2435
2436 FIXTURE_SETUP(vfio_compat_mock_domain)
2437 {
2438 struct iommu_vfio_ioas vfio_ioas_cmd = {
2439 .size = sizeof(vfio_ioas_cmd),
2440 .op = IOMMU_VFIO_IOAS_SET,
2441 };
2442
2443 self->fd = open("/dev/iommu", O_RDWR);
2444 ASSERT_NE(-1, self->fd);
2445
2446 /* Create what VFIO would consider a group */
2447 test_ioctl_ioas_alloc(&self->ioas_id);
2448 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2449
2450 /* Attach it to the vfio compat */
2451 vfio_ioas_cmd.ioas_id = self->ioas_id;
2452 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2453 ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2454 }
2455
2456 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2457 {
2458 teardown_iommufd(self->fd, _metadata);
2459 }
2460
2461 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2462 {
2463 .version = VFIO_TYPE1v2_IOMMU,
2464 };
2465
2466 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2467 {
2468 .version = VFIO_TYPE1_IOMMU,
2469 };
2470
2471 TEST_F(vfio_compat_mock_domain, simple_close)
2472 {
2473 }
2474
2475 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2476 {
2477 struct iommu_option cmd = {
2478 .size = sizeof(cmd),
2479 .option_id = IOMMU_OPTION_HUGE_PAGES,
2480 .op = IOMMU_OPTION_OP_GET,
2481 .val64 = 3,
2482 .object_id = self->ioas_id,
2483 };
2484
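	/*
	 * Expected values (an assumption based on the huge_map test below):
	 * type1 (v0) compat disables IOMMU_OPTION_HUGE_PAGES so that legacy
	 * sub-range unmaps can split mappings, while type1v2 leaves huge pages
	 * enabled.
	 */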
2485 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2486 if (variant->version == VFIO_TYPE1_IOMMU) {
2487 ASSERT_EQ(0, cmd.val64);
2488 } else {
2489 ASSERT_EQ(1, cmd.val64);
2490 }
2491 }
2492
2493 /*
2494 * Execute an ioctl command stored in buffer and check that the result does not
2495 * overflow memory.
2496 */
2497 static bool is_filled(const void *buf, uint8_t c, size_t len)
2498 {
2499 const uint8_t *cbuf = buf;
2500
2501 for (; len; cbuf++, len--)
2502 if (*cbuf != c)
2503 return false;
2504 return true;
2505 }
2506
2507 #define ioctl_check_buf(fd, cmd) \
2508 ({ \
2509 size_t _cmd_len = *(__u32 *)buffer; \
2510 \
2511 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2512 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
2513 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
2514 BUFFER_SIZE - _cmd_len)); \
2515 })
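/*
 * Usage sketch: the caller primes the shared buffer with the command (whose
 * first __u32 is the argsz/size), e.g. ioctl_check_buf(self->fd,
 * VFIO_IOMMU_GET_INFO). The bytes past the declared size are poisoned with
 * 0xAA and must still be 0xAA afterwards, proving the kernel wrote only
 * within the declared size.
 */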
2516
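/*
 * Walk the VFIO info capability chain: each vfio_info_cap_header carries an
 * id, a version, and a 'next' offset measured from the start of the info
 * struct, with next == 0 terminating the chain.
 */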
2517 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2518 struct vfio_iommu_type1_info *info_cmd)
2519 {
2520 const struct vfio_info_cap_header *cap;
2521
2522 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2523 cap = buffer + info_cmd->cap_offset;
2524 while (true) {
2525 size_t cap_size;
2526
2527 if (cap->next)
2528 cap_size = (buffer + cap->next) - (void *)cap;
2529 else
2530 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2531
2532 switch (cap->id) {
2533 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2534 struct vfio_iommu_type1_info_cap_iova_range *data =
2535 (void *)cap;
2536
2537 ASSERT_EQ(1, data->header.version);
2538 ASSERT_EQ(1, data->nr_iovas);
2539 EXPECT_EQ(MOCK_APERTURE_START,
2540 data->iova_ranges[0].start);
2541 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2542 break;
2543 }
2544 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2545 struct vfio_iommu_type1_info_dma_avail *data =
2546 (void *)cap;
2547
2548 ASSERT_EQ(1, data->header.version);
2549 ASSERT_EQ(sizeof(*data), cap_size);
2550 break;
2551 }
2552 default:
2553 ASSERT_EQ(false, true);
2554 break;
2555 }
2556 if (!cap->next)
2557 break;
2558
2559 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2560 ASSERT_GE(buffer + cap->next, (void *)cap);
2561 cap = buffer + cap->next;
2562 }
2563 }
2564
2565 TEST_F(vfio_compat_mock_domain, get_info)
2566 {
2567 struct vfio_iommu_type1_info *info_cmd = buffer;
2568 unsigned int i;
2569 size_t caplen;
2570
2571 /* Pre-cap ABI */
2572 *info_cmd = (struct vfio_iommu_type1_info){
2573 .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2574 };
2575 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2576 ASSERT_NE(0, info_cmd->iova_pgsizes);
2577 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2578 info_cmd->flags);
2579
2580 /* Read the cap chain size */
2581 *info_cmd = (struct vfio_iommu_type1_info){
2582 .argsz = sizeof(*info_cmd),
2583 };
2584 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2585 ASSERT_NE(0, info_cmd->iova_pgsizes);
2586 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2587 info_cmd->flags);
2588 ASSERT_EQ(0, info_cmd->cap_offset);
2589 ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2590
2591 /* Read the caps, the kernel should never create corrupted caps */
2592 caplen = info_cmd->argsz;
2593 for (i = sizeof(*info_cmd); i < caplen; i++) {
2594 *info_cmd = (struct vfio_iommu_type1_info){
2595 .argsz = i,
2596 };
2597 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2598 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2599 info_cmd->flags);
2600 if (!info_cmd->cap_offset)
2601 continue;
2602 check_vfio_info_cap_chain(_metadata, info_cmd);
2603 }
2604 }
2605
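/*
 * Randomly permute the array by swapping each slot with another one. This is
 * not a textbook Fisher-Yates shuffle, but it only needs to randomize the
 * order in which the IOVAs below are unmapped.
 */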
2606 static void shuffle_array(unsigned long *array, size_t nelms)
2607 {
2608 unsigned int i;
2609
2610 /* Shuffle */
2611 for (i = 0; i != nelms; i++) {
2612 unsigned long tmp = array[i];
2613 unsigned int other = rand() % (nelms - i);
2614
2615 array[i] = array[other];
2616 array[other] = tmp;
2617 }
2618 }
2619
2620 TEST_F(vfio_compat_mock_domain, map)
2621 {
2622 struct vfio_iommu_type1_dma_map map_cmd = {
2623 .argsz = sizeof(map_cmd),
2624 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2625 .vaddr = (uintptr_t)buffer,
2626 .size = BUFFER_SIZE,
2627 .iova = MOCK_APERTURE_START,
2628 };
2629 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2630 .argsz = sizeof(unmap_cmd),
2631 .size = BUFFER_SIZE,
2632 .iova = MOCK_APERTURE_START,
2633 };
2634 unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2635 unsigned int i;
2636
2637 /* Simple map/unmap */
2638 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2639 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2640 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2641
2642 /* UNMAP_FLAG_ALL requires 0 iova/size */
2643 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2644 unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2645 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2646
2647 unmap_cmd.iova = 0;
2648 unmap_cmd.size = 0;
2649 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2650 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2651
2652 /* Small pages */
2653 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2654 map_cmd.iova = pages_iova[i] =
2655 MOCK_APERTURE_START + i * PAGE_SIZE;
2656 map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2657 map_cmd.size = PAGE_SIZE;
2658 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2659 }
2660 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2661
2662 unmap_cmd.flags = 0;
2663 unmap_cmd.size = PAGE_SIZE;
2664 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2665 unmap_cmd.iova = pages_iova[i];
2666 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2667 }
2668 }
2669
2670 TEST_F(vfio_compat_mock_domain, huge_map)
2671 {
2672 size_t buf_size = HUGEPAGE_SIZE * 2;
2673 struct vfio_iommu_type1_dma_map map_cmd = {
2674 .argsz = sizeof(map_cmd),
2675 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2676 .size = buf_size,
2677 .iova = MOCK_APERTURE_START,
2678 };
2679 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2680 .argsz = sizeof(unmap_cmd),
2681 };
2682 unsigned long pages_iova[16];
2683 unsigned int i;
2684 void *buf;
2685
2686 /* Test huge pages and splitting */
2687 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2688 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2689 0);
2690 ASSERT_NE(MAP_FAILED, buf);
2691 map_cmd.vaddr = (uintptr_t)buf;
2692 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2693
2694 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2695 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2696 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2697 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2698
2699 /* type1 mode can cut up larger mappings, type1v2 always fails */
2700 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2701 unmap_cmd.iova = pages_iova[i];
2702 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2703 if (variant->version == VFIO_TYPE1_IOMMU) {
2704 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2705 &unmap_cmd));
2706 } else {
2707 EXPECT_ERRNO(ENOENT,
2708 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2709 &unmap_cmd));
2710 }
2711 }
2712 }
2713
2714 FIXTURE(iommufd_viommu)
2715 {
2716 int fd;
2717 uint32_t ioas_id;
2718 uint32_t stdev_id;
2719 uint32_t hwpt_id;
2720 uint32_t nested_hwpt_id;
2721 uint32_t device_id;
2722 uint32_t viommu_id;
2723 };
2724
2725 FIXTURE_VARIANT(iommufd_viommu)
2726 {
2727 unsigned int viommu;
2728 };
2729
2730 FIXTURE_SETUP(iommufd_viommu)
2731 {
2732 self->fd = open("/dev/iommu", O_RDWR);
2733 ASSERT_NE(-1, self->fd);
2734 test_ioctl_ioas_alloc(&self->ioas_id);
2735 test_ioctl_set_default_memory_limit();
2736
2737 if (variant->viommu) {
2738 struct iommu_hwpt_selftest data = {
2739 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2740 };
2741
2742 test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
2743 &self->device_id);
2744
2745 /* Allocate a nesting parent hwpt */
2746 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
2747 IOMMU_HWPT_ALLOC_NEST_PARENT,
2748 &self->hwpt_id);
2749
2750 /* Allocate a vIOMMU taking refcount of the parent hwpt */
2751 test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2752 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2753 &self->viommu_id);
2754
2755 /* Allocate a regular nested hwpt */
2756 test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
2757 &self->nested_hwpt_id,
2758 IOMMU_HWPT_DATA_SELFTEST, &data,
2759 sizeof(data));
2760 }
2761 }
2762
2763 FIXTURE_TEARDOWN(iommufd_viommu)
2764 {
2765 teardown_iommufd(self->fd, _metadata);
2766 }
2767
2768 FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
2769 {
2770 .viommu = 0,
2771 };
2772
2773 FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
2774 {
2775 .viommu = 1,
2776 };
2777
2778 TEST_F(iommufd_viommu, viommu_auto_destroy)
2779 {
2780 }
2781
2782 TEST_F(iommufd_viommu, viommu_negative_tests)
2783 {
2784 uint32_t device_id = self->device_id;
2785 uint32_t ioas_id = self->ioas_id;
2786 uint32_t hwpt_id;
2787
2788 if (self->device_id) {
2789 /* Negative test -- invalid hwpt (hwpt_id=0) */
2790 test_err_viommu_alloc(ENOENT, device_id, 0,
2791 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2792 NULL);
2793
2794 /* Negative test -- not a nesting parent hwpt */
2795 test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
2796 test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
2797 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2798 NULL);
2799 test_ioctl_destroy(hwpt_id);
2800
2801 /* Negative test -- unsupported viommu type */
2802 test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
2803 0xdead, NULL, 0, NULL);
2804 EXPECT_ERRNO(EBUSY,
2805 _test_ioctl_destroy(self->fd, self->hwpt_id));
2806 EXPECT_ERRNO(EBUSY,
2807 _test_ioctl_destroy(self->fd, self->viommu_id));
2808 } else {
2809 test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
2810 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2811 NULL);
2812 }
2813 }
2814
2815 TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
2816 {
2817 struct iommu_hwpt_selftest data = {
2818 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2819 };
2820 uint32_t viommu_id = self->viommu_id;
2821 uint32_t dev_id = self->device_id;
2822 uint32_t iopf_hwpt_id;
2823 uint32_t fault_id;
2824 uint32_t fault_fd;
2825 uint32_t vdev_id;
2826
2827 if (!dev_id)
2828 SKIP(return, "Skipping test for variant no_viommu");
2829
2830 test_ioctl_fault_alloc(&fault_id, &fault_fd);
2831 test_err_hwpt_alloc_iopf(ENOENT, dev_id, viommu_id, UINT32_MAX,
2832 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2833 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2834 test_err_hwpt_alloc_iopf(EOPNOTSUPP, dev_id, viommu_id, fault_id,
2835 IOMMU_HWPT_FAULT_ID_VALID | (1 << 31),
2836 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
2837 sizeof(data));
2838 test_cmd_hwpt_alloc_iopf(dev_id, viommu_id, fault_id,
2839 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2840 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2841
2842 /* Must allocate vdevice before attaching to a nested hwpt */
2843 test_err_mock_domain_replace(ENOENT, self->stdev_id, iopf_hwpt_id);
2844 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2845 test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
2846 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, iopf_hwpt_id));
2847 test_cmd_trigger_iopf(dev_id, fault_fd);
2848
2849 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2850 test_ioctl_destroy(iopf_hwpt_id);
2851 close(fault_fd);
2852 test_ioctl_destroy(fault_id);
2853 }
2854
2855 TEST_F(iommufd_viommu, viommu_alloc_with_data)
2856 {
2857 struct iommu_viommu_selftest data = {
2858 .in_data = 0xbeef,
2859 };
2860 uint32_t *test;
2861
2862 if (!self->device_id)
2863 SKIP(return, "Skipping test for variant no_viommu");
2864
2865 test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2866 IOMMU_VIOMMU_TYPE_SELFTEST, &data, sizeof(data),
2867 &self->viommu_id);
2868 ASSERT_EQ(data.out_data, data.in_data);
2869
2870 /* Negative mmap tests -- offset and length cannot be changed */
2871 test_err_mmap(ENXIO, data.out_mmap_length,
2872 data.out_mmap_offset + PAGE_SIZE);
2873 test_err_mmap(ENXIO, data.out_mmap_length,
2874 data.out_mmap_offset + PAGE_SIZE * 2);
2875 test_err_mmap(ENXIO, data.out_mmap_length / 2, data.out_mmap_offset);
2876 test_err_mmap(ENXIO, data.out_mmap_length * 2, data.out_mmap_offset);
2877
2878 /* Now do a correct mmap for a loopback test */
2879 test = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
2880 MAP_SHARED, self->fd, data.out_mmap_offset);
2881 ASSERT_NE(MAP_FAILED, test);
2882 ASSERT_EQ(data.in_data, *test);
2883
2884 /* Destroying the viommu that owns the mmap region should be blocked */
2885 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->viommu_id));
2886 munmap(test, data.out_mmap_length);
2887 }
2888
2889 TEST_F(iommufd_viommu, vdevice_alloc)
2890 {
2891 uint32_t viommu_id = self->viommu_id;
2892 uint32_t dev_id = self->device_id;
2893 uint32_t vdev_id = 0;
2894 uint32_t veventq_id;
2895 uint32_t veventq_fd;
2896 int prev_seq = -1;
2897
2898 if (dev_id) {
2899 /* Must allocate vdevice before attaching to a nested hwpt */
2900 test_err_mock_domain_replace(ENOENT, self->stdev_id,
2901 self->nested_hwpt_id);
2902
2903 /* Allocate a vEVENTQ with veventq_depth=2 */
2904 test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
2905 &veventq_id, &veventq_fd);
2906 test_err_veventq_alloc(EEXIST, viommu_id,
2907 IOMMU_VEVENTQ_TYPE_SELFTEST, NULL, NULL);
2908 /* Set vdev_id to 0x99, unset it, and set to 0x88 */
2909 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2910 test_cmd_mock_domain_replace(self->stdev_id,
2911 self->nested_hwpt_id);
2912 test_cmd_trigger_vevents(dev_id, 1);
2913 test_cmd_read_vevents(veventq_fd, 1, 0x99, &prev_seq);
2914 test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
2915 &vdev_id);
2916 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2917 test_ioctl_destroy(vdev_id);
2918
2919 /* Try again with 0x88 */
2920 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
2921 test_cmd_mock_domain_replace(self->stdev_id,
2922 self->nested_hwpt_id);
2923 /* Trigger an overflow with three events */
2924 test_cmd_trigger_vevents(dev_id, 3);
2925 test_err_read_vevents(EOVERFLOW, veventq_fd, 3, 0x88,
2926 &prev_seq);
2927 /* Overflow must be gone after the previous reads */
2928 test_cmd_trigger_vevents(dev_id, 1);
2929 test_cmd_read_vevents(veventq_fd, 1, 0x88, &prev_seq);
2930 close(veventq_fd);
2931 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2932 test_ioctl_destroy(vdev_id);
2933 test_ioctl_destroy(veventq_id);
2934 } else {
2935 test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
2936 }
2937 }
2938
2939 TEST_F(iommufd_viommu, vdevice_cache)
2940 {
2941 struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
2942 uint32_t viommu_id = self->viommu_id;
2943 uint32_t dev_id = self->device_id;
2944 uint32_t vdev_id = 0;
2945 uint32_t num_inv;
2946
2947 if (!dev_id)
2948 SKIP(return, "Skipping test for variant no_viommu");
2949
2950 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2951
2952 test_cmd_dev_check_cache_all(dev_id, IOMMU_TEST_DEV_CACHE_DEFAULT);
2953
2954 /* Check data_type by passing zero-length array */
2955 num_inv = 0;
2956 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
2957 &num_inv);
2958 assert(!num_inv);
2959
2960 /* Negative test: Invalid data_type */
2961 num_inv = 1;
2962 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2963 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
2964 sizeof(*inv_reqs), &num_inv);
2965 assert(!num_inv);
2966
2967 /* Negative test: structure size sanity */
2968 num_inv = 1;
2969 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2970 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2971 sizeof(*inv_reqs) + 1, &num_inv);
2972 assert(!num_inv);
2973
2974 num_inv = 1;
2975 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2976 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 1,
2977 &num_inv);
2978 assert(!num_inv);
2979
2980 /* Negative test: invalid flag is passed */
2981 num_inv = 1;
2982 inv_reqs[0].flags = 0xffffffff;
2983 inv_reqs[0].vdev_id = 0x99;
2984 test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
2985 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2986 sizeof(*inv_reqs), &num_inv);
2987 assert(!num_inv);
2988
2989 /* Negative test: invalid data_uptr when array is not empty */
2990 num_inv = 1;
2991 inv_reqs[0].flags = 0;
2992 inv_reqs[0].vdev_id = 0x99;
2993 test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
2994 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2995 sizeof(*inv_reqs), &num_inv);
2996 assert(!num_inv);
2997
2998 /* Negative test: invalid entry_len when array is not empty */
2999 num_inv = 1;
3000 inv_reqs[0].flags = 0;
3001 inv_reqs[0].vdev_id = 0x99;
3002 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3003 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 0,
3004 &num_inv);
3005 assert(!num_inv);
3006
3007 /* Negative test: invalid cache_id */
3008 num_inv = 1;
3009 inv_reqs[0].flags = 0;
3010 inv_reqs[0].vdev_id = 0x99;
3011 inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
3012 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3013 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3014 sizeof(*inv_reqs), &num_inv);
3015 assert(!num_inv);
3016
3017 /* Negative test: invalid vdev_id */
3018 num_inv = 1;
3019 inv_reqs[0].flags = 0;
3020 inv_reqs[0].vdev_id = 0x9;
3021 inv_reqs[0].cache_id = 0;
3022 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3023 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3024 sizeof(*inv_reqs), &num_inv);
3025 assert(!num_inv);
3026
3027 /*
3028 * Invalidate the 1st cache entry but fail the 2nd request
3029 * due to invalid flags configuration in the 2nd request.
3030 */
3031 num_inv = 2;
3032 inv_reqs[0].flags = 0;
3033 inv_reqs[0].vdev_id = 0x99;
3034 inv_reqs[0].cache_id = 0;
3035 inv_reqs[1].flags = 0xffffffff;
3036 inv_reqs[1].vdev_id = 0x99;
3037 inv_reqs[1].cache_id = 1;
3038 test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
3039 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3040 sizeof(*inv_reqs), &num_inv);
3041 assert(num_inv == 1);
3042 test_cmd_dev_check_cache(dev_id, 0, 0);
3043 test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
3044 test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3045 test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3046
3047 /*
3048 * Invalidate the 1st cache entry but fail the 2nd request
3049 * due to invalid cache_id configuration in the 2nd request.
3050 */
3051 num_inv = 2;
3052 inv_reqs[0].flags = 0;
3053 inv_reqs[0].vdev_id = 0x99;
3054 inv_reqs[0].cache_id = 0;
3055 inv_reqs[1].flags = 0;
3056 inv_reqs[1].vdev_id = 0x99;
3057 inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
3058 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3059 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3060 sizeof(*inv_reqs), &num_inv);
3061 assert(num_inv == 1);
3062 test_cmd_dev_check_cache(dev_id, 0, 0);
3063 test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
3064 test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3065 test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3066
3067 /* Invalidate the 2nd cache entry and verify */
3068 num_inv = 1;
3069 inv_reqs[0].flags = 0;
3070 inv_reqs[0].vdev_id = 0x99;
3071 inv_reqs[0].cache_id = 1;
3072 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3073 &num_inv);
3074 assert(num_inv == 1);
3075 test_cmd_dev_check_cache(dev_id, 0, 0);
3076 test_cmd_dev_check_cache(dev_id, 1, 0);
3077 test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3078 test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3079
3080 /* Invalidate the 3rd and 4th cache entries and verify */
3081 num_inv = 2;
3082 inv_reqs[0].flags = 0;
3083 inv_reqs[0].vdev_id = 0x99;
3084 inv_reqs[0].cache_id = 2;
3085 inv_reqs[1].flags = 0;
3086 inv_reqs[1].vdev_id = 0x99;
3087 inv_reqs[1].cache_id = 3;
3088 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3089 &num_inv);
3090 assert(num_inv == 2);
3091 test_cmd_dev_check_cache_all(dev_id, 0);
3092
3093 /* Invalidate all cache entries for vdev_id 0x99 and verify */
3094 num_inv = 1;
3095 inv_reqs[0].vdev_id = 0x99;
3096 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
3097 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3098 &num_inv);
3099 assert(num_inv == 1);
3100 test_cmd_dev_check_cache_all(dev_id, 0);
3101 test_ioctl_destroy(vdev_id);
3102 }
3103
3104 TEST_F(iommufd_viommu, hw_queue)
3105 {
3106 __u64 iova = MOCK_APERTURE_START, iova2;
3107 uint32_t viommu_id = self->viommu_id;
3108 uint32_t hw_queue_id[2];
3109
3110 if (!viommu_id)
3111 SKIP(return, "Skipping test for variant no_viommu");
3112
3113 /* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
3114 test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
3115 IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
3116 &hw_queue_id[0]);
3117 /* Fail queue addr and length */
3118 test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3119 0, iova, 0, &hw_queue_id[0]);
3120 test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
3121 IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
3122 PAGE_SIZE, &hw_queue_id[0]);
3123 /* Fail missing iova */
3124 test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3125 0, iova, PAGE_SIZE, &hw_queue_id[0]);
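	/*
	 * The ENOENT above is expected because the queue's IOVA range is not
	 * mapped yet; once mapped below, allocating index=0 takes ownership of
	 * (and pins) the backing page, which is why unmapping that IOVA later
	 * fails with EBUSY.
	 */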
3126
3127 /* Map iova */
3128 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
3129 test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
3130
3131 /* Fail index=1 and =MAX; must start from index=0 */
3132 test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3133 iova, PAGE_SIZE, &hw_queue_id[0]);
3134 test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3135 IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
3136 &hw_queue_id[0]);
3137
3138 /* Allocate index=0, declare ownership of the iova */
3139 test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
3140 iova, PAGE_SIZE, &hw_queue_id[0]);
3141 /* Fail duplicated index */
3142 test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3143 0, iova, PAGE_SIZE, &hw_queue_id[0]);
3144 /* Fail unmap, due to iova ownership */
3145 test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
3146 /* The 2nd page is not pinned, so it can be unmapped */
3147 test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
3148
3149 /* Allocate index=1, with an unaligned case */
3150 test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3151 iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
3152 &hw_queue_id[1]);
3153 /* Fail to destroy, due to dependency */
3154 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));
3155
3156 /* Destroy in descending order */
3157 test_ioctl_destroy(hw_queue_id[1]);
3158 test_ioctl_destroy(hw_queue_id[0]);
3159 /* Now it can unmap the first page */
3160 test_ioctl_ioas_unmap(iova, PAGE_SIZE);
3161 }
3162
3163 TEST_F(iommufd_viommu, vdevice_tombstone)
3164 {
3165 uint32_t viommu_id = self->viommu_id;
3166 uint32_t dev_id = self->device_id;
3167 uint32_t vdev_id = 0;
3168
3169 if (!dev_id)
3170 SKIP(return, "Skipping test for variant no_viommu");
3171
3172 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
3173 test_ioctl_destroy(self->stdev_id);
3174 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, vdev_id));
3175 }
3176
3177 FIXTURE(iommufd_device_pasid)
3178 {
3179 int fd;
3180 uint32_t ioas_id;
3181 uint32_t hwpt_id;
3182 uint32_t stdev_id;
3183 uint32_t device_id;
3184 uint32_t no_pasid_stdev_id;
3185 uint32_t no_pasid_device_id;
3186 };
3187
3188 FIXTURE_VARIANT(iommufd_device_pasid)
3189 {
3190 bool pasid_capable;
3191 };
3192
3193 FIXTURE_SETUP(iommufd_device_pasid)
3194 {
3195 self->fd = open("/dev/iommu", O_RDWR);
3196 ASSERT_NE(-1, self->fd);
3197 test_ioctl_ioas_alloc(&self->ioas_id);
3198
3199 test_cmd_mock_domain_flags(self->ioas_id,
3200 MOCK_FLAGS_DEVICE_PASID,
3201 &self->stdev_id, &self->hwpt_id,
3202 &self->device_id);
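	/*
	 * Only the no_pasid variant also creates a second mock device without
	 * PASID support; the pasid_attach test uses it to check that pasid
	 * attach is rejected on a non-PASID-capable device.
	 */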
3203 if (!variant->pasid_capable)
3204 test_cmd_mock_domain_flags(self->ioas_id, 0,
3205 &self->no_pasid_stdev_id, NULL,
3206 &self->no_pasid_device_id);
3207 }
3208
3209 FIXTURE_TEARDOWN(iommufd_device_pasid)
3210 {
3211 teardown_iommufd(self->fd, _metadata);
3212 }
3213
3214 FIXTURE_VARIANT_ADD(iommufd_device_pasid, no_pasid)
3215 {
3216 .pasid_capable = false,
3217 };
3218
3219 FIXTURE_VARIANT_ADD(iommufd_device_pasid, has_pasid)
3220 {
3221 .pasid_capable = true,
3222 };
3223
3224 TEST_F(iommufd_device_pasid, pasid_attach)
3225 {
3226 struct iommu_hwpt_selftest data = {
3227 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
3228 };
3229 uint32_t nested_hwpt_id[3] = {};
3230 uint32_t parent_hwpt_id = 0;
3231 uint32_t fault_id, fault_fd;
3232 uint32_t s2_hwpt_id = 0;
3233 uint32_t iopf_hwpt_id;
3234 uint32_t pasid = 100;
3235 uint32_t viommu_id;
3236
3237 /*
3238 * Negative: detach a pasid that was never attached. This is not expected,
3239 * but it should not result in failure anyway.
3240 */
3241 test_cmd_pasid_detach(pasid);
3242
3243 /* Allocate two nested hwpts sharing one common parent hwpt */
3244 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3245 IOMMU_HWPT_ALLOC_NEST_PARENT,
3246 &parent_hwpt_id);
3247 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3248 IOMMU_HWPT_ALLOC_PASID,
3249 &nested_hwpt_id[0],
3250 IOMMU_HWPT_DATA_SELFTEST,
3251 &data, sizeof(data));
3252 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3253 IOMMU_HWPT_ALLOC_PASID,
3254 &nested_hwpt_id[1],
3255 IOMMU_HWPT_DATA_SELFTEST,
3256 &data, sizeof(data));
3257
3258 /* Fault related preparation */
3259 test_ioctl_fault_alloc(&fault_id, &fault_fd);
3260 test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
3261 IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID,
3262 &iopf_hwpt_id,
3263 IOMMU_HWPT_DATA_SELFTEST, &data,
3264 sizeof(data));
3265
3266 /* Allocate a regular nested hwpt based on viommu */
3267 test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
3268 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0, &viommu_id);
3269 test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
3270 IOMMU_HWPT_ALLOC_PASID,
3271 &nested_hwpt_id[2],
3272 IOMMU_HWPT_DATA_SELFTEST, &data,
3273 sizeof(data));
3274
3275 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3276 IOMMU_HWPT_ALLOC_PASID,
3277 &s2_hwpt_id);
3278
3279 /* Attach RID to non-pasid compat domain, */
3280 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
3281 /* then attach to pasid should fail */
3282 test_err_pasid_attach(EINVAL, pasid, s2_hwpt_id);
3283
3284 /* Attach RID to pasid compat domain, */
3285 test_cmd_mock_domain_replace(self->stdev_id, s2_hwpt_id);
3286 /* then attach to pasid should succeed, */
3287 test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3288 /* but attach RID to non-pasid compat domain should fail now. */
3289 test_err_mock_domain_replace(EINVAL, self->stdev_id, parent_hwpt_id);
3290 /*
3291 * Detach hwpt from pasid 100, and check if the pasid 100
3292 * has null domain.
3293 */
3294 test_cmd_pasid_detach(pasid);
3295 ASSERT_EQ(0,
3296 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3297 pasid, 0));
3298 /* RID is attached to a pasid-compat domain, the pasid path is not used */
3299
3300 if (!variant->pasid_capable) {
3301 /*
3302 * PASID-compatible domain can be used by non-PASID-capable
3303 * device.
3304 */
3305 test_cmd_mock_domain_replace(self->no_pasid_stdev_id, nested_hwpt_id[0]);
3306 test_cmd_mock_domain_replace(self->no_pasid_stdev_id, self->ioas_id);
3307 /*
3308 * Attach hwpt to pasid 100 of non-PASID-capable device,
3309 * should fail no matter whether the domain is pasid-compat or not.
3310 */
3311 EXPECT_ERRNO(EINVAL,
3312 _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3313 pasid, parent_hwpt_id));
3314 EXPECT_ERRNO(EINVAL,
3315 _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3316 pasid, s2_hwpt_id));
3317 }
3318
3319 /*
3320 * Attach non pasid compat hwpt to pasid-capable device, should
3321 * fail, and have null domain.
3322 */
3323 test_err_pasid_attach(EINVAL, pasid, parent_hwpt_id);
3324 ASSERT_EQ(0,
3325 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3326 pasid, 0));
3327
3328 /*
3329 * Attach ioas to pasid 100, should fail, domain should
3330 * be null.
3331 */
3332 test_err_pasid_attach(EINVAL, pasid, self->ioas_id);
3333 ASSERT_EQ(0,
3334 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3335 pasid, 0));
3336
3337 /*
3338 * Attach the s2_hwpt to pasid 100, should succeed, domain should
3339 * be valid.
3340 */
3341 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3342 ASSERT_EQ(0,
3343 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3344 pasid, s2_hwpt_id));
3345
3346 /*
3347 * Try attach pasid 100 with another hwpt, should FAIL
3348 * as attach does not allow overwrite, use REPLACE instead.
3349 */
3350 test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3351
3352 /*
3353 * Detach hwpt from pasid 100 for next test, should succeed,
3354 * and have null domain.
3355 */
3356 test_cmd_pasid_detach(pasid);
3357 ASSERT_EQ(0,
3358 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3359 pasid, 0));
3360
3361 /*
3362 * Attach nested hwpt to pasid 100, should succeed, domain
3363 * should be valid.
3364 */
3365 test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3366 ASSERT_EQ(0,
3367 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3368 pasid, nested_hwpt_id[0]));
3369
3370 /* Attach to pasid 100 which has been attached, should fail. */
3371 test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3372
3373 /* cleanup pasid 100 */
3374 test_cmd_pasid_detach(pasid);
3375
3376 /* Replace tests */
3377
3378 pasid = 200;
3379 /*
3380 * Replace pasid 200 without attaching it, should fail
3381 * with -EINVAL.
3382 */
3383 test_err_pasid_replace(EINVAL, pasid, s2_hwpt_id);
3384
3385 /*
3386 * Attach the s2 hwpt to pasid 200, should succeed, domain should
3387 * be valid.
3388 */
3389 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3390 ASSERT_EQ(0,
3391 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3392 pasid, s2_hwpt_id));
3393
3394 /*
3395 * Replace pasid 200 with self->ioas_id, should fail
3396 * and domain should be the prior s2 hwpt.
3397 */
3398 test_err_pasid_replace(EINVAL, pasid, self->ioas_id);
3399 ASSERT_EQ(0,
3400 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3401 pasid, s2_hwpt_id));
3402
3403 /*
3404 * Replace a nested hwpt for pasid 200, should succeed,
3405 * and have valid domain.
3406 */
3407 test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);
3408 ASSERT_EQ(0,
3409 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3410 pasid, nested_hwpt_id[0]));
3411
3412 /*
3413 * Replace with another nested hwpt for pasid 200, should
3414 * succeed, and have valid domain.
3415 */
3416 test_cmd_pasid_replace(pasid, nested_hwpt_id[1]);
3417 ASSERT_EQ(0,
3418 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3419 pasid, nested_hwpt_id[1]));
3420
3421 /* cleanup pasid 200 */
3422 test_cmd_pasid_detach(pasid);
3423
3424 /* Negative Tests for pasid replace, use pasid 1024 */
3425
3426 /*
3427 * Attach the s2 hwpt to pasid 1024, should succeed, domain should
3428 * be valid.
3429 */
3430 pasid = 1024;
3431 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3432 ASSERT_EQ(0,
3433 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3434 pasid, s2_hwpt_id));
3435
3436 /*
3437 * Replace pasid 1024 with nested_hwpt_id[0], should fail,
3438 * but have the old valid domain. This is a designed
3439 * negative case. Normally, this shall succeed.
3440 */
3441 test_err_pasid_replace(ENOMEM, pasid, nested_hwpt_id[0]);
3442 ASSERT_EQ(0,
3443 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3444 pasid, s2_hwpt_id));
3445
3446 /* cleanup pasid 1024 */
3447 test_cmd_pasid_detach(pasid);
3448
3449 /* Attach to iopf-capable hwpt */
3450
3451 /*
3452 * Attach an iopf hwpt to pasid 2048, should succeed, domain should
3453 * be valid.
3454 */
3455 pasid = 2048;
3456 test_cmd_pasid_attach(pasid, iopf_hwpt_id);
3457 ASSERT_EQ(0,
3458 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3459 pasid, iopf_hwpt_id));
3460
3461 test_cmd_trigger_iopf_pasid(self->device_id, pasid, fault_fd);
3462
3463 /*
3464 * Replace with s2_hwpt_id for pasid 2048, should
3465 * succeed, and have valid domain.
3466 */
3467 test_cmd_pasid_replace(pasid, s2_hwpt_id);
3468 ASSERT_EQ(0,
3469 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3470 pasid, s2_hwpt_id));
3471
3472 /* cleanup pasid 2048 */
3473 test_cmd_pasid_detach(pasid);
3474
3475 test_ioctl_destroy(iopf_hwpt_id);
3476 close(fault_fd);
3477 test_ioctl_destroy(fault_id);
3478
3479 /* Detach the s2_hwpt_id from RID */
3480 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
3481 }
3482
3483 TEST_HARNESS_MAIN
3484