1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <asm/unistd.h>
4 #include <stdlib.h>
5 #include <sys/capability.h>
6 #include <sys/mman.h>
7 #include <sys/eventfd.h>
8
9 #define __EXPORTED_HEADERS__
10 #include <linux/vfio.h>
11
12 #include "iommufd_utils.h"
13
14 static unsigned long HUGEPAGE_SIZE;
15
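/*
 * Read the THP PMD size so test buffers can be hugepage aligned; fall back
 * to 2MiB if the sysfs file is not available.
 */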
16 static unsigned long get_huge_page_size(void)
17 {
18 char buf[80];
19 int ret;
20 int fd;
21
22 fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
23 O_RDONLY);
24 if (fd < 0)
25 return 2 * 1024 * 1024;
26
27 ret = read(fd, buf, sizeof(buf));
28 close(fd);
29 if (ret <= 0 || ret == sizeof(buf))
30 return 2 * 1024 * 1024;
31 buf[ret] = 0;
32 return strtoul(buf, NULL, 10);
33 }
34
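/*
 * Constructor run before any test: allocates the global test buffer,
 * hugepage aligned and re-backed by a MAP_FIXED anonymous mapping, plus a
 * memfd-backed buffer for the IOMMU_IOAS_MAP_FILE tests.
 */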
35 static __attribute__((constructor)) void setup_sizes(void)
36 {
37 void *vrc;
38 int rc;
39
40 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
41 HUGEPAGE_SIZE = get_huge_page_size();
42
43 BUFFER_SIZE = PAGE_SIZE * 16;
44 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
45 assert(!rc);
46 assert(buffer);
47 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
48 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
49 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
50 assert(vrc == buffer);
51
52 mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
53 &mfd);
54 assert(mfd_buffer != MAP_FAILED);
55 assert(mfd > 0);
56 }
57
58 FIXTURE(iommufd)
59 {
60 int fd;
61 };
62
63 FIXTURE_SETUP(iommufd)
64 {
65 self->fd = open("/dev/iommu", O_RDWR);
66 ASSERT_NE(-1, self->fd);
67 }
68
69 FIXTURE_TEARDOWN(iommufd)
70 {
71 teardown_iommufd(self->fd, _metadata);
72 }
73
74 TEST_F(iommufd, simple_close)
75 {
76 }
77
78 TEST_F(iommufd, cmd_fail)
79 {
80 struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
81
82 /* object id is invalid */
83 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
84 /* Bad pointer */
85 EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
86 /* Unknown ioctl */
87 EXPECT_ERRNO(ENOTTY,
88 ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
89 &cmd));
90 }
91
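/*
 * For each ioctl verify the size-compatibility contract: a size below the
 * required minimum fails with EINVAL, a larger size with non-zero trailing
 * bytes fails with E2BIG, and a larger size with zeroed trailing bytes must
 * behave exactly like the current structure size.
 */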
92 TEST_F(iommufd, cmd_length)
93 {
94 #define TEST_LENGTH(_struct, _ioctl, _last) \
95 { \
96 size_t min_size = offsetofend(struct _struct, _last); \
97 struct { \
98 struct _struct cmd; \
99 uint8_t extra; \
100 } cmd = { .cmd = { .size = min_size - 1 }, \
101 .extra = UINT8_MAX }; \
102 int old_errno; \
103 int rc; \
104 \
105 EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
106 cmd.cmd.size = sizeof(struct _struct) + 1; \
107 EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
108 cmd.cmd.size = sizeof(struct _struct); \
109 rc = ioctl(self->fd, _ioctl, &cmd); \
110 old_errno = errno; \
111 cmd.cmd.size = sizeof(struct _struct) + 1; \
112 cmd.extra = 0; \
113 if (rc) { \
114 EXPECT_ERRNO(old_errno, \
115 ioctl(self->fd, _ioctl, &cmd)); \
116 } else { \
117 ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
118 } \
119 }
120
121 TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
122 TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
123 TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
124 TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
125 TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
126 TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
127 out_iova_alignment);
128 TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
129 allowed_iovas);
130 TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
131 TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
132 TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
133 TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
134 TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
135 TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
136 TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
137 TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
138 TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
139 __reserved);
140 #undef TEST_LENGTH
141 }
142
143 TEST_F(iommufd, cmd_ex_fail)
144 {
145 struct {
146 struct iommu_destroy cmd;
147 __u64 future;
148 } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
149
150 /* object id is invalid and command is longer */
151 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
152 /* future area is non-zero */
153 cmd.future = 1;
154 EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
155 /* Original command "works" */
156 cmd.cmd.size = sizeof(cmd.cmd);
157 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
158 /* Short command fails */
159 cmd.cmd.size = sizeof(cmd.cmd) - 1;
160 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
161 }
162
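/*
 * IOMMU_OPTION_RLIMIT_MODE is a global option (no object_id): only values
 * 0 and 1 are accepted and setting it requires root. IOMMU_OPTION_HUGE_PAGES
 * is per-IOAS, so using it without an object_id fails with ENOENT.
 */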
163 TEST_F(iommufd, global_options)
164 {
165 struct iommu_option cmd = {
166 .size = sizeof(cmd),
167 .option_id = IOMMU_OPTION_RLIMIT_MODE,
168 .op = IOMMU_OPTION_OP_GET,
169 .val64 = 1,
170 };
171
172 cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
173 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
174 ASSERT_EQ(0, cmd.val64);
175
176 /* This requires root */
177 cmd.op = IOMMU_OPTION_OP_SET;
178 cmd.val64 = 1;
179 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
180 cmd.val64 = 2;
181 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
182
183 cmd.op = IOMMU_OPTION_OP_GET;
184 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
185 ASSERT_EQ(1, cmd.val64);
186
187 cmd.op = IOMMU_OPTION_OP_SET;
188 cmd.val64 = 0;
189 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
190
191 cmd.op = IOMMU_OPTION_OP_GET;
192 cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
193 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
194 cmd.op = IOMMU_OPTION_OP_SET;
195 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
196 }
197
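/*
 * Drop CAP_IPC_LOCK so pinned pages are charged to the process rather than
 * being exempt from accounting; the change_process tests observe the charge
 * through VmPin/VmLck in /proc/<pid>/status.
 */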
198 static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
199 {
200 cap_t caps;
201 cap_value_t cap_list[1] = { CAP_IPC_LOCK };
202
203 caps = cap_get_proc();
204 ASSERT_NE(caps, NULL);
205 ASSERT_NE(-1,
206 cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
207 ASSERT_NE(-1, cap_set_proc(caps));
208 cap_free(caps);
209 }
210
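/* Scan /proc/<pid>/status for "var" (e.g. "VmPin:"); return its value in kB */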
211 static long get_proc_status_value(pid_t pid, const char *var)
212 {
213 FILE *fp;
214 char buf[80], tag[80];
215 long val = -1;
216
217 snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
218 fp = fopen(buf, "r");
219 if (!fp)
220 return val;
221
222 while (fgets(buf, sizeof(buf), fp))
223 if (fscanf(fp, "%s %ld\n", tag, &val) == 2 && !strcmp(tag, var))
224 break;
225
226 fclose(fp);
227 return val;
228 }
229
230 static long get_vm_pinned(pid_t pid)
231 {
232 return get_proc_status_value(pid, "VmPin:");
233 }
234
235 static long get_vm_locked(pid_t pid)
236 {
237 return get_proc_status_value(pid, "VmLck:");
238 }
239
240 FIXTURE(change_process)
241 {
242 int fd;
243 uint32_t ioas_id;
244 };
245
246 FIXTURE_VARIANT(change_process)
247 {
248 int accounting;
249 };
250
251 FIXTURE_SETUP(change_process)
252 {
253 self->fd = open("/dev/iommu", O_RDWR);
254 ASSERT_NE(-1, self->fd);
255
256 drop_cap_ipc_lock(_metadata);
257 if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
258 struct iommu_option set_limit_cmd = {
259 .size = sizeof(set_limit_cmd),
260 .option_id = IOMMU_OPTION_RLIMIT_MODE,
261 .op = IOMMU_OPTION_OP_SET,
262 .val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
263 };
264 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
265 }
266
267 test_ioctl_ioas_alloc(&self->ioas_id);
268 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
269 }
270
271 FIXTURE_TEARDOWN(change_process)
272 {
273 teardown_iommufd(self->fd, _metadata);
274 }
275
276 FIXTURE_VARIANT_ADD(change_process, account_none)
277 {
278 .accounting = IOPT_PAGES_ACCOUNT_NONE,
279 };
280
281 FIXTURE_VARIANT_ADD(change_process, account_user)
282 {
283 .accounting = IOPT_PAGES_ACCOUNT_USER,
284 };
285
286 FIXTURE_VARIANT_ADD(change_process, account_mm)
287 {
288 .accounting = IOPT_PAGES_ACCOUNT_MM,
289 };
290
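/*
 * IOMMU_IOAS_CHANGE_PROCESS moves the pinned-page accounting of file-backed
 * mappings to the calling process; an IOAS with non-file mappings is rejected.
 */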
291 TEST_F(change_process, basic)
292 {
293 pid_t parent = getpid();
294 pid_t child;
295 __u64 iova;
296 struct iommu_ioas_change_process cmd = {
297 .size = sizeof(cmd),
298 };
299
300 /* Expect failure if non-file maps exist */
301 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
302 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
303 test_ioctl_ioas_unmap(iova, PAGE_SIZE);
304
305 /* Change process works in current process. */
306 test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
307 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
308
309 /* Change process works in another process */
310 child = fork();
311 if (!child) {
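/* /proc/<pid>/status reports VmPin/VmLck in kB, hence PAGE_SIZE / 1024 */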
312 int nlock = PAGE_SIZE / 1024;
313
314 /* Parent accounts for locked memory before */
315 ASSERT_EQ(nlock, get_vm_pinned(parent));
316 if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
317 ASSERT_EQ(nlock, get_vm_locked(parent));
318 ASSERT_EQ(0, get_vm_pinned(getpid()));
319 ASSERT_EQ(0, get_vm_locked(getpid()));
320
321 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
322
323 /* Child accounts for locked memory after */
324 ASSERT_EQ(0, get_vm_pinned(parent));
325 ASSERT_EQ(0, get_vm_locked(parent));
326 ASSERT_EQ(nlock, get_vm_pinned(getpid()));
327 if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
328 ASSERT_EQ(nlock, get_vm_locked(getpid()));
329
330 exit(0);
331 }
332 ASSERT_NE(-1, child);
333 ASSERT_EQ(child, waitpid(child, NULL, 0));
334 }
335
336 FIXTURE(iommufd_ioas)
337 {
338 int fd;
339 uint32_t ioas_id;
340 uint32_t stdev_id;
341 uint32_t hwpt_id;
342 uint32_t device_id;
343 uint64_t base_iova;
344 uint32_t device_pasid_id;
345 };
346
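/*
 * Variants control how many mock domains are attached, an optional temporary
 * pinned-page limit, and whether a PASID-capable mock device is also created.
 */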
347 FIXTURE_VARIANT(iommufd_ioas)
348 {
349 unsigned int mock_domains;
350 unsigned int memory_limit;
351 bool pasid_capable;
352 };
353
354 FIXTURE_SETUP(iommufd_ioas)
355 {
356 unsigned int i;
357
358
359 self->fd = open("/dev/iommu", O_RDWR);
360 ASSERT_NE(-1, self->fd);
361 test_ioctl_ioas_alloc(&self->ioas_id);
362
363 if (!variant->memory_limit) {
364 test_ioctl_set_default_memory_limit();
365 } else {
366 test_ioctl_set_temp_memory_limit(variant->memory_limit);
367 }
368
369 for (i = 0; i != variant->mock_domains; i++) {
370 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
371 &self->hwpt_id, &self->device_id);
372 test_cmd_dev_check_cache_all(self->device_id,
373 IOMMU_TEST_DEV_CACHE_DEFAULT);
374 self->base_iova = MOCK_APERTURE_START;
375 }
376
377 if (variant->pasid_capable)
378 test_cmd_mock_domain_flags(self->ioas_id,
379 MOCK_FLAGS_DEVICE_PASID,
380 NULL, NULL,
381 &self->device_pasid_id);
382 }
383
384 FIXTURE_TEARDOWN(iommufd_ioas)
385 {
386 test_ioctl_set_default_memory_limit();
387 teardown_iommufd(self->fd, _metadata);
388 }
389
390 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
391 {
392 };
393
394 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
395 {
396 .mock_domains = 1,
397 .pasid_capable = true,
398 };
399
400 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
401 {
402 .mock_domains = 2,
403 };
404
405 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
406 {
407 .mock_domains = 1,
408 .memory_limit = 16,
409 };
410
411 TEST_F(iommufd_ioas, ioas_auto_destroy)
412 {
413 }
414
415 TEST_F(iommufd_ioas, ioas_destroy)
416 {
417 if (self->stdev_id) {
418 /* IOAS cannot be freed while a device has a HWPT using it */
419 EXPECT_ERRNO(EBUSY,
420 _test_ioctl_destroy(self->fd, self->ioas_id));
421 } else {
422 /* Can allocate and manually free an IOAS table */
423 test_ioctl_destroy(self->ioas_id);
424 }
425 }
426
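/*
 * Exercise nested HWPT allocation on top of a NEST_PARENT HWPT: negative
 * paths first, then two nested selftest HWPTs plus an IOPF-capable one,
 * IOTLB invalidation through IOMMU_HWPT_INVALIDATE, and finally teardown
 * in dependency order.
 */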
427 TEST_F(iommufd_ioas, alloc_hwpt_nested)
428 {
429 const uint32_t min_data_len =
430 offsetofend(struct iommu_hwpt_selftest, iotlb);
431 struct iommu_hwpt_selftest data = {
432 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
433 };
434 struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
435 uint32_t nested_hwpt_id[2] = {};
436 uint32_t num_inv;
437 uint32_t parent_hwpt_id = 0;
438 uint32_t parent_hwpt_id_not_work = 0;
439 uint32_t test_hwpt_id = 0;
440 uint32_t iopf_hwpt_id;
441 uint32_t fault_id;
442 uint32_t fault_fd;
443
444 if (self->device_id) {
445 /* Negative tests */
446 test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
447 &test_hwpt_id);
448 test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
449 &test_hwpt_id);
450 test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
451 IOMMU_HWPT_ALLOC_NEST_PARENT |
452 IOMMU_HWPT_FAULT_ID_VALID,
453 &test_hwpt_id);
454
455 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
456 IOMMU_HWPT_ALLOC_NEST_PARENT,
457 &parent_hwpt_id);
458
459 test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
460 &parent_hwpt_id_not_work);
461
462 /* Negative nested tests */
463 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
464 parent_hwpt_id, 0,
465 &nested_hwpt_id[0],
466 IOMMU_HWPT_DATA_NONE, &data,
467 sizeof(data));
468 test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
469 parent_hwpt_id, 0,
470 &nested_hwpt_id[0],
471 IOMMU_HWPT_DATA_SELFTEST + 1, &data,
472 sizeof(data));
473 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
474 parent_hwpt_id, 0,
475 &nested_hwpt_id[0],
476 IOMMU_HWPT_DATA_SELFTEST, &data,
477 min_data_len - 1);
478 test_err_hwpt_alloc_nested(EFAULT, self->device_id,
479 parent_hwpt_id, 0,
480 &nested_hwpt_id[0],
481 IOMMU_HWPT_DATA_SELFTEST, NULL,
482 sizeof(data));
483 test_err_hwpt_alloc_nested(
484 EOPNOTSUPP, self->device_id, parent_hwpt_id,
485 IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
486 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
487 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
488 parent_hwpt_id_not_work, 0,
489 &nested_hwpt_id[0],
490 IOMMU_HWPT_DATA_SELFTEST, &data,
491 sizeof(data));
492
493 /* Allocate two nested hwpts sharing one common parent hwpt */
494 test_ioctl_fault_alloc(&fault_id, &fault_fd);
495 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
496 &nested_hwpt_id[0],
497 IOMMU_HWPT_DATA_SELFTEST, &data,
498 sizeof(data));
499 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
500 &nested_hwpt_id[1],
501 IOMMU_HWPT_DATA_SELFTEST, &data,
502 sizeof(data));
503 test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
504 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
505 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
506 &data, sizeof(data));
507 test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
508 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
509 IOMMU_HWPT_DATA_SELFTEST, &data,
510 sizeof(data));
511 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
512 IOMMU_TEST_IOTLB_DEFAULT);
513 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
514 IOMMU_TEST_IOTLB_DEFAULT);
515
516 /* Negative test: a nested hwpt on top of a nested hwpt */
517 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
518 nested_hwpt_id[0], 0, &test_hwpt_id,
519 IOMMU_HWPT_DATA_SELFTEST, &data,
520 sizeof(data));
521 /* Negative test: parent hwpt now cannot be freed */
522 EXPECT_ERRNO(EBUSY,
523 _test_ioctl_destroy(self->fd, parent_hwpt_id));
524
525 /* hwpt_invalidate does not support a parent hwpt */
526 num_inv = 1;
527 test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
528 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
529 sizeof(*inv_reqs), &num_inv);
530 assert(!num_inv);
531
532 /* Check data_type by passing zero-length array */
533 num_inv = 0;
534 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
535 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
536 sizeof(*inv_reqs), &num_inv);
537 assert(!num_inv);
538
539 /* Negative test: Invalid data_type */
540 num_inv = 1;
541 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
542 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
543 sizeof(*inv_reqs), &num_inv);
544 assert(!num_inv);
545
546 /* Negative test: structure size sanity */
547 num_inv = 1;
548 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
549 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
550 sizeof(*inv_reqs) + 1, &num_inv);
551 assert(!num_inv);
552
553 num_inv = 1;
554 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
555 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
556 1, &num_inv);
557 assert(!num_inv);
558
559 /* Negative test: invalid flag is passed */
560 num_inv = 1;
561 inv_reqs[0].flags = 0xffffffff;
562 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
563 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
564 sizeof(*inv_reqs), &num_inv);
565 assert(!num_inv);
566
567 /* Negative test: invalid data_uptr when array is not empty */
568 num_inv = 1;
569 inv_reqs[0].flags = 0;
570 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
571 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
572 sizeof(*inv_reqs), &num_inv);
573 assert(!num_inv);
574
575 /* Negative test: invalid entry_len when array is not empty */
576 num_inv = 1;
577 inv_reqs[0].flags = 0;
578 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
579 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
580 0, &num_inv);
581 assert(!num_inv);
582
583 /* Negative test: invalid iotlb_id */
584 num_inv = 1;
585 inv_reqs[0].flags = 0;
586 inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
587 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
588 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
589 sizeof(*inv_reqs), &num_inv);
590 assert(!num_inv);
591
592 /*
593 * Invalidate the 1st iotlb entry but fail the 2nd request
594 * due to invalid flags configuration in the 2nd request.
595 */
596 num_inv = 2;
597 inv_reqs[0].flags = 0;
598 inv_reqs[0].iotlb_id = 0;
599 inv_reqs[1].flags = 0xffffffff;
600 inv_reqs[1].iotlb_id = 1;
601 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
602 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
603 sizeof(*inv_reqs), &num_inv);
604 assert(num_inv == 1);
605 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
606 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
607 IOMMU_TEST_IOTLB_DEFAULT);
608 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
609 IOMMU_TEST_IOTLB_DEFAULT);
610 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
611 IOMMU_TEST_IOTLB_DEFAULT);
612
613 /*
614 * Invalidate the 1st iotlb entry but fail the 2nd request
615 * due to invalid iotlb_id configuration in the 2nd request.
616 */
617 num_inv = 2;
618 inv_reqs[0].flags = 0;
619 inv_reqs[0].iotlb_id = 0;
620 inv_reqs[1].flags = 0;
621 inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
622 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
623 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
624 sizeof(*inv_reqs), &num_inv);
625 assert(num_inv == 1);
626 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
627 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
628 IOMMU_TEST_IOTLB_DEFAULT);
629 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
630 IOMMU_TEST_IOTLB_DEFAULT);
631 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
632 IOMMU_TEST_IOTLB_DEFAULT);
633
634 /* Invalidate the 2nd iotlb entry and verify */
635 num_inv = 1;
636 inv_reqs[0].flags = 0;
637 inv_reqs[0].iotlb_id = 1;
638 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
639 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
640 sizeof(*inv_reqs), &num_inv);
641 assert(num_inv == 1);
642 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
643 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
644 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
645 IOMMU_TEST_IOTLB_DEFAULT);
646 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
647 IOMMU_TEST_IOTLB_DEFAULT);
648
649 /* Invalidate the 3rd and 4th iotlb entries and verify */
650 num_inv = 2;
651 inv_reqs[0].flags = 0;
652 inv_reqs[0].iotlb_id = 2;
653 inv_reqs[1].flags = 0;
654 inv_reqs[1].iotlb_id = 3;
655 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
656 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
657 sizeof(*inv_reqs), &num_inv);
658 assert(num_inv == 2);
659 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
660
661 /* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
662 num_inv = 1;
663 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
664 test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
665 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
666 sizeof(*inv_reqs), &num_inv);
667 assert(num_inv == 1);
668 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
669
670 /* Attach device to nested_hwpt_id[0] that then will be busy */
671 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
672 EXPECT_ERRNO(EBUSY,
673 _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
674
675 /* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
676 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
677 EXPECT_ERRNO(EBUSY,
678 _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
679 test_ioctl_destroy(nested_hwpt_id[0]);
680
681 /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
682 test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
683 EXPECT_ERRNO(EBUSY,
684 _test_ioctl_destroy(self->fd, iopf_hwpt_id));
685 /* Trigger an IOPF on the device */
686 test_cmd_trigger_iopf(self->device_id, fault_fd);
687
688 /* Detach from nested_hwpt_id[1] and destroy it */
689 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
690 test_ioctl_destroy(nested_hwpt_id[1]);
691 test_ioctl_destroy(iopf_hwpt_id);
692
693 /* Detach from the parent hw_pagetable and destroy it */
694 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
695 test_ioctl_destroy(parent_hwpt_id);
696 test_ioctl_destroy(parent_hwpt_id_not_work);
697 close(fault_fd);
698 test_ioctl_destroy(fault_id);
699 } else {
700 test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
701 &parent_hwpt_id);
702 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
703 parent_hwpt_id, 0,
704 &nested_hwpt_id[0],
705 IOMMU_HWPT_DATA_SELFTEST, &data,
706 sizeof(data));
707 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
708 parent_hwpt_id, 0,
709 &nested_hwpt_id[1],
710 IOMMU_HWPT_DATA_SELFTEST, &data,
711 sizeof(data));
712 test_err_mock_domain_replace(ENOENT, self->stdev_id,
713 nested_hwpt_id[0]);
714 test_err_mock_domain_replace(ENOENT, self->stdev_id,
715 nested_hwpt_id[1]);
716 }
717 }
718
719 TEST_F(iommufd_ioas, hwpt_attach)
720 {
721 /* Create a device attached directly to a hwpt */
722 if (self->stdev_id) {
723 test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
724 } else {
725 test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
726 }
727 }
728
729 TEST_F(iommufd_ioas, ioas_area_destroy)
730 {
731 /* Adding an area does not change ability to destroy */
732 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
733 if (self->stdev_id)
734 EXPECT_ERRNO(EBUSY,
735 _test_ioctl_destroy(self->fd, self->ioas_id));
736 else
737 test_ioctl_destroy(self->ioas_id);
738 }
739
740 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
741 {
742 int i;
743
744 /* Can allocate and automatically free an IOAS table with many areas */
745 for (i = 0; i != 10; i++) {
746 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
747 self->base_iova + i * PAGE_SIZE);
748 }
749 }
750
751 TEST_F(iommufd_ioas, get_hw_info)
752 {
753 struct iommu_test_hw_info buffer_exact;
754 struct iommu_test_hw_info_buffer_larger {
755 struct iommu_test_hw_info info;
756 uint64_t trailing_bytes;
757 } buffer_larger;
758
759 if (self->device_id) {
760 uint8_t max_pasid = 0;
761
762 /* Provide a zero-size user_buffer */
763 test_cmd_get_hw_info(self->device_id,
764 IOMMU_HW_INFO_TYPE_DEFAULT, NULL, 0);
765 /* Provide a user_buffer with exact size */
766 test_cmd_get_hw_info(self->device_id,
767 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
768 sizeof(buffer_exact));
769
770 /* Request for a wrong data_type, and a correct one */
771 test_err_get_hw_info(EOPNOTSUPP, self->device_id,
772 IOMMU_HW_INFO_TYPE_SELFTEST + 1,
773 &buffer_exact, sizeof(buffer_exact));
774 test_cmd_get_hw_info(self->device_id,
775 IOMMU_HW_INFO_TYPE_SELFTEST, &buffer_exact,
776 sizeof(buffer_exact));
777 /*
778 * Provide a user_buffer with a size larger than the exact size to check
779 * that the kernel zeros the trailing bytes.
780 */
781 test_cmd_get_hw_info(self->device_id,
782 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
783 sizeof(buffer_larger));
784 /*
785 * Provide a user_buffer with a size smaller than the exact size to check
786 * that the fields within the size range still get updated.
787 */
788 test_cmd_get_hw_info(self->device_id,
789 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
790 offsetofend(struct iommu_test_hw_info,
791 flags));
792 test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
793 ASSERT_EQ(0, max_pasid);
794 if (variant->pasid_capable) {
795 test_cmd_get_hw_info_pasid(self->device_pasid_id,
796 &max_pasid);
797 ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid);
798 }
799 } else {
800 test_err_get_hw_info(ENOENT, self->device_id,
801 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
802 sizeof(buffer_exact));
803 test_err_get_hw_info(ENOENT, self->device_id,
804 IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
805 sizeof(buffer_larger));
806 }
807 }
808
809 TEST_F(iommufd_ioas, area)
810 {
811 int i;
812
813 /* Unmap fails if nothing is mapped */
814 for (i = 0; i != 10; i++)
815 test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
816
817 /* Unmap works */
818 for (i = 0; i != 10; i++)
819 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
820 self->base_iova + i * PAGE_SIZE);
821 for (i = 0; i != 10; i++)
822 test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
823 PAGE_SIZE);
824
825 /* Split fails */
826 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
827 self->base_iova + 16 * PAGE_SIZE);
828 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
829 PAGE_SIZE);
830 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
831 PAGE_SIZE);
832
833 /* Over map fails */
834 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
835 self->base_iova + 16 * PAGE_SIZE);
836 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
837 self->base_iova + 16 * PAGE_SIZE);
838 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
839 self->base_iova + 17 * PAGE_SIZE);
840 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
841 self->base_iova + 15 * PAGE_SIZE);
842 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
843 self->base_iova + 15 * PAGE_SIZE);
844
845 /* unmap all works */
846 test_ioctl_ioas_unmap(0, UINT64_MAX);
847
848 /* Unmap all succeeds on an empty IOAS */
849 test_ioctl_ioas_unmap(0, UINT64_MAX);
850 }
851
852 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
853 {
854 uint64_t unmap_len;
855 int i;
856
857 /* Give no_domain some space to rewind base_iova */
858 self->base_iova += 4 * PAGE_SIZE;
859
860 for (i = 0; i != 4; i++)
861 test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
862 self->base_iova + i * 16 * PAGE_SIZE);
863
864 /* Unmap not fully contained area doesn't work */
865 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
866 8 * PAGE_SIZE);
867 test_err_ioctl_ioas_unmap(ENOENT,
868 self->base_iova + 3 * 16 * PAGE_SIZE +
869 8 * PAGE_SIZE - 4 * PAGE_SIZE,
870 8 * PAGE_SIZE);
871
872 /* Unmap fully contained areas works */
873 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
874 self->base_iova - 4 * PAGE_SIZE,
875 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
876 4 * PAGE_SIZE,
877 &unmap_len));
878 ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
879 }
880
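/*
 * Automatically allocated IOVAs must be aligned to the largest power of two
 * dividing the length, must skip reserved ranges, and must stay inside any
 * allowed ranges.
 */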
881 TEST_F(iommufd_ioas, area_auto_iova)
882 {
883 struct iommu_test_cmd test_cmd = {
884 .size = sizeof(test_cmd),
885 .op = IOMMU_TEST_OP_ADD_RESERVED,
886 .id = self->ioas_id,
887 .add_reserved = { .start = PAGE_SIZE * 4,
888 .length = PAGE_SIZE * 100 },
889 };
890 struct iommu_iova_range ranges[1] = {};
891 struct iommu_ioas_allow_iovas allow_cmd = {
892 .size = sizeof(allow_cmd),
893 .ioas_id = self->ioas_id,
894 .num_iovas = 1,
895 .allowed_iovas = (uintptr_t)ranges,
896 };
897 __u64 iovas[10];
898 int i;
899
900 /* Simple 4k pages */
901 for (i = 0; i != 10; i++)
902 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
903 for (i = 0; i != 10; i++)
904 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
905
906 /* Kernel automatically aligns IOVAs properly */
907 for (i = 0; i != 10; i++) {
908 size_t length = PAGE_SIZE * (i + 1);
909
910 if (self->stdev_id) {
911 test_ioctl_ioas_map(buffer, length, &iovas[i]);
912 } else {
913 test_ioctl_ioas_map((void *)(1UL << 31), length,
914 &iovas[i]);
915 }
916 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
917 }
918 for (i = 0; i != 10; i++)
919 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
920
921 /* Avoids a reserved region */
922 ASSERT_EQ(0,
923 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
924 &test_cmd));
925 for (i = 0; i != 10; i++) {
926 size_t length = PAGE_SIZE * (i + 1);
927
928 test_ioctl_ioas_map(buffer, length, &iovas[i]);
929 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
930 EXPECT_EQ(false,
931 iovas[i] > test_cmd.add_reserved.start &&
932 iovas[i] <
933 test_cmd.add_reserved.start +
934 test_cmd.add_reserved.length);
935 }
936 for (i = 0; i != 10; i++)
937 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
938
939 /* Allowed region intersects with a reserved region */
940 ranges[0].start = PAGE_SIZE;
941 ranges[0].last = PAGE_SIZE * 600;
942 EXPECT_ERRNO(EADDRINUSE,
943 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
944
945 /* Allocate from an allowed region */
946 if (self->stdev_id) {
947 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
948 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
949 } else {
950 ranges[0].start = PAGE_SIZE * 200;
951 ranges[0].last = PAGE_SIZE * 600 - 1;
952 }
953 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
954 for (i = 0; i != 10; i++) {
955 size_t length = PAGE_SIZE * (i + 1);
956
957 test_ioctl_ioas_map(buffer, length, &iovas[i]);
958 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
959 EXPECT_EQ(true, iovas[i] >= ranges[0].start);
960 EXPECT_EQ(true, iovas[i] <= ranges[0].last);
961 EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
962 EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
963 }
964 for (i = 0; i != 10; i++)
965 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
966 }
967
968 /* https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com */
969 TEST_F(iommufd_ioas, reserved_overflow)
970 {
971 struct iommu_test_cmd test_cmd = {
972 .size = sizeof(test_cmd),
973 .op = IOMMU_TEST_OP_ADD_RESERVED,
974 .id = self->ioas_id,
975 .add_reserved.start = 6,
976 };
977 unsigned int map_len;
978 __u64 iova;
979
980 if (PAGE_SIZE == 4096) {
981 test_cmd.add_reserved.length = 0xffffffffffff8001;
982 map_len = 0x5000;
983 } else {
984 test_cmd.add_reserved.length =
985 0xffffffffffffffff - MOCK_PAGE_SIZE * 16;
986 map_len = MOCK_PAGE_SIZE * 10;
987 }
988
989 ASSERT_EQ(0,
990 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
991 &test_cmd));
992 test_err_ioctl_ioas_map(ENOSPC, buffer, map_len, &iova);
993 }
994
995 TEST_F(iommufd_ioas, area_allowed)
996 {
997 struct iommu_test_cmd test_cmd = {
998 .size = sizeof(test_cmd),
999 .op = IOMMU_TEST_OP_ADD_RESERVED,
1000 .id = self->ioas_id,
1001 .add_reserved = { .start = PAGE_SIZE * 4,
1002 .length = PAGE_SIZE * 100 },
1003 };
1004 struct iommu_iova_range ranges[1] = {};
1005 struct iommu_ioas_allow_iovas allow_cmd = {
1006 .size = sizeof(allow_cmd),
1007 .ioas_id = self->ioas_id,
1008 .num_iovas = 1,
1009 .allowed_iovas = (uintptr_t)ranges,
1010 };
1011
1012 /* Reserved intersects an allowed */
1013 allow_cmd.num_iovas = 1;
1014 ranges[0].start = self->base_iova;
1015 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
1016 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1017 test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
1018 test_cmd.add_reserved.length = PAGE_SIZE;
1019 EXPECT_ERRNO(EADDRINUSE,
1020 ioctl(self->fd,
1021 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1022 &test_cmd));
1023 allow_cmd.num_iovas = 0;
1024 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1025
1026 /* Allowed intersects a reserved */
1027 ASSERT_EQ(0,
1028 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1029 &test_cmd));
1030 allow_cmd.num_iovas = 1;
1031 ranges[0].start = self->base_iova;
1032 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
1033 EXPECT_ERRNO(EADDRINUSE,
1034 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1035 }
1036
1037 TEST_F(iommufd_ioas, copy_area)
1038 {
1039 struct iommu_ioas_copy copy_cmd = {
1040 .size = sizeof(copy_cmd),
1041 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1042 .dst_ioas_id = self->ioas_id,
1043 .src_ioas_id = self->ioas_id,
1044 .length = PAGE_SIZE,
1045 };
1046
1047 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
1048
1049 /* Copy inside a single IOAS */
1050 copy_cmd.src_iova = self->base_iova;
1051 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
1052 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1053
1054 /* Copy between IOAS's */
1055 copy_cmd.src_iova = self->base_iova;
1056 copy_cmd.dst_iova = 0;
1057 test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
1058 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1059 }
1060
1061 TEST_F(iommufd_ioas, iova_ranges)
1062 {
1063 struct iommu_test_cmd test_cmd = {
1064 .size = sizeof(test_cmd),
1065 .op = IOMMU_TEST_OP_ADD_RESERVED,
1066 .id = self->ioas_id,
1067 .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
1068 };
1069 struct iommu_iova_range *ranges = buffer;
1070 struct iommu_ioas_iova_ranges ranges_cmd = {
1071 .size = sizeof(ranges_cmd),
1072 .ioas_id = self->ioas_id,
1073 .num_iovas = BUFFER_SIZE / sizeof(*ranges),
1074 .allowed_iovas = (uintptr_t)ranges,
1075 };
1076
1077 /* Range can be read */
1078 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1079 EXPECT_EQ(1, ranges_cmd.num_iovas);
1080 if (!self->stdev_id) {
1081 EXPECT_EQ(0, ranges[0].start);
1082 EXPECT_EQ(SIZE_MAX, ranges[0].last);
1083 EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
1084 } else {
1085 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1086 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1087 EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
1088 }
1089
1090 /* Buffer too small */
1091 memset(ranges, 0, BUFFER_SIZE);
1092 ranges_cmd.num_iovas = 0;
1093 EXPECT_ERRNO(EMSGSIZE,
1094 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1095 EXPECT_EQ(1, ranges_cmd.num_iovas);
1096 EXPECT_EQ(0, ranges[0].start);
1097 EXPECT_EQ(0, ranges[0].last);
1098
1099 /* 2 ranges */
1100 ASSERT_EQ(0,
1101 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1102 &test_cmd));
1103 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
1104 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1105 if (!self->stdev_id) {
1106 EXPECT_EQ(2, ranges_cmd.num_iovas);
1107 EXPECT_EQ(0, ranges[0].start);
1108 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1109 EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
1110 EXPECT_EQ(SIZE_MAX, ranges[1].last);
1111 } else {
1112 EXPECT_EQ(1, ranges_cmd.num_iovas);
1113 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1114 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1115 }
1116
1117 /* Buffer too small */
1118 memset(ranges, 0, BUFFER_SIZE);
1119 ranges_cmd.num_iovas = 1;
1120 if (!self->stdev_id) {
1121 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
1122 &ranges_cmd));
1123 EXPECT_EQ(2, ranges_cmd.num_iovas);
1124 EXPECT_EQ(0, ranges[0].start);
1125 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1126 } else {
1127 ASSERT_EQ(0,
1128 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1129 EXPECT_EQ(1, ranges_cmd.num_iovas);
1130 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1131 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1132 }
1133 EXPECT_EQ(0, ranges[1].start);
1134 EXPECT_EQ(0, ranges[1].last);
1135 }
1136
1137 TEST_F(iommufd_ioas, access_domain_destory)
1138 {
1139 struct iommu_test_cmd access_cmd = {
1140 .size = sizeof(access_cmd),
1141 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1142 .access_pages = { .iova = self->base_iova + PAGE_SIZE,
1143 .length = PAGE_SIZE},
1144 };
1145 size_t buf_size = 2 * HUGEPAGE_SIZE;
1146 uint8_t *buf;
1147
1148 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
1149 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
1150 0);
1151 ASSERT_NE(MAP_FAILED, buf);
1152 test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
1153
1154 test_cmd_create_access(self->ioas_id, &access_cmd.id,
1155 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1156 access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
1157 ASSERT_EQ(0,
1158 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1159 &access_cmd));
1160
1161 /* Causes a complicated unpin across a huge page boundary */
1162 if (self->stdev_id)
1163 test_ioctl_destroy(self->stdev_id);
1164
1165 test_cmd_destroy_access_pages(
1166 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1167 test_cmd_destroy_access(access_cmd.id);
1168 ASSERT_EQ(0, munmap(buf, buf_size));
1169 }
1170
1171 TEST_F(iommufd_ioas, access_pin)
1172 {
1173 struct iommu_test_cmd access_cmd = {
1174 .size = sizeof(access_cmd),
1175 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1176 .access_pages = { .iova = MOCK_APERTURE_START,
1177 .length = BUFFER_SIZE,
1178 .uptr = (uintptr_t)buffer },
1179 };
1180 struct iommu_test_cmd check_map_cmd = {
1181 .size = sizeof(check_map_cmd),
1182 .op = IOMMU_TEST_OP_MD_CHECK_MAP,
1183 .check_map = { .iova = MOCK_APERTURE_START,
1184 .length = BUFFER_SIZE,
1185 .uptr = (uintptr_t)buffer },
1186 };
1187 uint32_t access_pages_id;
1188 unsigned int npages;
1189
1190 test_cmd_create_access(self->ioas_id, &access_cmd.id,
1191 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1192
1193 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
1194 uint32_t mock_stdev_id;
1195 uint32_t mock_hwpt_id;
1196
1197 access_cmd.access_pages.length = npages * PAGE_SIZE;
1198
1199 /* Single map/unmap */
1200 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1201 MOCK_APERTURE_START);
1202 ASSERT_EQ(0, ioctl(self->fd,
1203 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1204 &access_cmd));
1205 test_cmd_destroy_access_pages(
1206 access_cmd.id,
1207 access_cmd.access_pages.out_access_pages_id);
1208
1209 /* Double user */
1210 ASSERT_EQ(0, ioctl(self->fd,
1211 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1212 &access_cmd));
1213 access_pages_id = access_cmd.access_pages.out_access_pages_id;
1214 ASSERT_EQ(0, ioctl(self->fd,
1215 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1216 &access_cmd));
1217 test_cmd_destroy_access_pages(
1218 access_cmd.id,
1219 access_cmd.access_pages.out_access_pages_id);
1220 test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
1221
1222 /* Add/remove a domain with a user */
1223 ASSERT_EQ(0, ioctl(self->fd,
1224 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1225 &access_cmd));
1226 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1227 &mock_hwpt_id, NULL);
1228 check_map_cmd.id = mock_hwpt_id;
1229 ASSERT_EQ(0, ioctl(self->fd,
1230 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
1231 &check_map_cmd));
1232
1233 test_ioctl_destroy(mock_stdev_id);
1234 test_cmd_destroy_access_pages(
1235 access_cmd.id,
1236 access_cmd.access_pages.out_access_pages_id);
1237
1238 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1239 }
1240 test_cmd_destroy_access(access_cmd.id);
1241 }
1242
1243 TEST_F(iommufd_ioas, access_pin_unmap)
1244 {
1245 struct iommu_test_cmd access_pages_cmd = {
1246 .size = sizeof(access_pages_cmd),
1247 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1248 .access_pages = { .iova = MOCK_APERTURE_START,
1249 .length = BUFFER_SIZE,
1250 .uptr = (uintptr_t)buffer },
1251 };
1252
1253 test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1254 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1255 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1256 ASSERT_EQ(0,
1257 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1258 &access_pages_cmd));
1259
1260 /* Trigger the unmap op */
1261 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1262
1263 /* kernel removed the item for us */
1264 test_err_destroy_access_pages(
1265 ENOENT, access_pages_cmd.id,
1266 access_pages_cmd.access_pages.out_access_pages_id);
1267 }
1268
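/*
 * Sweep small reads and writes through an access object at byte-granular
 * IOVAs that cross a page boundary, comparing against the source buffer,
 * then do a single multi-page transfer.
 */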
1269 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1270 unsigned int access_id, uint64_t iova,
1271 unsigned int def_flags)
1272 {
1273 uint16_t tmp[32];
1274 struct iommu_test_cmd access_cmd = {
1275 .size = sizeof(access_cmd),
1276 .op = IOMMU_TEST_OP_ACCESS_RW,
1277 .id = access_id,
1278 .access_rw = { .uptr = (uintptr_t)tmp },
1279 };
1280 uint16_t *buffer16 = buffer;
1281 unsigned int i;
1282 void *tmp2;
1283
1284 for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1285 buffer16[i] = rand();
1286
1287 for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1288 access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1289 access_cmd.access_rw.iova++) {
1290 for (access_cmd.access_rw.length = 1;
1291 access_cmd.access_rw.length < sizeof(tmp);
1292 access_cmd.access_rw.length++) {
1293 access_cmd.access_rw.flags = def_flags;
1294 ASSERT_EQ(0, ioctl(fd,
1295 _IOMMU_TEST_CMD(
1296 IOMMU_TEST_OP_ACCESS_RW),
1297 &access_cmd));
1298 ASSERT_EQ(0,
1299 memcmp(buffer + (access_cmd.access_rw.iova -
1300 iova),
1301 tmp, access_cmd.access_rw.length));
1302
1303 for (i = 0; i != ARRAY_SIZE(tmp); i++)
1304 tmp[i] = rand();
1305 access_cmd.access_rw.flags = def_flags |
1306 MOCK_ACCESS_RW_WRITE;
1307 ASSERT_EQ(0, ioctl(fd,
1308 _IOMMU_TEST_CMD(
1309 IOMMU_TEST_OP_ACCESS_RW),
1310 &access_cmd));
1311 ASSERT_EQ(0,
1312 memcmp(buffer + (access_cmd.access_rw.iova -
1313 iova),
1314 tmp, access_cmd.access_rw.length));
1315 }
1316 }
1317
1318 /* Multi-page test */
1319 tmp2 = malloc(BUFFER_SIZE);
1320 ASSERT_NE(NULL, tmp2);
1321 access_cmd.access_rw.iova = iova;
1322 access_cmd.access_rw.length = BUFFER_SIZE;
1323 access_cmd.access_rw.flags = def_flags;
1324 access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1325 ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1326 &access_cmd));
1327 ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1328 free(tmp2);
1329 }
1330
1331 TEST_F(iommufd_ioas, access_rw)
1332 {
1333 __u32 access_id;
1334 __u64 iova;
1335
1336 test_cmd_create_access(self->ioas_id, &access_id, 0);
1337 test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1338 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1339 check_access_rw(_metadata, self->fd, access_id, iova,
1340 MOCK_ACCESS_RW_SLOW_PATH);
1341 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1342 test_cmd_destroy_access(access_id);
1343 }
1344
1345 TEST_F(iommufd_ioas, access_rw_unaligned)
1346 {
1347 __u32 access_id;
1348 __u64 iova;
1349
1350 test_cmd_create_access(self->ioas_id, &access_id, 0);
1351
1352 /* Unaligned pages */
1353 iova = self->base_iova + MOCK_PAGE_SIZE;
1354 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1355 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1356 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1357 test_cmd_destroy_access(access_id);
1358 }
1359
1360 TEST_F(iommufd_ioas, fork_gone)
1361 {
1362 __u32 access_id;
1363 pid_t child;
1364
1365 test_cmd_create_access(self->ioas_id, &access_id, 0);
1366
1367 /* Create a mapping with a different mm */
1368 child = fork();
1369 if (!child) {
1370 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1371 MOCK_APERTURE_START);
1372 exit(0);
1373 }
1374 ASSERT_NE(-1, child);
1375 ASSERT_EQ(child, waitpid(child, NULL, 0));
1376
1377 if (self->stdev_id) {
1378 /*
1379 * If a domain already existed then everything was pinned within
1380 * the fork, so this copies from one domain to another.
1381 */
1382 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1383 check_access_rw(_metadata, self->fd, access_id,
1384 MOCK_APERTURE_START, 0);
1385
1386 } else {
1387 /*
1388 * Otherwise we need to actually pin pages which can't happen
1389 * since the fork is gone.
1390 */
1391 test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1392 }
1393
1394 test_cmd_destroy_access(access_id);
1395 }
1396
1397 TEST_F(iommufd_ioas, fork_present)
1398 {
1399 __u32 access_id;
1400 int pipefds[2];
1401 uint64_t tmp;
1402 pid_t child;
1403 int efd;
1404
1405 test_cmd_create_access(self->ioas_id, &access_id, 0);
1406
1407 ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1408 efd = eventfd(0, EFD_CLOEXEC);
1409 ASSERT_NE(-1, efd);
1410
1411 /* Create a mapping with a different mm */
1412 child = fork();
1413 if (!child) {
1414 __u64 iova;
1415 uint64_t one = 1;
1416
1417 close(pipefds[1]);
1418 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1419 MOCK_APERTURE_START);
1420 if (write(efd, &one, sizeof(one)) != sizeof(one))
1421 exit(100);
1422 if (read(pipefds[0], &iova, 1) != 1)
1423 exit(100);
1424 exit(0);
1425 }
1426 close(pipefds[0]);
1427 ASSERT_NE(-1, child);
1428 ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1429
1430 /* Read pages from the remote process */
1431 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1432 check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1433
1434 ASSERT_EQ(0, close(pipefds[1]));
1435 ASSERT_EQ(child, waitpid(child, NULL, 0));
1436
1437 test_cmd_destroy_access(access_id);
1438 }
1439
1440 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1441 {
1442 struct iommu_option cmd = {
1443 .size = sizeof(cmd),
1444 .option_id = IOMMU_OPTION_HUGE_PAGES,
1445 .op = IOMMU_OPTION_OP_GET,
1446 .val64 = 3,
1447 .object_id = self->ioas_id,
1448 };
1449
1450 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1451 ASSERT_EQ(1, cmd.val64);
1452
1453 cmd.op = IOMMU_OPTION_OP_SET;
1454 cmd.val64 = 0;
1455 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1456
1457 cmd.op = IOMMU_OPTION_OP_GET;
1458 cmd.val64 = 3;
1459 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1460 ASSERT_EQ(0, cmd.val64);
1461
1462 cmd.op = IOMMU_OPTION_OP_SET;
1463 cmd.val64 = 2;
1464 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1465
1466 cmd.op = IOMMU_OPTION_OP_SET;
1467 cmd.val64 = 1;
1468 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1469 }
1470
1471 TEST_F(iommufd_ioas, ioas_iova_alloc)
1472 {
1473 unsigned int length;
1474 __u64 iova;
1475
1476 for (length = 1; length != PAGE_SIZE * 2; length++) {
1477 if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1478 test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1479 } else {
1480 test_ioctl_ioas_map(buffer, length, &iova);
1481 test_ioctl_ioas_unmap(iova, length);
1482 }
1483 }
1484 }
1485
1486 TEST_F(iommufd_ioas, ioas_align_change)
1487 {
1488 struct iommu_option cmd = {
1489 .size = sizeof(cmd),
1490 .option_id = IOMMU_OPTION_HUGE_PAGES,
1491 .op = IOMMU_OPTION_OP_SET,
1492 .object_id = self->ioas_id,
1493 /* 0 means everything must be aligned to PAGE_SIZE */
1494 .val64 = 0,
1495 };
1496
1497 /*
1498 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1499 * and map are present.
1500 */
1501 if (variant->mock_domains)
1502 return;
1503
1504 /*
1505 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1506 */
1507 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1508 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1509
1510 /* Misalignment is rejected at map time */
1511 test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1512 PAGE_SIZE,
1513 MOCK_APERTURE_START + PAGE_SIZE);
1514 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1515
1516 /* Reduce alignment */
1517 cmd.val64 = 1;
1518 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1519
1520 /* Confirm misalignment is rejected during alignment upgrade */
1521 test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1522 MOCK_APERTURE_START + PAGE_SIZE);
1523 cmd.val64 = 0;
1524 EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1525
1526 test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1527 test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1528 }
1529
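/*
 * Step src_iova in 511 byte increments across the mapped window: copies that
 * fall entirely inside succeed, anything touching unmapped IOVA fails with
 * ENOENT.
 */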
1530 TEST_F(iommufd_ioas, copy_sweep)
1531 {
1532 struct iommu_ioas_copy copy_cmd = {
1533 .size = sizeof(copy_cmd),
1534 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1535 .src_ioas_id = self->ioas_id,
1536 .dst_iova = MOCK_APERTURE_START,
1537 .length = MOCK_PAGE_SIZE,
1538 };
1539 unsigned int dst_ioas_id;
1540 uint64_t last_iova;
1541 uint64_t iova;
1542
1543 test_ioctl_ioas_alloc(&dst_ioas_id);
1544 copy_cmd.dst_ioas_id = dst_ioas_id;
1545
1546 if (variant->mock_domains)
1547 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1548 else
1549 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1550
1551 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1552 MOCK_APERTURE_START);
1553
1554 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1555 iova += 511) {
1556 copy_cmd.src_iova = iova;
1557 if (iova < MOCK_APERTURE_START ||
1558 iova + copy_cmd.length - 1 > last_iova) {
1559 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1560 &copy_cmd));
1561 } else {
1562 ASSERT_EQ(0,
1563 ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1564 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1565 copy_cmd.length);
1566 }
1567 }
1568
1569 test_ioctl_destroy(dst_ioas_id);
1570 }
1571
1572 TEST_F(iommufd_ioas, dmabuf_simple)
1573 {
1574 size_t buf_size = PAGE_SIZE * 4;
1575 __u64 iova;
1576 int dfd;
1577
1578 test_cmd_get_dmabuf(buf_size, &dfd);
1579 test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, 0, &iova);
1580 test_err_ioctl_ioas_map_file(EINVAL, dfd, buf_size, buf_size, &iova);
1581 test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, buf_size + 1, &iova);
1582 test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
1583
1584 close(dfd);
1585 }
1586
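/*
 * While the dmabuf is revoked, new IOMMU_IOAS_MAP_FILE calls fail with
 * ENODEV but the existing mapping stays in place; un-revoking allows
 * mapping again.
 */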
1587 TEST_F(iommufd_ioas, dmabuf_revoke)
1588 {
1589 size_t buf_size = PAGE_SIZE * 4;
1590 __u32 hwpt_id;
1591 __u64 iova;
1592 __u64 iova2;
1593 int dfd;
1594
1595 test_cmd_get_dmabuf(buf_size, &dfd);
1596 test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
1597 test_cmd_revoke_dmabuf(dfd, true);
1598
1599 if (variant->mock_domains)
1600 test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
1601 &hwpt_id);
1602
1603 test_err_ioctl_ioas_map_file(ENODEV, dfd, 0, buf_size, &iova2);
1604
1605 test_cmd_revoke_dmabuf(dfd, false);
1606 test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova2);
1607
1608 /* Restore the iova back */
1609 test_ioctl_ioas_unmap(iova, buf_size);
1610 test_ioctl_ioas_map_fixed_file(dfd, 0, buf_size, iova);
1611
1612 close(dfd);
1613 }
1614
1615 FIXTURE(iommufd_mock_domain)
1616 {
1617 int fd;
1618 uint32_t ioas_id;
1619 uint32_t hwpt_id;
1620 uint32_t hwpt_ids[2];
1621 uint32_t stdev_ids[2];
1622 uint32_t idev_ids[2];
1623 int mmap_flags;
1624 size_t mmap_buf_size;
1625 };
1626
1627 FIXTURE_VARIANT(iommufd_mock_domain)
1628 {
1629 unsigned int mock_domains;
1630 bool hugepages;
1631 bool file;
1632 };
1633
1634 FIXTURE_SETUP(iommufd_mock_domain)
1635 {
1636 unsigned int i;
1637
1638 self->fd = open("/dev/iommu", O_RDWR);
1639 ASSERT_NE(-1, self->fd);
1640 test_ioctl_ioas_alloc(&self->ioas_id);
1641
1642 ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1643
1644 for (i = 0; i != variant->mock_domains; i++) {
1645 test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1646 &self->hwpt_ids[i], &self->idev_ids[i]);
1647 test_cmd_dev_check_cache_all(self->idev_ids[0],
1648 IOMMU_TEST_DEV_CACHE_DEFAULT);
1649 }
1650 self->hwpt_id = self->hwpt_ids[0];
1651
1652 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1653 self->mmap_buf_size = PAGE_SIZE * 8;
1654 if (variant->hugepages) {
1655 /*
1656 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1657 * not available.
1658 */
1659 self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1660 self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1661 }
1662 }
1663
1664 FIXTURE_TEARDOWN(iommufd_mock_domain)
1665 {
1666 teardown_iommufd(self->fd, _metadata);
1667 }
1668
1669 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1670 {
1671 .mock_domains = 1,
1672 .hugepages = false,
1673 .file = false,
1674 };
1675
1676 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1677 {
1678 .mock_domains = 2,
1679 .hugepages = false,
1680 .file = false,
1681 };
1682
1683 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1684 {
1685 .mock_domains = 1,
1686 .hugepages = true,
1687 .file = false,
1688 };
1689
1690 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1691 {
1692 .mock_domains = 2,
1693 .hugepages = true,
1694 .file = false,
1695 };
1696
1697 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
1698 {
1699 .mock_domains = 1,
1700 .hugepages = false,
1701 .file = true,
1702 };
1703
1704 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
1705 {
1706 .mock_domains = 1,
1707 .hugepages = true,
1708 .file = true,
1709 };
1710
1711
1712 /* Have the kernel check that the user pages made it to the iommu_domain */
1713 #define check_mock_iova(_ptr, _iova, _length) \
1714 ({ \
1715 struct iommu_test_cmd check_map_cmd = { \
1716 .size = sizeof(check_map_cmd), \
1717 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \
1718 .id = self->hwpt_id, \
1719 .check_map = { .iova = _iova, \
1720 .length = _length, \
1721 .uptr = (uintptr_t)(_ptr) }, \
1722 }; \
1723 ASSERT_EQ(0, \
1724 ioctl(self->fd, \
1725 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1726 &check_map_cmd)); \
1727 if (self->hwpt_ids[1]) { \
1728 check_map_cmd.id = self->hwpt_ids[1]; \
1729 ASSERT_EQ(0, \
1730 ioctl(self->fd, \
1731 _IOMMU_TEST_CMD( \
1732 IOMMU_TEST_OP_MD_CHECK_MAP), \
1733 &check_map_cmd)); \
1734 } \
1735 })
1736
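/*
 * Map a single page and verify it reached the iommu_domain(s), then check
 * that partially or fully unmapped user memory fails with EFAULT.
 */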
1737 static void
1738 test_basic_mmap(struct __test_metadata *_metadata,
1739 struct _test_data_iommufd_mock_domain *self,
1740 const struct _fixture_variant_iommufd_mock_domain *variant)
1741 {
1742 size_t buf_size = self->mmap_buf_size;
1743 uint8_t *buf;
1744 __u64 iova;
1745
1746 /* Simple one page map */
1747 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1748 check_mock_iova(buffer, iova, PAGE_SIZE);
1749
1750 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1751 0);
1752 ASSERT_NE(MAP_FAILED, buf);
1753
1754 /* EFAULT half way through mapping */
1755 ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1756 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1757
1758 /* EFAULT on first page */
1759 ASSERT_EQ(0, munmap(buf, buf_size / 2));
1760 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1761 }
1762
1763 static void
1764 test_basic_file(struct __test_metadata *_metadata,
1765 struct _test_data_iommufd_mock_domain *self,
1766 const struct _fixture_variant_iommufd_mock_domain *variant)
1767 {
1768 size_t buf_size = self->mmap_buf_size;
1769 uint8_t *buf;
1770 __u64 iova;
1771 int mfd_tmp;
1772 int prot = PROT_READ | PROT_WRITE;
1773
1774 /* Simple one page map */
1775 test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
1776 check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
1777
1778 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
1779 ASSERT_NE(MAP_FAILED, buf);
1780
1781 test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);
1782
1783 ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
1784 test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);
1785
1786 close(mfd_tmp);
1787 }
1788
1789 TEST_F(iommufd_mock_domain, basic)
1790 {
1791 if (variant->file)
1792 test_basic_file(_metadata, self, variant);
1793 else
1794 test_basic_mmap(_metadata, self, variant);
1795 }
1796
1797 TEST_F(iommufd_mock_domain, ro_unshare)
1798 {
1799 uint8_t *buf;
1800 __u64 iova;
1801 int fd;
1802
1803 fd = open("/proc/self/exe", O_RDONLY);
1804 ASSERT_NE(-1, fd);
1805
1806 buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1807 ASSERT_NE(MAP_FAILED, buf);
1808 close(fd);
1809
1810 /*
1811 * There have been lots of changes to the "unshare" mechanism in
1812 * get_user_pages(); make sure it works right. The write to the page
1813 * after we map it for reading should not change the assigned PFN.
1814 */
1815 ASSERT_EQ(0,
1816 _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1817 &iova, IOMMU_IOAS_MAP_READABLE));
1818 check_mock_iova(buf, iova, PAGE_SIZE);
1819 memset(buf, 1, PAGE_SIZE);
1820 check_mock_iova(buf, iova, PAGE_SIZE);
1821 ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1822 }
1823
1824 TEST_F(iommufd_mock_domain, all_aligns)
1825 {
1826 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1827 MOCK_PAGE_SIZE;
1828 size_t buf_size = self->mmap_buf_size;
1829 unsigned int start;
1830 unsigned int end;
1831 uint8_t *buf;
1832 int prot = PROT_READ | PROT_WRITE;
1833 int mfd = -1;
1834
1835 if (variant->file)
1836 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1837 else
1838 buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1839 ASSERT_NE(MAP_FAILED, buf);
1840 if (variant->file)
1841 ASSERT_GT(mfd, 0);
1842 check_refs(buf, buf_size, 0);
1843
1844 /*
1845 * Map every combination of page size and alignment within a big region,
1846 * fewer for the hugepage case as it takes so long to finish.
1847 */
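/*
 * start advances by test_step and end by MOCK_PAGE_SIZE, so every
 * (start, length) combination at mock page granularity gets mapped,
 * verified with check_mock_iova(), checked via check_refs() for exactly
 * one reference per covered CPU page, and then unmapped again. The
 * hugepage variants only test the maximal length for each start to keep
 * the runtime down.
 */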
1848 for (start = 0; start < buf_size; start += test_step) {
1849 if (variant->hugepages)
1850 end = buf_size;
1851 else
1852 end = start + MOCK_PAGE_SIZE;
1853 for (; end <= buf_size; end += MOCK_PAGE_SIZE) {
1854 size_t length = end - start;
1855 __u64 iova;
1856
1857 if (variant->file) {
1858 test_ioctl_ioas_map_file(mfd, start, length,
1859 &iova);
1860 } else {
1861 test_ioctl_ioas_map(buf + start, length, &iova);
1862 }
1863 check_mock_iova(buf + start, iova, length);
1864 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1865 end / PAGE_SIZE * PAGE_SIZE -
1866 start / PAGE_SIZE * PAGE_SIZE,
1867 1);
1868
1869 test_ioctl_ioas_unmap(iova, length);
1870 }
1871 }
1872 check_refs(buf, buf_size, 0);
1873 ASSERT_EQ(0, munmap(buf, buf_size));
1874 if (variant->file)
1875 close(mfd);
1876 }
1877
1878 TEST_F(iommufd_mock_domain, all_aligns_copy)
1879 {
1880 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1881 MOCK_PAGE_SIZE;
1882 size_t buf_size = self->mmap_buf_size;
1883 unsigned int start;
1884 unsigned int end;
1885 uint8_t *buf;
1886 int prot = PROT_READ | PROT_WRITE;
1887 int mfd = -1;
1888
1889 if (variant->file)
1890 buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1891 else
1892 buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1893 ASSERT_NE(MAP_FAILED, buf);
1894 if (variant->file)
1895 ASSERT_GT(mfd, 0);
1896 check_refs(buf, buf_size, 0);
1897
1898 /*
1899 * Map every combination of page size and alignment within a big region,
1900 * fewer for the hugepage case as it takes so long to finish.
1901 */
1902 for (start = 0; start < buf_size; start += test_step) {
1903 if (variant->hugepages)
1904 end = buf_size;
1905 else
1906 end = start + MOCK_PAGE_SIZE;
1907 for (; end <= buf_size; end += MOCK_PAGE_SIZE) {
1908 size_t length = end - start;
1909 unsigned int old_id;
1910 uint32_t mock_stdev_id;
1911 __u64 iova;
1912
1913 if (variant->file) {
1914 test_ioctl_ioas_map_file(mfd, start, length,
1915 &iova);
1916 } else {
1917 test_ioctl_ioas_map(buf + start, length, &iova);
1918 }
1919
1920 /* Add and destroy a domain while the area exists */
1921 old_id = self->hwpt_ids[1];
1922 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1923 &self->hwpt_ids[1], NULL);
1924
1925 check_mock_iova(buf + start, iova, length);
1926 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1927 end / PAGE_SIZE * PAGE_SIZE -
1928 start / PAGE_SIZE * PAGE_SIZE,
1929 1);
1930
1931 test_ioctl_destroy(mock_stdev_id);
1932 self->hwpt_ids[1] = old_id;
1933
1934 test_ioctl_ioas_unmap(iova, length);
1935 }
1936 }
1937 check_refs(buf, buf_size, 0);
1938 ASSERT_EQ(0, munmap(buf, buf_size));
1939 if (variant->file)
1940 close(mfd);
1941 }
1942
1943 TEST_F(iommufd_mock_domain, user_copy)
1944 {
1945 void *buf = variant->file ? mfd_buffer : buffer;
1946 struct iommu_test_cmd access_cmd = {
1947 .size = sizeof(access_cmd),
1948 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1949 .access_pages = { .length = BUFFER_SIZE,
1950 .uptr = (uintptr_t)buf },
1951 };
1952 struct iommu_ioas_copy copy_cmd = {
1953 .size = sizeof(copy_cmd),
1954 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1955 .dst_ioas_id = self->ioas_id,
1956 .dst_iova = MOCK_APERTURE_START,
1957 .length = BUFFER_SIZE,
1958 };
1959 struct iommu_ioas_unmap unmap_cmd = {
1960 .size = sizeof(unmap_cmd),
1961 .ioas_id = self->ioas_id,
1962 .iova = MOCK_APERTURE_START,
1963 .length = BUFFER_SIZE,
1964 };
1965 unsigned int new_ioas_id, ioas_id;
1966
1967 /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1968 test_ioctl_ioas_alloc(&ioas_id);
1969 if (variant->file) {
1970 test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
1971 &copy_cmd.src_iova);
1972 } else {
1973 test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
1974 &copy_cmd.src_iova);
1975 }
1976 test_cmd_create_access(ioas_id, &access_cmd.id,
1977 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1978
1979 access_cmd.access_pages.iova = copy_cmd.src_iova;
1980 ASSERT_EQ(0,
1981 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1982 &access_cmd));
1983 copy_cmd.src_ioas_id = ioas_id;
1984 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1985 check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1986
1987 /* Now replace the ioas with a new one */
1988 test_ioctl_ioas_alloc(&new_ioas_id);
1989 if (variant->file) {
1990 test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
1991 &copy_cmd.src_iova);
1992 } else {
1993 test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
1994 &copy_cmd.src_iova);
1995 }
1996 test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1997
1998 /* Destroy the old ioas and cleanup copied mapping */
1999 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
2000 test_ioctl_destroy(ioas_id);
2001
2002 /* Then run the same test again with the new ioas */
2003 access_cmd.access_pages.iova = copy_cmd.src_iova;
2004 ASSERT_EQ(0,
2005 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
2006 &access_cmd));
2007 copy_cmd.src_ioas_id = new_ioas_id;
2008 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
2009 check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
2010
2011 test_cmd_destroy_access_pages(
2012 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
2013 test_cmd_destroy_access(access_cmd.id);
2014
2015 test_ioctl_destroy(new_ioas_id);
2016 }
2017
2018 TEST_F(iommufd_mock_domain, replace)
2019 {
2020 uint32_t ioas_id;
2021
2022 test_ioctl_ioas_alloc(&ioas_id);
2023
2024 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
2025
2026 /*
2027 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
2028 * should get ENOENT when we try to use it.
2029 */
2030 if (variant->mock_domains == 1)
2031 test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
2032 self->hwpt_ids[0]);
2033
2034 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
2035 if (variant->mock_domains >= 2) {
2036 test_cmd_mock_domain_replace(self->stdev_ids[0],
2037 self->hwpt_ids[1]);
2038 test_cmd_mock_domain_replace(self->stdev_ids[0],
2039 self->hwpt_ids[1]);
2040 test_cmd_mock_domain_replace(self->stdev_ids[0],
2041 self->hwpt_ids[0]);
2042 }
2043
2044 test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
2045 test_ioctl_destroy(ioas_id);
2046 }
2047
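/*
 * A hw_pagetable cannot be destroyed while a device is still attached to
 * it (EBUSY); it can only be freed once the device has been replaced onto
 * another hwpt or back onto the plain IOAS, as exercised below.
 */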
2048 TEST_F(iommufd_mock_domain, alloc_hwpt)
2049 {
2050 int i;
2051
2052 for (i = 0; i != variant->mock_domains; i++) {
2053 uint32_t hwpt_id[2];
2054 uint32_t stddev_id;
2055
2056 test_err_hwpt_alloc(EOPNOTSUPP,
2057 self->idev_ids[i], self->ioas_id,
2058 ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
2059 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
2060 0, &hwpt_id[0]);
2061 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
2062 IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
2063
2064 /* Do a hw_pagetable rotation test */
2065 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
2066 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
2067 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
2068 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
2069 test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
2070 test_ioctl_destroy(hwpt_id[1]);
2071
2072 test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
2073 test_ioctl_destroy(stddev_id);
2074 test_ioctl_destroy(hwpt_id[0]);
2075 }
2076 }
2077
2078 FIXTURE(iommufd_dirty_tracking)
2079 {
2080 int fd;
2081 uint32_t ioas_id;
2082 uint32_t hwpt_id;
2083 uint32_t stdev_id;
2084 uint32_t idev_id;
2085 unsigned long page_size;
2086 unsigned long bitmap_size;
2087 void *bitmap;
2088 void *buffer;
2089 };
2090
2091 FIXTURE_VARIANT(iommufd_dirty_tracking)
2092 {
2093 unsigned long buffer_size;
2094 bool hugepages;
2095 };
2096
2097 FIXTURE_SETUP(iommufd_dirty_tracking)
2098 {
2099 struct iommu_option cmd = {
2100 .size = sizeof(cmd),
2101 .option_id = IOMMU_OPTION_HUGE_PAGES,
2102 .op = IOMMU_OPTION_OP_SET,
2103 .val64 = 0,
2104 };
2105 size_t mmap_buffer_size;
2106 unsigned long size;
2107 int mmap_flags;
2108 void *vrc;
2109 int rc;
2110
2111 if (variant->buffer_size < MOCK_PAGE_SIZE) {
2112 SKIP(return,
2113 "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%u",
2114 variant->buffer_size, MOCK_PAGE_SIZE);
2115 }
2116
2117 self->fd = open("/dev/iommu", O_RDWR);
2118 ASSERT_NE(-1, self->fd);
2119
2120 mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
2121 mmap_buffer_size = variant->buffer_size;
2122 if (variant->hugepages) {
2123 /*
2124 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
2125 * not available.
2126 */
2127 mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
2128
2129 /*
2130 * Allocation must be aligned to the HUGEPAGE_SIZE, because the
2131 * following mmap() will automatically align the length to be a
2132 * multiple of the underlying huge page size. Failing to do the
2133 * same at this allocation will result in a memory overwrite by
2134 * the mmap().
2135 */
2136 if (mmap_buffer_size < HUGEPAGE_SIZE)
2137 mmap_buffer_size = HUGEPAGE_SIZE;
2138 }
2139
2140 rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
2141 if (rc || !self->buffer) {
2142 SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
2143 mmap_buffer_size, rc);
2144 }
2145 assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
2146 vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
2147 mmap_flags, -1, 0);
2148 assert(vrc == self->buffer);
2149
2150 self->page_size = MOCK_PAGE_SIZE;
2151 self->bitmap_size = variant->buffer_size / self->page_size;
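/*
 * bitmap_size is in bits, one bit per mock page of the buffer; e.g. the
 * 64M variants need a 4K byte bitmap and the 128M variants an 8K one,
 * matching the comments on the variant definitions below.
 */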
2152
2153 /* Provision an extra PAGE_SIZE for the unaligned cases */
2154 size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
2155 rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
2156 assert(!rc);
2157 assert(self->bitmap);
2158 assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
2159
2160 test_ioctl_ioas_alloc(&self->ioas_id);
2161
2162 /*
2163 * For dirty testing it is important that the page size fed into
2164 * the iommu page tables matches the size the dirty logic
2165 * expects, or set_dirty can touch too much stuff.
2166 */
2167 cmd.object_id = self->ioas_id;
2168 if (!variant->hugepages)
2169 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2170
2171 test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
2172 &self->idev_id);
2173 }
2174
2175 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
2176 {
2177 free(self->buffer);
2178 free(self->bitmap);
2179 teardown_iommufd(self->fd, _metadata);
2180 }
2181
2182 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
2183 {
2184 /* half of a u8 index bitmap */
2185 .buffer_size = 8UL * 1024UL,
2186 };
2187
2188 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
2189 {
2190 /* one u8 index bitmap */
2191 .buffer_size = 16UL * 1024UL,
2192 };
2193
2194 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
2195 {
2196 /* one u32 index bitmap */
2197 .buffer_size = 64UL * 1024UL,
2198 };
2199
2200 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
2201 {
2202 /* one u64 index bitmap */
2203 .buffer_size = 128UL * 1024UL,
2204 };
2205
2206 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
2207 {
2208 /* two u64 indexes and a trailing partial bitmap */
2209 .buffer_size = 320UL * 1024UL,
2210 };
2211
2212 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
2213 {
2214 /* 4K bitmap (64M IOVA range) */
2215 .buffer_size = 64UL * 1024UL * 1024UL,
2216 };
2217
2218 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
2219 {
2220 /* 4K bitmap (64M IOVA range) */
2221 .buffer_size = 64UL * 1024UL * 1024UL,
2222 .hugepages = true,
2223 };
2224
2225 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
2226 {
2227 /* 8K bitmap (128M IOVA range) */
2228 .buffer_size = 128UL * 1024UL * 1024UL,
2229 };
2230
2231 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
2232 {
2233 /* 8K bitmap (128M IOVA range) */
2234 .buffer_size = 128UL * 1024UL * 1024UL,
2235 .hugepages = true,
2236 };
2237
2238 TEST_F(iommufd_dirty_tracking, enforce_dirty)
2239 {
2240 uint32_t ioas_id, stddev_id, idev_id;
2241 uint32_t hwpt_id, _hwpt_id;
2242 uint32_t dev_flags;
2243
2244 /* Regular case */
2245 dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
2246 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2247 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2248 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2249 test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
2250 NULL);
2251 test_ioctl_destroy(stddev_id);
2252 test_ioctl_destroy(hwpt_id);
2253
2254 /* IOMMU device does not support dirty tracking */
2255 test_ioctl_ioas_alloc(&ioas_id);
2256 test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
2257 &idev_id);
2258 test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
2259 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2260 test_ioctl_destroy(stddev_id);
2261 }
2262
2263 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
2264 {
2265 uint32_t stddev_id;
2266 uint32_t hwpt_id;
2267
2268 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2269 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2270 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2271 test_cmd_set_dirty_tracking(hwpt_id, true);
2272 test_cmd_set_dirty_tracking(hwpt_id, false);
2273
2274 test_ioctl_destroy(stddev_id);
2275 test_ioctl_destroy(hwpt_id);
2276 }
2277
2278 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
2279 {
2280 uint32_t caps = 0;
2281 uint32_t stddev_id;
2282 uint32_t hwpt_id;
2283
2284 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
2285 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2286 test_cmd_get_hw_capabilities(self->idev_id, caps);
2287 ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
2288 caps & IOMMU_HW_CAP_DIRTY_TRACKING);
2289
2290 test_ioctl_destroy(stddev_id);
2291 test_ioctl_destroy(hwpt_id);
2292 }
2293
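/*
 * Map the whole buffer at MOCK_APERTURE_START, allocate a dirty-tracking
 * capable hwpt, enable tracking, and then read the dirty bitmap back at
 * several bitmap pointer alignments (page aligned, a MOCK_PAGE_SIZE
 * offset, and an odd 0xff1 offset) to cover the unaligned bitmap paths.
 */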
2294 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
2295 {
2296 uint32_t page_size = MOCK_PAGE_SIZE;
2297 uint32_t ioas_id = self->ioas_id;
2298 uint32_t hwpt_id;
2299
2300 if (variant->hugepages)
2301 page_size = MOCK_HUGE_PAGE_SIZE;
2302
2303 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2304 variant->buffer_size, MOCK_APERTURE_START);
2305
2306 if (variant->hugepages)
2307 test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2308 IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2309 MOCK_IOMMUPT_HUGE, &hwpt_id);
2310 else
2311 test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2312 IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2313 MOCK_IOMMUPT_DEFAULT, &hwpt_id);
2314
2315 test_cmd_set_dirty_tracking(hwpt_id, true);
2316
2317 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2318 MOCK_APERTURE_START, self->page_size, page_size,
2319 self->bitmap, self->bitmap_size, 0, _metadata);
2320
2321 /* PAGE_SIZE unaligned bitmap */
2322 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2323 MOCK_APERTURE_START, self->page_size, page_size,
2324 self->bitmap + MOCK_PAGE_SIZE,
2325 self->bitmap_size, 0, _metadata);
2326
2327 /* u64 unaligned bitmap */
2328 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2329 MOCK_APERTURE_START, self->page_size, page_size,
2330 self->bitmap + 0xff1, self->bitmap_size, 0,
2331 _metadata);
2332
2333 test_ioctl_destroy(hwpt_id);
2334 }
2335
2336 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
2337 {
2338 uint32_t page_size = MOCK_PAGE_SIZE;
2339 uint32_t ioas_id = self->ioas_id;
2340 uint32_t hwpt_id;
2341
2342 if (variant->hugepages)
2343 page_size = MOCK_HUGE_PAGE_SIZE;
2344
2345 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2346 variant->buffer_size, MOCK_APERTURE_START);
2347
2348
2349 if (variant->hugepages)
2350 test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2351 IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2352 MOCK_IOMMUPT_HUGE, &hwpt_id);
2353 else
2354 test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2355 IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2356 MOCK_IOMMUPT_DEFAULT, &hwpt_id);
2357
2358 test_cmd_set_dirty_tracking(hwpt_id, true);
2359
2360 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2361 MOCK_APERTURE_START, self->page_size, page_size,
2362 self->bitmap, self->bitmap_size,
2363 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2364 _metadata);
2365
2366 /* Unaligned bitmap */
2367 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2368 MOCK_APERTURE_START, self->page_size, page_size,
2369 self->bitmap + MOCK_PAGE_SIZE,
2370 self->bitmap_size,
2371 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2372 _metadata);
2373
2374 /* u64 unaligned bitmap */
2375 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2376 MOCK_APERTURE_START, self->page_size, page_size,
2377 self->bitmap + 0xff1, self->bitmap_size,
2378 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2379 _metadata);
2380
2381 test_ioctl_destroy(hwpt_id);
2382 }
2383
2384 /* VFIO compatibility IOCTLs */
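/*
 * The tests below drive the legacy VFIO type1/type1v2 ioctls against the
 * iommufd fd to check the compatibility layer: version and extension
 * queries, DMA map/unmap, GET_INFO and its capability chain, and the
 * IOMMU_VFIO_IOAS control ioctl.
 */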
2385
2386 TEST_F(iommufd, simple_ioctls)
2387 {
2388 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
2389 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
2390 }
2391
2392 TEST_F(iommufd, unmap_cmd)
2393 {
2394 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2395 .iova = MOCK_APERTURE_START,
2396 .size = PAGE_SIZE,
2397 };
2398
2399 unmap_cmd.argsz = 1;
2400 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2401
2402 unmap_cmd.argsz = sizeof(unmap_cmd);
2403 unmap_cmd.flags = 1 << 31;
2404 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2405
2406 unmap_cmd.flags = 0;
2407 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2408 }
2409
2410 TEST_F(iommufd, map_cmd)
2411 {
2412 struct vfio_iommu_type1_dma_map map_cmd = {
2413 .iova = MOCK_APERTURE_START,
2414 .size = PAGE_SIZE,
2415 .vaddr = (__u64)buffer,
2416 };
2417
2418 map_cmd.argsz = 1;
2419 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2420
2421 map_cmd.argsz = sizeof(map_cmd);
2422 map_cmd.flags = 1 << 31;
2423 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2424
2425 /* Requires a domain to be attached */
2426 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2427 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2428 }
2429
2430 TEST_F(iommufd, info_cmd)
2431 {
2432 struct vfio_iommu_type1_info info_cmd = {};
2433
2434 /* Invalid argsz */
2435 info_cmd.argsz = 1;
2436 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2437
2438 info_cmd.argsz = sizeof(info_cmd);
2439 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2440 }
2441
2442 TEST_F(iommufd, set_iommu_cmd)
2443 {
2444 /* Requires a domain to be attached */
2445 EXPECT_ERRNO(ENODEV,
2446 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2447 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2448 }
2449
2450 TEST_F(iommufd, vfio_ioas)
2451 {
2452 struct iommu_vfio_ioas vfio_ioas_cmd = {
2453 .size = sizeof(vfio_ioas_cmd),
2454 .op = IOMMU_VFIO_IOAS_GET,
2455 };
2456 __u32 ioas_id;
2457
2458 /* ENODEV if there is no compat ioas */
2459 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2460
2461 /* Invalid id for set */
2462 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2463 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2464
2465 /* Valid id for set */
2466 test_ioctl_ioas_alloc(&ioas_id);
2467 vfio_ioas_cmd.ioas_id = ioas_id;
2468 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2469
2470 /* Same id comes back from get */
2471 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2472 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2473 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2474
2475 /* Clear works */
2476 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2477 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2478 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2479 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2480 }
2481
2482 FIXTURE(vfio_compat_mock_domain)
2483 {
2484 int fd;
2485 uint32_t ioas_id;
2486 };
2487
2488 FIXTURE_VARIANT(vfio_compat_mock_domain)
2489 {
2490 unsigned int version;
2491 };
2492
2493 FIXTURE_SETUP(vfio_compat_mock_domain)
2494 {
2495 struct iommu_vfio_ioas vfio_ioas_cmd = {
2496 .size = sizeof(vfio_ioas_cmd),
2497 .op = IOMMU_VFIO_IOAS_SET,
2498 };
2499
2500 self->fd = open("/dev/iommu", O_RDWR);
2501 ASSERT_NE(-1, self->fd);
2502
2503 /* Create what VFIO would consider a group */
2504 test_ioctl_ioas_alloc(&self->ioas_id);
2505 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2506
2507 /* Attach it to the vfio compat */
2508 vfio_ioas_cmd.ioas_id = self->ioas_id;
2509 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2510 ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2511 }
2512
2513 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2514 {
2515 teardown_iommufd(self->fd, _metadata);
2516 }
2517
2518 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2519 {
2520 .version = VFIO_TYPE1v2_IOMMU,
2521 };
2522
2523 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2524 {
2525 .version = VFIO_TYPE1_IOMMU,
2526 };
2527
2528 TEST_F(vfio_compat_mock_domain, simple_close)
2529 {
2530 }
2531
2532 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2533 {
2534 struct iommu_option cmd = {
2535 .size = sizeof(cmd),
2536 .option_id = IOMMU_OPTION_HUGE_PAGES,
2537 .op = IOMMU_OPTION_OP_GET,
2538 .val64 = 3,
2539 .object_id = self->ioas_id,
2540 };
2541
2542 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2543 if (variant->version == VFIO_TYPE1_IOMMU) {
2544 ASSERT_EQ(0, cmd.val64);
2545 } else {
2546 ASSERT_EQ(1, cmd.val64);
2547 }
2548 }
2549
2550 /*
2551 * Execute an ioctl command stored in buffer and check that the result does not
2552 * overflow memory.
2553 */
2554 static bool is_filled(const void *buf, uint8_t c, size_t len)
2555 {
2556 const uint8_t *cbuf = buf;
2557
2558 for (; len; cbuf++, len--)
2559 if (*cbuf != c)
2560 return false;
2561 return true;
2562 }
2563
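/*
 * ioctl_check_buf() fills everything past the command's declared size with
 * a 0xAA canary, runs the ioctl, and then checks with is_filled() that the
 * canary is untouched, catching any kernel write beyond the user supplied
 * size.
 */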
2564 #define ioctl_check_buf(fd, cmd) \
2565 ({ \
2566 size_t _cmd_len = *(__u32 *)buffer; \
2567 \
2568 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2569 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
2570 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
2571 BUFFER_SIZE - _cmd_len)); \
2572 })
2573
2574 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2575 struct vfio_iommu_type1_info *info_cmd)
2576 {
2577 const struct vfio_info_cap_header *cap;
2578
2579 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2580 cap = buffer + info_cmd->cap_offset;
2581 while (true) {
2582 size_t cap_size;
2583
2584 if (cap->next)
2585 cap_size = (buffer + cap->next) - (void *)cap;
2586 else
2587 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2588
2589 switch (cap->id) {
2590 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2591 struct vfio_iommu_type1_info_cap_iova_range *data =
2592 (void *)cap;
2593
2594 ASSERT_EQ(1, data->header.version);
2595 ASSERT_EQ(1, data->nr_iovas);
2596 EXPECT_EQ(MOCK_APERTURE_START,
2597 data->iova_ranges[0].start);
2598 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2599 break;
2600 }
2601 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2602 struct vfio_iommu_type1_info_dma_avail *data =
2603 (void *)cap;
2604
2605 ASSERT_EQ(1, data->header.version);
2606 ASSERT_EQ(sizeof(*data), cap_size);
2607 break;
2608 }
2609 default:
2610 ASSERT_EQ(false, true);
2611 break;
2612 }
2613 if (!cap->next)
2614 break;
2615
2616 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2617 ASSERT_GE(buffer + cap->next, (void *)cap);
2618 cap = buffer + cap->next;
2619 }
2620 }
2621
2622 TEST_F(vfio_compat_mock_domain, get_info)
2623 {
2624 struct vfio_iommu_type1_info *info_cmd = buffer;
2625 unsigned int i;
2626 size_t caplen;
2627
2628 /* Pre-cap ABI */
2629 *info_cmd = (struct vfio_iommu_type1_info){
2630 .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2631 };
2632 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2633 ASSERT_NE(0, info_cmd->iova_pgsizes);
2634 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2635 info_cmd->flags);
2636
2637 /* Read the cap chain size */
2638 *info_cmd = (struct vfio_iommu_type1_info){
2639 .argsz = sizeof(*info_cmd),
2640 };
2641 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2642 ASSERT_NE(0, info_cmd->iova_pgsizes);
2643 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2644 info_cmd->flags);
2645 ASSERT_EQ(0, info_cmd->cap_offset);
2646 ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2647
2648 /* Read the caps; the kernel should never create corrupted caps */
2649 caplen = info_cmd->argsz;
2650 for (i = sizeof(*info_cmd); i < caplen; i++) {
2651 *info_cmd = (struct vfio_iommu_type1_info){
2652 .argsz = i,
2653 };
2654 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2655 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2656 info_cmd->flags);
2657 if (!info_cmd->cap_offset)
2658 continue;
2659 check_vfio_info_cap_chain(_metadata, info_cmd);
2660 }
2661 }
2662
2663 static void shuffle_array(unsigned long *array, size_t nelms)
2664 {
2665 unsigned int i;
2666
2667 /* Shuffle */
2668 for (i = 0; i != nelms; i++) {
2669 unsigned long tmp = array[i];
2670 unsigned int other = rand() % (nelms - i);
2671
2672 array[i] = array[other];
2673 array[other] = tmp;
2674 }
2675 }
2676
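/*
 * The map tests below record the IOVA of each chunk they map, shuffle the
 * list with shuffle_array(), and unmap in that random order so the compat
 * layer is exercised with out-of-order unmaps of individual mappings.
 */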
2677 TEST_F(vfio_compat_mock_domain, map)
2678 {
2679 struct vfio_iommu_type1_dma_map map_cmd = {
2680 .argsz = sizeof(map_cmd),
2681 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2682 .vaddr = (uintptr_t)buffer,
2683 .size = BUFFER_SIZE,
2684 .iova = MOCK_APERTURE_START,
2685 };
2686 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2687 .argsz = sizeof(unmap_cmd),
2688 .size = BUFFER_SIZE,
2689 .iova = MOCK_APERTURE_START,
2690 };
2691 unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2692 unsigned int i;
2693
2694 /* Simple map/unmap */
2695 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2696 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2697 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2698 /* Unmapping an empty range succeeds */
2699 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2700
2701 /* UNMAP_FLAG_ALL requires 0 iova/size */
2702 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2703 unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2704 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2705
2706 unmap_cmd.iova = 0;
2707 unmap_cmd.size = 0;
2708 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2709 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2710
2711 /* Small pages */
2712 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2713 map_cmd.iova = pages_iova[i] =
2714 MOCK_APERTURE_START + i * PAGE_SIZE;
2715 map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2716 map_cmd.size = PAGE_SIZE;
2717 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2718 }
2719 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2720
2721 unmap_cmd.flags = 0;
2722 unmap_cmd.size = PAGE_SIZE;
2723 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2724 unmap_cmd.iova = pages_iova[i];
2725 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2726 }
2727 }
2728
2729 TEST_F(vfio_compat_mock_domain, huge_map)
2730 {
2731 size_t buf_size = HUGEPAGE_SIZE * 2;
2732 struct vfio_iommu_type1_dma_map map_cmd = {
2733 .argsz = sizeof(map_cmd),
2734 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2735 .size = buf_size,
2736 .iova = MOCK_APERTURE_START,
2737 };
2738 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2739 .argsz = sizeof(unmap_cmd),
2740 };
2741 unsigned long pages_iova[16];
2742 unsigned int i;
2743 void *buf;
2744
2745 /* Test huge pages and splitting */
2746 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2747 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2748 0);
2749 ASSERT_NE(MAP_FAILED, buf);
2750 map_cmd.vaddr = (uintptr_t)buf;
2751 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2752
2753 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2754 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2755 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2756 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2757
2758 /* type1 mode can cut up larger mappings, type1v2 always fails */
2759 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2760 unmap_cmd.iova = pages_iova[i];
2761 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2762 if (variant->version == VFIO_TYPE1_IOMMU) {
2763 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2764 &unmap_cmd));
2765 } else {
2766 EXPECT_ERRNO(ENOENT,
2767 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2768 &unmap_cmd));
2769 }
2770 }
2771 }
2772
2773 FIXTURE(iommufd_viommu)
2774 {
2775 int fd;
2776 uint32_t ioas_id;
2777 uint32_t stdev_id;
2778 uint32_t hwpt_id;
2779 uint32_t nested_hwpt_id;
2780 uint32_t device_id;
2781 uint32_t viommu_id;
2782 };
2783
2784 FIXTURE_VARIANT(iommufd_viommu)
2785 {
2786 unsigned int viommu;
2787 };
2788
2789 FIXTURE_SETUP(iommufd_viommu)
2790 {
2791 self->fd = open("/dev/iommu", O_RDWR);
2792 ASSERT_NE(-1, self->fd);
2793 test_ioctl_ioas_alloc(&self->ioas_id);
2794 test_ioctl_set_default_memory_limit();
2795
2796 if (variant->viommu) {
2797 struct iommu_hwpt_selftest data = {
2798 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2799 };
2800
2801 test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
2802 &self->device_id);
2803
2804 /* Allocate a nesting parent hwpt */
2805 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
2806 IOMMU_HWPT_ALLOC_NEST_PARENT,
2807 &self->hwpt_id);
2808
2809 /* Allocate a vIOMMU taking refcount of the parent hwpt */
2810 test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2811 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2812 &self->viommu_id);
2813
2814 /* Allocate a regular nested hwpt */
2815 test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
2816 &self->nested_hwpt_id,
2817 IOMMU_HWPT_DATA_SELFTEST, &data,
2818 sizeof(data));
2819 }
2820 }
2821
2822 FIXTURE_TEARDOWN(iommufd_viommu)
2823 {
2824 teardown_iommufd(self->fd, _metadata);
2825 }
2826
2827 FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
2828 {
2829 .viommu = 0,
2830 };
2831
2832 FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
2833 {
2834 .viommu = 1,
2835 };
2836
2837 TEST_F(iommufd_viommu, viommu_auto_destroy)
2838 {
2839 }
2840
2841 TEST_F(iommufd_viommu, viommu_negative_tests)
2842 {
2843 uint32_t device_id = self->device_id;
2844 uint32_t ioas_id = self->ioas_id;
2845 uint32_t hwpt_id;
2846
2847 if (self->device_id) {
2848 /* Negative test -- invalid hwpt (hwpt_id=0) */
2849 test_err_viommu_alloc(ENOENT, device_id, 0,
2850 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2851 NULL);
2852
2853 /* Negative test -- not a nesting parent hwpt */
2854 test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
2855 test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
2856 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2857 NULL);
2858 test_ioctl_destroy(hwpt_id);
2859
2860 /* Negative test -- unsupported viommu type */
2861 test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
2862 0xdead, NULL, 0, NULL);
2863 EXPECT_ERRNO(EBUSY,
2864 _test_ioctl_destroy(self->fd, self->hwpt_id));
2865 EXPECT_ERRNO(EBUSY,
2866 _test_ioctl_destroy(self->fd, self->viommu_id));
2867 } else {
2868 test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
2869 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2870 NULL);
2871 }
2872 }
2873
2874 TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
2875 {
2876 struct iommu_hwpt_selftest data = {
2877 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2878 };
2879 uint32_t viommu_id = self->viommu_id;
2880 uint32_t dev_id = self->device_id;
2881 uint32_t iopf_hwpt_id;
2882 uint32_t fault_id;
2883 uint32_t fault_fd;
2884 uint32_t vdev_id;
2885
2886 if (!dev_id)
2887 SKIP(return, "Skipping test for variant no_viommu");
2888
2889 test_ioctl_fault_alloc(&fault_id, &fault_fd);
2890 test_err_hwpt_alloc_iopf(ENOENT, dev_id, viommu_id, UINT32_MAX,
2891 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2892 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2893 test_err_hwpt_alloc_iopf(EOPNOTSUPP, dev_id, viommu_id, fault_id,
2894 IOMMU_HWPT_FAULT_ID_VALID | (1 << 31),
2895 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
2896 sizeof(data));
2897 test_cmd_hwpt_alloc_iopf(dev_id, viommu_id, fault_id,
2898 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2899 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2900
2901 /* Must allocate vdevice before attaching to a nested hwpt */
2902 test_err_mock_domain_replace(ENOENT, self->stdev_id, iopf_hwpt_id);
2903 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2904 test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
2905 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, iopf_hwpt_id));
2906 test_cmd_trigger_iopf(dev_id, fault_fd);
2907
2908 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2909 test_ioctl_destroy(iopf_hwpt_id);
2910 close(fault_fd);
2911 test_ioctl_destroy(fault_id);
2912 }
2913
2914 TEST_F(iommufd_viommu, viommu_alloc_with_data)
2915 {
2916 struct iommu_viommu_selftest data = {
2917 .in_data = 0xbeef,
2918 };
2919 uint32_t *test;
2920
2921 if (!self->device_id)
2922 SKIP(return, "Skipping test for variant no_viommu");
2923
2924 test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2925 IOMMU_VIOMMU_TYPE_SELFTEST, &data, sizeof(data),
2926 &self->viommu_id);
2927 ASSERT_EQ(data.out_data, data.in_data);
2928
2929 /* Negative mmap tests -- offset and length cannot be changed */
2930 test_err_mmap(ENXIO, data.out_mmap_length,
2931 data.out_mmap_offset + PAGE_SIZE);
2932 test_err_mmap(ENXIO, data.out_mmap_length,
2933 data.out_mmap_offset + PAGE_SIZE * 2);
2934 test_err_mmap(ENXIO, data.out_mmap_length / 2, data.out_mmap_offset);
2935 test_err_mmap(ENXIO, data.out_mmap_length * 2, data.out_mmap_offset);
2936
2937 /* Now do a correct mmap for a loopback test */
2938 test = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
2939 MAP_SHARED, self->fd, data.out_mmap_offset);
2940 ASSERT_NE(MAP_FAILED, test);
2941 ASSERT_EQ(data.in_data, *test);
2942
2943 /* Destroying the owner of the mmap region must be blocked while mapped */
2944 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->viommu_id));
2945 munmap(test, data.out_mmap_length);
2946 }
2947
2948 TEST_F(iommufd_viommu, vdevice_alloc)
2949 {
2950 uint32_t viommu_id = self->viommu_id;
2951 uint32_t dev_id = self->device_id;
2952 uint32_t vdev_id = 0;
2953 uint32_t veventq_id;
2954 uint32_t veventq_fd;
2955 int prev_seq = -1;
2956
2957 if (dev_id) {
2958 /* Must allocate vdevice before attaching to a nested hwpt */
2959 test_err_mock_domain_replace(ENOENT, self->stdev_id,
2960 self->nested_hwpt_id);
2961
2962 /* Allocate a vEVENTQ with veventq_depth=2 */
2963 test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
2964 &veventq_id, &veventq_fd);
2965 test_err_veventq_alloc(EEXIST, viommu_id,
2966 IOMMU_VEVENTQ_TYPE_SELFTEST, NULL, NULL);
2967 /* Set vdev_id to 0x99, unset it, and set to 0x88 */
2968 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2969 test_cmd_mock_domain_replace(self->stdev_id,
2970 self->nested_hwpt_id);
2971 test_cmd_trigger_vevents(dev_id, 1);
2972 test_cmd_read_vevents(veventq_fd, 1, 0x99, &prev_seq);
2973 test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
2974 &vdev_id);
2975 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2976 test_ioctl_destroy(vdev_id);
2977
2978 /* Try again with 0x88 */
2979 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
2980 test_cmd_mock_domain_replace(self->stdev_id,
2981 self->nested_hwpt_id);
2982 /* Trigger an overflow with three events */
2983 test_cmd_trigger_vevents(dev_id, 3);
2984 test_err_read_vevents(EOVERFLOW, veventq_fd, 3, 0x88,
2985 &prev_seq);
2986 /* Overflow must be gone after the previous reads */
2987 test_cmd_trigger_vevents(dev_id, 1);
2988 test_cmd_read_vevents(veventq_fd, 1, 0x88, &prev_seq);
2989 close(veventq_fd);
2990 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2991 test_ioctl_destroy(vdev_id);
2992 test_ioctl_destroy(veventq_id);
2993 } else {
2994 test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
2995 }
2996 }
2997
2998 TEST_F(iommufd_viommu, vdevice_cache)
2999 {
3000 struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
3001 uint32_t viommu_id = self->viommu_id;
3002 uint32_t dev_id = self->device_id;
3003 uint32_t vdev_id = 0;
3004 uint32_t num_inv;
3005
3006 if (!dev_id)
3007 SKIP(return, "Skipping test for variant no_viommu");
3008
3009 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
3010
3011 test_cmd_dev_check_cache_all(dev_id, IOMMU_TEST_DEV_CACHE_DEFAULT);
3012
3013 /* Check data_type by passing a zero-length array */
3014 num_inv = 0;
3015 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3016 &num_inv);
3017 assert(!num_inv);
3018
3019 /* Negative test: Invalid data_type */
3020 num_inv = 1;
3021 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3022 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
3023 sizeof(*inv_reqs), &num_inv);
3024 assert(!num_inv);
3025
3026 /* Negative test: structure size sanity */
3027 num_inv = 1;
3028 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3029 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3030 sizeof(*inv_reqs) + 1, &num_inv);
3031 assert(!num_inv);
3032
3033 num_inv = 1;
3034 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3035 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 1,
3036 &num_inv);
3037 assert(!num_inv);
3038
3039 /* Negative test: invalid flag is passed */
3040 num_inv = 1;
3041 inv_reqs[0].flags = 0xffffffff;
3042 inv_reqs[0].vdev_id = 0x99;
3043 test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
3044 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3045 sizeof(*inv_reqs), &num_inv);
3046 assert(!num_inv);
3047
3048 /* Negative test: invalid data_uptr when array is not empty */
3049 num_inv = 1;
3050 inv_reqs[0].flags = 0;
3051 inv_reqs[0].vdev_id = 0x99;
3052 test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
3053 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3054 sizeof(*inv_reqs), &num_inv);
3055 assert(!num_inv);
3056
3057 /* Negative test: invalid entry_len when array is not empty */
3058 num_inv = 1;
3059 inv_reqs[0].flags = 0;
3060 inv_reqs[0].vdev_id = 0x99;
3061 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3062 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 0,
3063 &num_inv);
3064 assert(!num_inv);
3065
3066 /* Negative test: invalid cache_id */
3067 num_inv = 1;
3068 inv_reqs[0].flags = 0;
3069 inv_reqs[0].vdev_id = 0x99;
3070 inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
3071 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3072 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3073 sizeof(*inv_reqs), &num_inv);
3074 assert(!num_inv);
3075
3076 /* Negative test: invalid vdev_id */
3077 num_inv = 1;
3078 inv_reqs[0].flags = 0;
3079 inv_reqs[0].vdev_id = 0x9;
3080 inv_reqs[0].cache_id = 0;
3081 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3082 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3083 sizeof(*inv_reqs), &num_inv);
3084 assert(!num_inv);
3085
3086 /*
3087 * Invalidate the 1st cache entry but fail the 2nd request
3088 * due to invalid flags configuration in the 2nd request.
3089 */
3090 num_inv = 2;
3091 inv_reqs[0].flags = 0;
3092 inv_reqs[0].vdev_id = 0x99;
3093 inv_reqs[0].cache_id = 0;
3094 inv_reqs[1].flags = 0xffffffff;
3095 inv_reqs[1].vdev_id = 0x99;
3096 inv_reqs[1].cache_id = 1;
3097 test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
3098 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3099 sizeof(*inv_reqs), &num_inv);
3100 assert(num_inv == 1);
3101 test_cmd_dev_check_cache(dev_id, 0, 0);
3102 test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
3103 test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3104 test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3105
3106 /*
3107 * Invalidate the 1st cache entry but fail the 2nd request
3108 * due to invalid cache_id configuration in the 2nd request.
3109 */
3110 num_inv = 2;
3111 inv_reqs[0].flags = 0;
3112 inv_reqs[0].vdev_id = 0x99;
3113 inv_reqs[0].cache_id = 0;
3114 inv_reqs[1].flags = 0;
3115 inv_reqs[1].vdev_id = 0x99;
3116 inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
3117 test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3118 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3119 sizeof(*inv_reqs), &num_inv);
3120 assert(num_inv == 1);
3121 test_cmd_dev_check_cache(dev_id, 0, 0);
3122 test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
3123 test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3124 test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3125
3126 /* Invalidate the 2nd cache entry and verify */
3127 num_inv = 1;
3128 inv_reqs[0].flags = 0;
3129 inv_reqs[0].vdev_id = 0x99;
3130 inv_reqs[0].cache_id = 1;
3131 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3132 &num_inv);
3133 assert(num_inv == 1);
3134 test_cmd_dev_check_cache(dev_id, 0, 0);
3135 test_cmd_dev_check_cache(dev_id, 1, 0);
3136 test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3137 test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3138
3139 /* Invalidate the 3rd and 4th cache entries and verify */
3140 num_inv = 2;
3141 inv_reqs[0].flags = 0;
3142 inv_reqs[0].vdev_id = 0x99;
3143 inv_reqs[0].cache_id = 2;
3144 inv_reqs[1].flags = 0;
3145 inv_reqs[1].vdev_id = 0x99;
3146 inv_reqs[1].cache_id = 3;
3147 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3148 &num_inv);
3149 assert(num_inv == 2);
3150 test_cmd_dev_check_cache_all(dev_id, 0);
3151
3152 /* Invalidate all cache entries for the vdevice and verify */
3153 num_inv = 1;
3154 inv_reqs[0].vdev_id = 0x99;
3155 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
3156 test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3157 &num_inv);
3158 assert(num_inv == 1);
3159 test_cmd_dev_check_cache_all(dev_id, 0);
3160 test_ioctl_destroy(vdev_id);
3161 }
3162
3163 TEST_F(iommufd_viommu, hw_queue)
3164 {
3165 __u64 iova = MOCK_APERTURE_START, iova2;
3166 uint32_t viommu_id = self->viommu_id;
3167 uint32_t hw_queue_id[2];
3168
3169 if (!viommu_id)
3170 SKIP(return, "Skipping test for variant no_viommu");
3171
3172 /* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
3173 test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
3174 IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
3175 &hw_queue_id[0]);
3176 /* Fail queue addr and length */
3177 test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3178 0, iova, 0, &hw_queue_id[0]);
3179 test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
3180 IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
3181 PAGE_SIZE, &hw_queue_id[0]);
3182 /* Fail missing iova */
3183 test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3184 0, iova, PAGE_SIZE, &hw_queue_id[0]);
3185
3186 /* Map iova */
3187 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
3188 test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
3189
3190 /* Fail index=1 and =MAX; must start from index=0 */
3191 test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3192 iova, PAGE_SIZE, &hw_queue_id[0]);
3193 test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3194 IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
3195 &hw_queue_id[0]);
3196
3197 /* Allocate index=0, declare ownership of the iova */
3198 test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
3199 iova, PAGE_SIZE, &hw_queue_id[0]);
3200 /* Fail duplicated index */
3201 test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3202 0, iova, PAGE_SIZE, &hw_queue_id[0]);
3203 /* Fail unmap, due to iova ownership */
3204 test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
3205 /* The 2nd page is not pinned, so it can be unmapped */
3206 test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
3207
3208 /* Allocate index=1, with an unaligned case */
3209 test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3210 iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
3211 &hw_queue_id[1]);
3212 /* Fail to destroy, due to dependency */
3213 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));
3214
3215 /* Destroy in descending order */
3216 test_ioctl_destroy(hw_queue_id[1]);
3217 test_ioctl_destroy(hw_queue_id[0]);
3218 /* Now it can unmap the first page */
3219 test_ioctl_ioas_unmap(iova, PAGE_SIZE);
3220 }
3221
3222 TEST_F(iommufd_viommu, vdevice_tombstone)
3223 {
3224 uint32_t viommu_id = self->viommu_id;
3225 uint32_t dev_id = self->device_id;
3226 uint32_t vdev_id = 0;
3227
3228 if (!dev_id)
3229 SKIP(return, "Skipping test for variant no_viommu");
3230
3231 test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
3232 test_ioctl_destroy(self->stdev_id);
3233 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, vdev_id));
3234 }
3235
3236 FIXTURE(iommufd_device_pasid)
3237 {
3238 int fd;
3239 uint32_t ioas_id;
3240 uint32_t hwpt_id;
3241 uint32_t stdev_id;
3242 uint32_t device_id;
3243 uint32_t no_pasid_stdev_id;
3244 uint32_t no_pasid_device_id;
3245 };
3246
3247 FIXTURE_VARIANT(iommufd_device_pasid)
3248 {
3249 bool pasid_capable;
3250 };
3251
3252 FIXTURE_SETUP(iommufd_device_pasid)
3253 {
3254 self->fd = open("/dev/iommu", O_RDWR);
3255 ASSERT_NE(-1, self->fd);
3256 test_ioctl_ioas_alloc(&self->ioas_id);
3257
3258 test_cmd_mock_domain_flags(self->ioas_id,
3259 MOCK_FLAGS_DEVICE_PASID,
3260 &self->stdev_id, &self->hwpt_id,
3261 &self->device_id);
3262 if (!variant->pasid_capable)
3263 test_cmd_mock_domain_flags(self->ioas_id, 0,
3264 &self->no_pasid_stdev_id, NULL,
3265 &self->no_pasid_device_id);
3266 }
3267
3268 FIXTURE_TEARDOWN(iommufd_device_pasid)
3269 {
3270 teardown_iommufd(self->fd, _metadata);
3271 }
3272
3273 FIXTURE_VARIANT_ADD(iommufd_device_pasid, no_pasid)
3274 {
3275 .pasid_capable = false,
3276 };
3277
3278 FIXTURE_VARIANT_ADD(iommufd_device_pasid, has_pasid)
3279 {
3280 .pasid_capable = true,
3281 };
3282
3283 TEST_F(iommufd_device_pasid, pasid_attach)
3284 {
3285 struct iommu_hwpt_selftest data = {
3286 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
3287 };
3288 uint32_t nested_hwpt_id[3] = {};
3289 uint32_t parent_hwpt_id = 0;
3290 uint32_t fault_id, fault_fd;
3291 uint32_t s2_hwpt_id = 0;
3292 uint32_t iopf_hwpt_id;
3293 uint32_t pasid = 100;
3294 uint32_t viommu_id;
3295
3296 /*
3297 * Negative: detach a pasid without attaching it first. This is not
3298 * expected usage, but it should not result in failure anyway.
3299 */
3300 test_cmd_pasid_detach(pasid);
3301
3302 /* Allocate two nested hwpts sharing one common parent hwpt */
3303 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3304 IOMMU_HWPT_ALLOC_NEST_PARENT,
3305 &parent_hwpt_id);
3306 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3307 IOMMU_HWPT_ALLOC_PASID,
3308 &nested_hwpt_id[0],
3309 IOMMU_HWPT_DATA_SELFTEST,
3310 &data, sizeof(data));
3311 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3312 IOMMU_HWPT_ALLOC_PASID,
3313 &nested_hwpt_id[1],
3314 IOMMU_HWPT_DATA_SELFTEST,
3315 &data, sizeof(data));
3316
3317 /* Fault related preparation */
3318 test_ioctl_fault_alloc(&fault_id, &fault_fd);
3319 test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
3320 IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID,
3321 &iopf_hwpt_id,
3322 IOMMU_HWPT_DATA_SELFTEST, &data,
3323 sizeof(data));
3324
3325 /* Allocate a regular nested hwpt based on viommu */
3326 test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
3327 IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0, &viommu_id);
3328 test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
3329 IOMMU_HWPT_ALLOC_PASID,
3330 &nested_hwpt_id[2],
3331 IOMMU_HWPT_DATA_SELFTEST, &data,
3332 sizeof(data));
3333
3334 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3335 IOMMU_HWPT_ALLOC_PASID,
3336 &s2_hwpt_id);
3337
3338 /* Attach RID to non-pasid compat domain, */
3339 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
3340 /* then attach to pasid should fail */
3341 test_err_pasid_attach(EINVAL, pasid, s2_hwpt_id);
3342
3343 /* Attach RID to pasid compat domain, */
3344 test_cmd_mock_domain_replace(self->stdev_id, s2_hwpt_id);
3345 /* then attach to pasid should succeed, */
3346 test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3347 /* but attach RID to non-pasid compat domain should fail now. */
3348 test_err_mock_domain_replace(EINVAL, self->stdev_id, parent_hwpt_id);
3349 /*
3350 * Detach hwpt from pasid 100, and check if the pasid 100
3351 * has null domain.
3352 */
3353 test_cmd_pasid_detach(pasid);
3354 ASSERT_EQ(0,
3355 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3356 pasid, 0));
3357 /* RID is attached to a pasid-compat domain, the pasid path is not used */
3358
3359 if (!variant->pasid_capable) {
3360 /*
3361 * PASID-compatible domain can be used by non-PASID-capable
3362 * device.
3363 */
3364 test_cmd_mock_domain_replace(self->no_pasid_stdev_id, nested_hwpt_id[0]);
3365 test_cmd_mock_domain_replace(self->no_pasid_stdev_id, self->ioas_id);
3366 /*
3367 * Attach hwpt to pasid 100 of non-PASID-capable device,
3368 * should fail, no matter whether the domain is pasid-compat or not.
3369 */
3370 EXPECT_ERRNO(EINVAL,
3371 _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3372 pasid, parent_hwpt_id));
3373 EXPECT_ERRNO(EINVAL,
3374 _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3375 pasid, s2_hwpt_id));
3376 }
3377
3378 /*
3379 * Attach non pasid compat hwpt to pasid-capable device, should
3380 * fail, and have null domain.
3381 */
3382 test_err_pasid_attach(EINVAL, pasid, parent_hwpt_id);
3383 ASSERT_EQ(0,
3384 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3385 pasid, 0));
3386
3387 /*
3388 * Attach ioas to pasid 100, should fail, domain should
3389 * be null.
3390 */
3391 test_err_pasid_attach(EINVAL, pasid, self->ioas_id);
3392 ASSERT_EQ(0,
3393 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3394 pasid, 0));
3395
3396 /*
3397 * Attach the s2_hwpt to pasid 100, should succeed, domain should
3398 * be valid.
3399 */
3400 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3401 ASSERT_EQ(0,
3402 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3403 pasid, s2_hwpt_id));
3404
3405 /*
3406 * Try attach pasid 100 with another hwpt, should FAIL
3407 * as attach does not allow overwrite, use REPLACE instead.
3408 */
3409 test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3410
3411 /*
3412 * Detach hwpt from pasid 100 for next test, should succeed,
3413 * and have null domain.
3414 */
3415 test_cmd_pasid_detach(pasid);
3416 ASSERT_EQ(0,
3417 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3418 pasid, 0));
3419
3420 /*
3421 * Attach nested hwpt to pasid 100, should succeed, domain
3422 * should be valid.
3423 */
3424 test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3425 ASSERT_EQ(0,
3426 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3427 pasid, nested_hwpt_id[0]));
3428
3429 /* Attach to pasid 100 which has been attached, should fail. */
3430 test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3431
3432 /* cleanup pasid 100 */
3433 test_cmd_pasid_detach(pasid);
3434
3435 /* Replace tests */
3436
3437 pasid = 200;
3438 /*
3439 * Replace pasid 200 without attaching it, should fail
3440 * with -EINVAL.
3441 */
3442 test_err_pasid_replace(EINVAL, pasid, s2_hwpt_id);
3443
3444 /*
3445 * Attach the s2 hwpt to pasid 200, should succeed, domain should
3446 * be valid.
3447 */
3448 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3449 ASSERT_EQ(0,
3450 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3451 pasid, s2_hwpt_id));
3452
3453 /*
3454 * Replace pasid 200 with self->ioas_id, should fail
3455 * and domain should be the prior s2 hwpt.
3456 */
3457 test_err_pasid_replace(EINVAL, pasid, self->ioas_id);
3458 ASSERT_EQ(0,
3459 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3460 pasid, s2_hwpt_id));
3461
3462 /*
3463 * Replace a nested hwpt for pasid 200, should succeed,
3464 * and have valid domain.
3465 */
3466 test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);
3467 ASSERT_EQ(0,
3468 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3469 pasid, nested_hwpt_id[0]));
3470
3471 /*
3472 * Replace with another nested hwpt for pasid 200, should
3473 * succeed, and have valid domain.
3474 */
3475 test_cmd_pasid_replace(pasid, nested_hwpt_id[1]);
3476 ASSERT_EQ(0,
3477 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3478 pasid, nested_hwpt_id[1]));
3479
3480 /* cleanup pasid 200 */
3481 test_cmd_pasid_detach(pasid);
3482
3483 /* Negative Tests for pasid replace, use pasid 1024 */
3484
3485 /*
3486 * Attach the s2 hwpt to pasid 1024, should succeed, domain should
3487 * be valid.
3488 */
3489 pasid = 1024;
3490 test_cmd_pasid_attach(pasid, s2_hwpt_id);
3491 ASSERT_EQ(0,
3492 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3493 pasid, s2_hwpt_id));
3494
3495 /*
3496 * Replace pasid 1024 with nested_hwpt_id[0], should fail,
3497 * but keep the old valid domain. This is a deliberately designed
3498 * negative case; normally this would succeed.
3499 */
3500 test_err_pasid_replace(ENOMEM, pasid, nested_hwpt_id[0]);
3501 ASSERT_EQ(0,
3502 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3503 pasid, s2_hwpt_id));
3504
3505 /* cleanup pasid 1024 */
3506 test_cmd_pasid_detach(pasid);
3507
3508 /* Attach to iopf-capable hwpt */
3509
3510 /*
3511 * Attach an iopf hwpt to pasid 2048, should succeed, domain should
3512 * be valid.
3513 */
3514 pasid = 2048;
3515 test_cmd_pasid_attach(pasid, iopf_hwpt_id);
3516 ASSERT_EQ(0,
3517 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3518 pasid, iopf_hwpt_id));
3519
3520 test_cmd_trigger_iopf_pasid(self->device_id, pasid, fault_fd);
3521
3522 /*
3523 * Replace with s2_hwpt_id for pasid 2048, should
3524 * succeed, and have valid domain.
3525 */
3526 test_cmd_pasid_replace(pasid, s2_hwpt_id);
3527 ASSERT_EQ(0,
3528 test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3529 pasid, s2_hwpt_id));
3530
3531 /* cleanup pasid 2048 */
3532 test_cmd_pasid_detach(pasid);
3533
3534 test_ioctl_destroy(iopf_hwpt_id);
3535 close(fault_fd);
3536 test_ioctl_destroy(fault_id);
3537
3538 /* Detach the s2_hwpt_id from RID */
3539 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
3540 }
3541
3542 TEST_HARNESS_MAIN
3543