// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 *
 * These tests are "kernel integrity" tests. They are looking for kernel
 * WARN/OOPS/KASAN/etc splats triggered by kernel sanitizers & debugging
 * features. They do not attempt to verify that the system calls are doing
 * what they are supposed to do.
 *
 * The basic philosophy is to run a sequence of calls that will succeed and
 * then sweep every failure injection point on that call chain to look for
 * interesting things in error handling.
 *
 * If something is actually going wrong, this test is best run with:
 *   echo 1 > /proc/sys/kernel/panic_on_warn
 */
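/*
 * Note: fault injection needs a kernel built with CONFIG_FAULT_INJECTION,
 * CONFIG_FAULT_INJECTION_DEBUG_FS, CONFIG_FAILSLAB and CONFIG_FAIL_PAGE_ALLOC,
 * with debugfs mounted at /sys/kernel/debug. Otherwise
 * setup_fault_injection() below fails and every test is skipped.
 */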
#include <fcntl.h>
#include <dirent.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static bool have_fault_injection;

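/* Write a string to a file relative to dfd, asserting a complete write */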
static int writeat(int dfd, const char *fn, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t res;
	int fd;

	fd = openat(dfd, fn, O_WRONLY);
	if (fd == -1)
		return -1;
	res = write(fd, val, val_len);
	assert(res == val_len);
	close(fd);
	return 0;
}

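/*
 * Runs before main() via the constructor attribute to allocate the global
 * test buffers shared with iommufd_utils.h.
 */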
static __attribute__((constructor)) void setup_buffer(void)
{
	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);

	BUFFER_SIZE = 2 * 1024 * 1024;

	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
				&mfd);
}

/*
 * This sets up fault injection in a way that is useful for this test.
 * It does not attempt to restore things back to how they were.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	if (!debugfs)
		return;

	/* Allow any allocation call to be fault injected */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
		return;
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;

		/* We are looking for kernel splats, quiet down the log */
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}

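/*
 * State for one fail-nth sweep: proc_fd is the task's fail-nth file and
 * iteration is the injection site currently being failed.
 */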
struct fail_nth_state {
	int proc_fd;
	unsigned int iteration;
};

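/* The test runs single threaded, so the main thread's tid equals the pid */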
static void fail_nth_first(struct __test_metadata *_metadata,
			   struct fail_nth_state *nth_state)
{
	char buf[300];

	snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
	nth_state->proc_fd = open(buf, O_RDWR);
	ASSERT_NE(-1, nth_state->proc_fd);
}

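/*
 * Advance the sweep. Per Documentation/fault-injection/fault-injection.rst,
 * writing N to fail-nth makes the task's Nth injection site fail, and a
 * later read returns "0\n" once that fault has fired. Roughly:
 *
 *	echo 3 > /proc/self/task/$tid/fail-nth	# fail the 3rd site
 *	<run the syscalls under test>
 *	cat /proc/self/task/$tid/fail-nth	# "0" iff the fault fired
 *
 * Reading "0" means the fault landed inside the test, so keep sweeping.
 * A non-zero read, or an EFAULT caused by the still-pending fault firing
 * in the pread()/pwrite() below rather than in the test, means the last
 * run got past every injection site and the sweep is complete.
 */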
static bool fail_nth_next(struct __test_metadata *_metadata,
			  struct fail_nth_state *nth_state,
			  int test_result)
{
	static const char disable_nth[] = "0";
	char buf[300];

	/*
	 * This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number
	 * of required fault injection sites, so if this hits it doesn't
	 * necessarily mean a test failure, just that the limit has to be made
	 * bigger.
	 */
	ASSERT_GT(400, nth_state->iteration);
	if (nth_state->iteration != 0) {
		ssize_t res;
		ssize_t res2;

		buf[0] = 0;
		/*
		 * Annoyingly, disabling the nth can also fail. This means
		 * the test passed without triggering the failure.
		 */
		res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
		if (res == -1 && errno == EFAULT) {
			buf[0] = '1';
			buf[1] = '\n';
			res = 2;
		}

		res2 = pwrite(nth_state->proc_fd, disable_nth,
			      ARRAY_SIZE(disable_nth) - 1, 0);
		if (res2 == -1 && errno == EFAULT) {
			res2 = pwrite(nth_state->proc_fd, disable_nth,
				      ARRAY_SIZE(disable_nth) - 1, 0);
			buf[0] = '1';
			buf[1] = '\n';
		}
		ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

		/* printf("  nth %u result=%d nth=%u\n", nth_state->iteration,
			  test_result, atoi(buf)); */
		fflush(stdout);
		ASSERT_LT(1, res);
		if (res != 2 || buf[0] != '0' || buf[1] != '\n')
			return false;
	} else {
		/* printf("  nth %u result=%d\n", nth_state->iteration,
			  test_result); */
	}
	nth_state->iteration++;
	return true;
}

/*
 * This is called during the test to start failure injection. It allows the
 * test to do some setup that has already been swept and thus reduce the
 * required iterations.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char buf[300];
	size_t len;

	if (!nth_state->iteration)
		return;

	len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
	ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)

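/*
 * Wrap a test body in the fail-nth sweep: run it once with no injection,
 * then repeatedly tear down and re-set up the fixture and re-run it with
 * fail-nth = 1, 2, 3, ... until a run gets past every injection site
 * without hitting the fault. That final, effectively uninjected run must
 * return 0.
 */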
#define TEST_FAIL_NTH(fixture_name, name)                                     \
	static int test_nth_##name(struct __test_metadata *_metadata,        \
				   FIXTURE_DATA(fixture_name) *self,          \
				   const FIXTURE_VARIANT(fixture_name)        \
					   *variant,                          \
				   struct fail_nth_state *_nth_state);        \
	TEST_F(fixture_name, name)                                            \
	{                                                                     \
		struct fail_nth_state nth_state = {};                         \
		int test_result = 0;                                          \
                                                                              \
		if (!have_fault_injection)                                    \
			SKIP(return,                                          \
			     "fault injection is not enabled in the kernel"); \
		fail_nth_first(_metadata, &nth_state);                        \
		ASSERT_EQ(0, test_nth_##name(_metadata, self, variant,        \
					     &nth_state));                    \
		while (fail_nth_next(_metadata, &nth_state, test_result)) {  \
			fixture_name##_teardown(_metadata, self, variant);    \
			fixture_name##_setup(_metadata, self, variant);       \
			test_result = test_nth_##name(_metadata, self,        \
						      variant, &nth_state);   \
		}                                                             \
		ASSERT_EQ(0, test_result);                                    \
	}                                                                     \
	static int test_nth_##name(                                           \
		struct __test_metadata __attribute__((unused)) *_metadata,    \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self,     \
		const FIXTURE_VARIANT(fixture_name) __attribute__((unused))   \
			*variant,                                             \
		struct fail_nth_state *_nth_state)

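/* Only carries what FIXTURE_TEARDOWN must clean up between iterations */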
FIXTURE(basic_fail_nth)
{
	int fd;
	uint32_t access_id;
};

FIXTURE_SETUP(basic_fail_nth)
{
	self->fd = -1;
	self->access_id = 0;
}

FIXTURE_TEARDOWN(basic_fail_nth)
{
	int rc;

	if (self->access_id) {
		/* The access FD holds the iommufd open until it closes */
		rc = _test_cmd_destroy_access(self->access_id);
		assert(rc == 0);
	}
	teardown_iommufd(self->fd, _metadata);
}

/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

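	/* Inject from the very first syscall, including the open() itself */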
	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};
		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		ranges[0].start = 16 * 1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Failure path of no IOVA to unmap */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}


/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

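	/* Skip sweeping the already-covered setup above; inject from here on */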
	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}

/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_file_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map_file(self->fd, ioas_id, mfd, 0, 262144, &iova,
				      IOMMU_IOAS_MAP_WRITEABLE |
					      IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}


TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
	uint32_t ioas_id;
	__u32 stdev_id2;
	__u32 stdev_id;
	__u32 hwpt_id2;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id2))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;
	return 0;
}

TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
	uint64_t tmp_big[4096];
	uint32_t ioas_id;
	uint16_t tmp[32];
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
		return -1;

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .length = sizeof(tmp),
				       .uptr = (uintptr_t)tmp },
		};

		/* Read via the fast path, then write, then both slow paths */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
					     MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .flags = MOCK_ACCESS_RW_SLOW_PATH,
				       .length = sizeof(tmp_big),
				       .uptr = (uintptr_t)tmp_big },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}
	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}


/* pages.c access functions */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd,
			  _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* iopt_pages_fill_xarray() */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd,
			  _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;
	return 0;
}

/* device.c */
TEST_FAIL_NTH(basic_fail_nth, device)
{
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	struct iommu_test_hw_info info;
	uint32_t fault_id, fault_fd;
	uint32_t fault_hwpt_id;
	uint32_t ioas_id;
	uint32_t ioas_id2;
	uint32_t stdev_id;
	uint32_t idev_id;
	uint32_t hwpt_id;
	uint32_t viommu_id;
	uint32_t vdev_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
		return -1;

	iova = MOCK_APERTURE_START;
	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;
	if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, NULL,
				  &idev_id))
		return -1;

	if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL))
		return -1;

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, hwpt_id, NULL))
		return -1;

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
				 IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	if (_test_cmd_viommu_alloc(self->fd, idev_id, hwpt_id,
				   IOMMU_VIOMMU_TYPE_SELFTEST, 0, &viommu_id))
		return -1;

	if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
		return -1;

	if (_test_ioctl_fault_alloc(self->fd, &fault_id, &fault_fd))
		return -1;
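	/* Only fault_id is used below; the fault queue fd itself is not needed */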
	close(fault_fd);

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, hwpt_id, fault_id,
				 IOMMU_HWPT_FAULT_ID_VALID, &fault_hwpt_id,
				 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data)))
		return -1;

	return 0;
}

TEST_HARNESS_MAIN