xref: /linux/tools/testing/selftests/iommu/iommufd.c (revision 056daec2925dc200b22c30419bc7b9e01f7843c4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <asm/unistd.h>
4 #include <stdlib.h>
5 #include <sys/capability.h>
6 #include <sys/mman.h>
7 #include <sys/eventfd.h>
8 
9 #define __EXPORTED_HEADERS__
10 #include <linux/vfio.h>
11 
12 #include "iommufd_utils.h"
13 
14 static unsigned long HUGEPAGE_SIZE;
15 
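/*
 * Read the THP PMD size from sysfs so the tests follow whatever huge page
 * size the running kernel uses; fall back to 2MiB if the file is missing.
 */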
16 static unsigned long get_huge_page_size(void)
17 {
18 	char buf[80];
19 	int ret;
20 	int fd;
21 
22 	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
23 		  O_RDONLY);
24 	if (fd < 0)
25 		return 2 * 1024 * 1024;
26 
27 	ret = read(fd, buf, sizeof(buf));
28 	close(fd);
29 	if (ret <= 0 || ret == sizeof(buf))
30 		return 2 * 1024 * 1024;
31 	buf[ret] = 0;
32 	return strtoul(buf, NULL, 10);
33 }
34 
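/*
 * Runs before main() via the constructor attribute: establishes PAGE_SIZE,
 * HUGEPAGE_SIZE and BUFFER_SIZE, maps the hugepage-aligned shared test
 * buffer, and creates the memfd-backed buffer used by the file-map tests.
 */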
35 static __attribute__((constructor)) void setup_sizes(void)
36 {
37 	void *vrc;
38 	int rc;
39 
40 	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
41 	HUGEPAGE_SIZE = get_huge_page_size();
42 
43 	BUFFER_SIZE = PAGE_SIZE * 16;
44 	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
45 	assert(!rc);
46 	assert(buffer);
47 	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
48 	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
49 		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
50 	assert(vrc == buffer);
51 
52 	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
53 				&mfd);
54 	assert(mfd_buffer != MAP_FAILED);
55 	assert(mfd > 0);
56 }
57 
58 FIXTURE(iommufd)
59 {
60 	int fd;
61 };
62 
63 FIXTURE_SETUP(iommufd)
64 {
65 	self->fd = open("/dev/iommu", O_RDWR);
66 	ASSERT_NE(-1, self->fd);
67 }
68 
69 FIXTURE_TEARDOWN(iommufd)
70 {
71 	teardown_iommufd(self->fd, _metadata);
72 }
73 
74 TEST_F(iommufd, simple_close)
75 {
76 }
77 
78 TEST_F(iommufd, cmd_fail)
79 {
80 	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
81 
82 	/* object id is invalid */
83 	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
84 	/* Bad pointer */
85 	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
86 	/* Unknown ioctl */
87 	EXPECT_ERRNO(ENOTTY,
88 		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
89 			   &cmd));
90 }
91 
92 TEST_F(iommufd, cmd_length)
93 {
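	/*
	 * For each ioctl, exercise the common size-handling rules: a size
	 * smaller than the last field fails with EINVAL, a larger size with
	 * non-zero trailing bytes fails with E2BIG, and a larger size whose
	 * trailing bytes are zero behaves the same as the exact size.
	 */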
94 #define TEST_LENGTH(_struct, _ioctl, _last)                              \
95 	{                                                                \
96 		size_t min_size = offsetofend(struct _struct, _last);    \
97 		struct {                                                 \
98 			struct _struct cmd;                              \
99 			uint8_t extra;                                   \
100 		} cmd = { .cmd = { .size = min_size - 1 },               \
101 			  .extra = UINT8_MAX };                          \
102 		int old_errno;                                           \
103 		int rc;                                                  \
104 									 \
105 		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
106 		cmd.cmd.size = sizeof(struct _struct) + 1;               \
107 		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
108 		cmd.cmd.size = sizeof(struct _struct);                   \
109 		rc = ioctl(self->fd, _ioctl, &cmd);                      \
110 		old_errno = errno;                                       \
111 		cmd.cmd.size = sizeof(struct _struct) + 1;               \
112 		cmd.extra = 0;                                           \
113 		if (rc) {                                                \
114 			EXPECT_ERRNO(old_errno,                          \
115 				     ioctl(self->fd, _ioctl, &cmd));     \
116 		} else {                                                 \
117 			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
118 		}                                                        \
119 	}
120 
121 	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
122 	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
123 	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
124 	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
125 	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
126 	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
127 		    out_iova_alignment);
128 	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
129 		    allowed_iovas);
130 	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
131 	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
132 	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
133 	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
134 	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
135 	TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
136 	TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
137 	TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
138 	TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
139 		    __reserved);
140 #undef TEST_LENGTH
141 }
142 
143 TEST_F(iommufd, cmd_ex_fail)
144 {
145 	struct {
146 		struct iommu_destroy cmd;
147 		__u64 future;
148 	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
149 
150 	/* object id is invalid and command is longer */
151 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
152 	/* future area is non-zero */
153 	cmd.future = 1;
154 	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
155 	/* Original command "works" */
156 	cmd.cmd.size = sizeof(cmd.cmd);
157 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
158 	/* Short command fails */
159 	cmd.cmd.size = sizeof(cmd.cmd) - 1;
160 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
161 }
162 
163 TEST_F(iommufd, global_options)
164 {
165 	struct iommu_option cmd = {
166 		.size = sizeof(cmd),
167 		.option_id = IOMMU_OPTION_RLIMIT_MODE,
168 		.op = IOMMU_OPTION_OP_GET,
169 		.val64 = 1,
170 	};
171 
172 	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
173 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
174 	ASSERT_EQ(0, cmd.val64);
175 
176 	/* This requires root */
177 	cmd.op = IOMMU_OPTION_OP_SET;
178 	cmd.val64 = 1;
179 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
180 	cmd.val64 = 2;
181 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
182 
183 	cmd.op = IOMMU_OPTION_OP_GET;
184 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
185 	ASSERT_EQ(1, cmd.val64);
186 
187 	cmd.op = IOMMU_OPTION_OP_SET;
188 	cmd.val64 = 0;
189 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
190 
191 	cmd.op = IOMMU_OPTION_OP_GET;
192 	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
193 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
194 	cmd.op = IOMMU_OPTION_OP_SET;
195 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
196 }
197 
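/*
 * Clear CAP_IPC_LOCK from the effective set so pinned/locked memory in the
 * tests below is actually charged against the process limits.
 */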
198 static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
199 {
200 	cap_t caps;
201 	cap_value_t cap_list[1] = { CAP_IPC_LOCK };
202 
203 	caps = cap_get_proc();
204 	ASSERT_NE(caps, NULL);
205 	ASSERT_NE(-1,
206 		  cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
207 	ASSERT_NE(-1, cap_set_proc(caps));
208 	cap_free(caps);
209 }
210 
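/* Scan /proc/<pid>/status for the "var" tag and return its value, or -1. */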
211 static long get_proc_status_value(pid_t pid, const char *var)
212 {
213 	FILE *fp;
214 	char buf[80], tag[80];
215 	long val = -1;
216 
217 	snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
218 	fp = fopen(buf, "r");
219 	if (!fp)
220 		return val;
221 
222 	while (fgets(buf, sizeof(buf), fp))
223 		if (fscanf(fp, "%s %ld\n", tag, &val) == 2 && !strcmp(tag, var))
224 			break;
225 
226 	fclose(fp);
227 	return val;
228 }
229 
230 static long get_vm_pinned(pid_t pid)
231 {
232 	return get_proc_status_value(pid, "VmPin:");
233 }
234 
235 static long get_vm_locked(pid_t pid)
236 {
237 	return get_proc_status_value(pid, "VmLck:");
238 }
239 
240 FIXTURE(change_process)
241 {
242 	int fd;
243 	uint32_t ioas_id;
244 };
245 
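/*
 * The accounting variant selects the IOPT_PAGES_ACCOUNT_* mode, which the
 * fixture setup applies through IOMMU_OPTION_RLIMIT_MODE.
 */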
246 FIXTURE_VARIANT(change_process)
247 {
248 	int accounting;
249 };
250 
251 FIXTURE_SETUP(change_process)
252 {
253 	self->fd = open("/dev/iommu", O_RDWR);
254 	ASSERT_NE(-1, self->fd);
255 
256 	drop_cap_ipc_lock(_metadata);
257 	if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
258 		struct iommu_option set_limit_cmd = {
259 			.size = sizeof(set_limit_cmd),
260 			.option_id = IOMMU_OPTION_RLIMIT_MODE,
261 			.op = IOMMU_OPTION_OP_SET,
262 			.val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
263 		};
264 		ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
265 	}
266 
267 	test_ioctl_ioas_alloc(&self->ioas_id);
268 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
269 }
270 
271 FIXTURE_TEARDOWN(change_process)
272 {
273 	teardown_iommufd(self->fd, _metadata);
274 }
275 
276 FIXTURE_VARIANT_ADD(change_process, account_none)
277 {
278 	.accounting = IOPT_PAGES_ACCOUNT_NONE,
279 };
280 
281 FIXTURE_VARIANT_ADD(change_process, account_user)
282 {
283 	.accounting = IOPT_PAGES_ACCOUNT_USER,
284 };
285 
286 FIXTURE_VARIANT_ADD(change_process, account_mm)
287 {
288 	.accounting = IOPT_PAGES_ACCOUNT_MM,
289 };
290 
291 TEST_F(change_process, basic)
292 {
293 	pid_t parent = getpid();
294 	pid_t child;
295 	__u64 iova;
296 	struct iommu_ioas_change_process cmd = {
297 		.size = sizeof(cmd),
298 	};
299 
300 	/* Expect failure if non-file maps exist */
301 	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
302 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
303 	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
304 
305 	/* Change process works in current process. */
306 	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
307 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
308 
309 	/* Change process works in another process */
310 	child = fork();
311 	if (!child) {
312 		int nlock = PAGE_SIZE / 1024;
313 
314 		/* Parent accounts for locked memory before */
315 		ASSERT_EQ(nlock, get_vm_pinned(parent));
316 		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
317 			ASSERT_EQ(nlock, get_vm_locked(parent));
318 		ASSERT_EQ(0, get_vm_pinned(getpid()));
319 		ASSERT_EQ(0, get_vm_locked(getpid()));
320 
321 		ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
322 
323 		/* Child accounts for locked memory after */
324 		ASSERT_EQ(0, get_vm_pinned(parent));
325 		ASSERT_EQ(0, get_vm_locked(parent));
326 		ASSERT_EQ(nlock, get_vm_pinned(getpid()));
327 		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
328 			ASSERT_EQ(nlock, get_vm_locked(getpid()));
329 
330 		exit(0);
331 	}
332 	ASSERT_NE(-1, child);
333 	ASSERT_EQ(child, waitpid(child, NULL, 0));
334 }
335 
336 FIXTURE(iommufd_ioas)
337 {
338 	int fd;
339 	uint32_t ioas_id;
340 	uint32_t stdev_id;
341 	uint32_t hwpt_id;
342 	uint32_t device_id;
343 	uint64_t base_iova;
344 	uint32_t device_pasid_id;
345 };
346 
347 FIXTURE_VARIANT(iommufd_ioas)
348 {
349 	unsigned int mock_domains;
350 	unsigned int memory_limit;
351 	bool pasid_capable;
352 };
353 
354 FIXTURE_SETUP(iommufd_ioas)
355 {
356 	unsigned int i;
357 
358 
359 	self->fd = open("/dev/iommu", O_RDWR);
360 	ASSERT_NE(-1, self->fd);
361 	test_ioctl_ioas_alloc(&self->ioas_id);
362 
363 	if (!variant->memory_limit) {
364 		test_ioctl_set_default_memory_limit();
365 	} else {
366 		test_ioctl_set_temp_memory_limit(variant->memory_limit);
367 	}
368 
369 	for (i = 0; i != variant->mock_domains; i++) {
370 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
371 				     &self->hwpt_id, &self->device_id);
372 		test_cmd_dev_check_cache_all(self->device_id,
373 					     IOMMU_TEST_DEV_CACHE_DEFAULT);
374 		self->base_iova = MOCK_APERTURE_START;
375 	}
376 
377 	if (variant->pasid_capable)
378 		test_cmd_mock_domain_flags(self->ioas_id,
379 					   MOCK_FLAGS_DEVICE_PASID,
380 					   NULL, NULL,
381 					   &self->device_pasid_id);
382 }
383 
384 FIXTURE_TEARDOWN(iommufd_ioas)
385 {
386 	test_ioctl_set_default_memory_limit();
387 	teardown_iommufd(self->fd, _metadata);
388 }
389 
390 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
391 {
392 };
393 
394 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
395 {
396 	.mock_domains = 1,
397 	.pasid_capable = true,
398 };
399 
400 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
401 {
402 	.mock_domains = 2,
403 };
404 
405 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
406 {
407 	.mock_domains = 1,
408 	.memory_limit = 16,
409 };
410 
411 TEST_F(iommufd_ioas, ioas_auto_destroy)
412 {
413 }
414 
415 TEST_F(iommufd_ioas, ioas_destroy)
416 {
417 	if (self->stdev_id) {
418 		/* IOAS cannot be freed while a device has a HWPT using it */
419 		EXPECT_ERRNO(EBUSY,
420 			     _test_ioctl_destroy(self->fd, self->ioas_id));
421 	} else {
422 		/* Can allocate and manually free an IOAS table */
423 		test_ioctl_destroy(self->ioas_id);
424 	}
425 }
426 
427 TEST_F(iommufd_ioas, alloc_hwpt_nested)
428 {
429 	const uint32_t min_data_len =
430 		offsetofend(struct iommu_hwpt_selftest, iotlb);
431 	struct iommu_hwpt_selftest data = {
432 		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
433 	};
434 	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
435 	uint32_t nested_hwpt_id[2] = {};
436 	uint32_t num_inv;
437 	uint32_t parent_hwpt_id = 0;
438 	uint32_t parent_hwpt_id_not_work = 0;
439 	uint32_t test_hwpt_id = 0;
440 	uint32_t iopf_hwpt_id;
441 	uint32_t fault_id;
442 	uint32_t fault_fd;
443 
444 	if (self->device_id) {
445 		/* Negative tests */
446 		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
447 				    &test_hwpt_id);
448 		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
449 				    &test_hwpt_id);
450 		test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
451 				    IOMMU_HWPT_ALLOC_NEST_PARENT |
452 						IOMMU_HWPT_FAULT_ID_VALID,
453 				    &test_hwpt_id);
454 
455 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
456 				    IOMMU_HWPT_ALLOC_NEST_PARENT,
457 				    &parent_hwpt_id);
458 
459 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
460 				    &parent_hwpt_id_not_work);
461 
462 		/* Negative nested tests */
463 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
464 					   parent_hwpt_id, 0,
465 					   &nested_hwpt_id[0],
466 					   IOMMU_HWPT_DATA_NONE, &data,
467 					   sizeof(data));
468 		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
469 					   parent_hwpt_id, 0,
470 					   &nested_hwpt_id[0],
471 					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
472 					   sizeof(data));
473 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
474 					   parent_hwpt_id, 0,
475 					   &nested_hwpt_id[0],
476 					   IOMMU_HWPT_DATA_SELFTEST, &data,
477 					   min_data_len - 1);
478 		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
479 					   parent_hwpt_id, 0,
480 					   &nested_hwpt_id[0],
481 					   IOMMU_HWPT_DATA_SELFTEST, NULL,
482 					   sizeof(data));
483 		test_err_hwpt_alloc_nested(
484 			EOPNOTSUPP, self->device_id, parent_hwpt_id,
485 			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
486 			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
487 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
488 					   parent_hwpt_id_not_work, 0,
489 					   &nested_hwpt_id[0],
490 					   IOMMU_HWPT_DATA_SELFTEST, &data,
491 					   sizeof(data));
492 
493 		/* Allocate two nested hwpts sharing one common parent hwpt */
494 		test_ioctl_fault_alloc(&fault_id, &fault_fd);
495 		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
496 					   &nested_hwpt_id[0],
497 					   IOMMU_HWPT_DATA_SELFTEST, &data,
498 					   sizeof(data));
499 		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
500 					   &nested_hwpt_id[1],
501 					   IOMMU_HWPT_DATA_SELFTEST, &data,
502 					   sizeof(data));
503 		test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
504 					 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
505 					 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
506 					 &data, sizeof(data));
507 		test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
508 					 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
509 					 IOMMU_HWPT_DATA_SELFTEST, &data,
510 					 sizeof(data));
511 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
512 					      IOMMU_TEST_IOTLB_DEFAULT);
513 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
514 					      IOMMU_TEST_IOTLB_DEFAULT);
515 
516 		/* Negative test: a nested hwpt on top of a nested hwpt */
517 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
518 					   nested_hwpt_id[0], 0, &test_hwpt_id,
519 					   IOMMU_HWPT_DATA_SELFTEST, &data,
520 					   sizeof(data));
521 		/* Negative test: parent hwpt now cannot be freed */
522 		EXPECT_ERRNO(EBUSY,
523 			     _test_ioctl_destroy(self->fd, parent_hwpt_id));
524 
525 		/* hwpt_invalidate does not support a parent hwpt */
526 		num_inv = 1;
527 		test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
528 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
529 					 sizeof(*inv_reqs), &num_inv);
530 		assert(!num_inv);
531 
532 		/* Check data_type by passing zero-length array */
533 		num_inv = 0;
534 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
535 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
536 					 sizeof(*inv_reqs), &num_inv);
537 		assert(!num_inv);
538 
539 		/* Negative test: Invalid data_type */
540 		num_inv = 1;
541 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
542 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
543 					 sizeof(*inv_reqs), &num_inv);
544 		assert(!num_inv);
545 
546 		/* Negative test: structure size sanity */
547 		num_inv = 1;
548 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
549 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
550 					 sizeof(*inv_reqs) + 1, &num_inv);
551 		assert(!num_inv);
552 
553 		num_inv = 1;
554 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
555 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
556 					 1, &num_inv);
557 		assert(!num_inv);
558 
559 		/* Negative test: invalid flag is passed */
560 		num_inv = 1;
561 		inv_reqs[0].flags = 0xffffffff;
562 		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
563 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
564 					 sizeof(*inv_reqs), &num_inv);
565 		assert(!num_inv);
566 
567 		/* Negative test: invalid data_uptr when array is not empty */
568 		num_inv = 1;
569 		inv_reqs[0].flags = 0;
570 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
571 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
572 					 sizeof(*inv_reqs), &num_inv);
573 		assert(!num_inv);
574 
575 		/* Negative test: invalid entry_len when array is not empty */
576 		num_inv = 1;
577 		inv_reqs[0].flags = 0;
578 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
579 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
580 					 0, &num_inv);
581 		assert(!num_inv);
582 
583 		/* Negative test: invalid iotlb_id */
584 		num_inv = 1;
585 		inv_reqs[0].flags = 0;
586 		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
587 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
588 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
589 					 sizeof(*inv_reqs), &num_inv);
590 		assert(!num_inv);
591 
592 		/*
593 		 * Invalidate the 1st iotlb entry but fail the 2nd request
594 		 * due to invalid flags configuration in the 2nd request.
595 		 */
596 		num_inv = 2;
597 		inv_reqs[0].flags = 0;
598 		inv_reqs[0].iotlb_id = 0;
599 		inv_reqs[1].flags = 0xffffffff;
600 		inv_reqs[1].iotlb_id = 1;
601 		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
602 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
603 					 sizeof(*inv_reqs), &num_inv);
604 		assert(num_inv == 1);
605 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
606 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
607 					  IOMMU_TEST_IOTLB_DEFAULT);
608 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
609 					  IOMMU_TEST_IOTLB_DEFAULT);
610 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
611 					  IOMMU_TEST_IOTLB_DEFAULT);
612 
613 		/*
614 		 * Invalidate the 1st iotlb entry but fail the 2nd request
615 		 * due to invalid iotlb_id configuration in the 2nd request.
616 		 */
617 		num_inv = 2;
618 		inv_reqs[0].flags = 0;
619 		inv_reqs[0].iotlb_id = 0;
620 		inv_reqs[1].flags = 0;
621 		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
622 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
623 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
624 					 sizeof(*inv_reqs), &num_inv);
625 		assert(num_inv == 1);
626 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
627 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
628 					  IOMMU_TEST_IOTLB_DEFAULT);
629 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
630 					  IOMMU_TEST_IOTLB_DEFAULT);
631 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
632 					  IOMMU_TEST_IOTLB_DEFAULT);
633 
634 		/* Invalidate the 2nd iotlb entry and verify */
635 		num_inv = 1;
636 		inv_reqs[0].flags = 0;
637 		inv_reqs[0].iotlb_id = 1;
638 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
639 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
640 					 sizeof(*inv_reqs), &num_inv);
641 		assert(num_inv == 1);
642 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
643 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
644 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
645 					  IOMMU_TEST_IOTLB_DEFAULT);
646 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
647 					  IOMMU_TEST_IOTLB_DEFAULT);
648 
649 		/* Invalidate the 3rd and 4th iotlb entries and verify */
650 		num_inv = 2;
651 		inv_reqs[0].flags = 0;
652 		inv_reqs[0].iotlb_id = 2;
653 		inv_reqs[1].flags = 0;
654 		inv_reqs[1].iotlb_id = 3;
655 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
656 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
657 					 sizeof(*inv_reqs), &num_inv);
658 		assert(num_inv == 2);
659 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
660 
661 		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
662 		num_inv = 1;
663 		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
664 		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
665 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
666 					 sizeof(*inv_reqs), &num_inv);
667 		assert(num_inv == 1);
668 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
669 
670 		/* Attach the device to nested_hwpt_id[0], which then becomes busy */
671 		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
672 		EXPECT_ERRNO(EBUSY,
673 			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
674 
675 		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
676 		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
677 		EXPECT_ERRNO(EBUSY,
678 			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
679 		test_ioctl_destroy(nested_hwpt_id[0]);
680 
681 		/* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
682 		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
683 		EXPECT_ERRNO(EBUSY,
684 			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
685 		/* Trigger an IOPF on the device */
686 		test_cmd_trigger_iopf(self->device_id, fault_fd);
687 
688 		/* Detach from nested_hwpt_id[1] and destroy it */
689 		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
690 		test_ioctl_destroy(nested_hwpt_id[1]);
691 		test_ioctl_destroy(iopf_hwpt_id);
692 
693 		/* Detach from the parent hw_pagetable and destroy it */
694 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
695 		test_ioctl_destroy(parent_hwpt_id);
696 		test_ioctl_destroy(parent_hwpt_id_not_work);
697 		close(fault_fd);
698 		test_ioctl_destroy(fault_id);
699 	} else {
700 		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
701 				    &parent_hwpt_id);
702 		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
703 					   parent_hwpt_id, 0,
704 					   &nested_hwpt_id[0],
705 					   IOMMU_HWPT_DATA_SELFTEST, &data,
706 					   sizeof(data));
707 		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
708 					   parent_hwpt_id, 0,
709 					   &nested_hwpt_id[1],
710 					   IOMMU_HWPT_DATA_SELFTEST, &data,
711 					   sizeof(data));
712 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
713 					     nested_hwpt_id[0]);
714 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
715 					     nested_hwpt_id[1]);
716 	}
717 }
718 
719 TEST_F(iommufd_ioas, hwpt_attach)
720 {
721 	/* Create a device attached directly to a hwpt */
722 	if (self->stdev_id) {
723 		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
724 	} else {
725 		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
726 	}
727 }
728 
729 TEST_F(iommufd_ioas, ioas_area_destroy)
730 {
731 	/* Adding an area does not change ability to destroy */
732 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
733 	if (self->stdev_id)
734 		EXPECT_ERRNO(EBUSY,
735 			     _test_ioctl_destroy(self->fd, self->ioas_id));
736 	else
737 		test_ioctl_destroy(self->ioas_id);
738 }
739 
740 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
741 {
742 	int i;
743 
744 	/* Can allocate and automatically free an IOAS table with many areas */
745 	for (i = 0; i != 10; i++) {
746 		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
747 					  self->base_iova + i * PAGE_SIZE);
748 	}
749 }
750 
751 TEST_F(iommufd_ioas, get_hw_info)
752 {
753 	struct iommu_test_hw_info buffer_exact;
754 	struct iommu_test_hw_info_buffer_larger {
755 		struct iommu_test_hw_info info;
756 		uint64_t trailing_bytes;
757 	} buffer_larger;
758 	struct iommu_test_hw_info_buffer_smaller {
759 		__u32 flags;
760 	} buffer_smaller;
761 
762 	if (self->device_id) {
763 		uint8_t max_pasid = 0;
764 
765 		/* Provide a zero-size user_buffer */
766 		test_cmd_get_hw_info(self->device_id,
767 				     IOMMU_HW_INFO_TYPE_DEFAULT, NULL, 0);
768 		/* Provide a user_buffer with exact size */
769 		test_cmd_get_hw_info(self->device_id,
770 				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
771 				     sizeof(buffer_exact));
772 
773 		/* Request for a wrong data_type, and a correct one */
774 		test_err_get_hw_info(EOPNOTSUPP, self->device_id,
775 				     IOMMU_HW_INFO_TYPE_SELFTEST + 1,
776 				     &buffer_exact, sizeof(buffer_exact));
777 		test_cmd_get_hw_info(self->device_id,
778 				     IOMMU_HW_INFO_TYPE_SELFTEST, &buffer_exact,
779 				     sizeof(buffer_exact));
780 		/*
781 		 * Provide a user_buffer with a size larger than the exact size to check
782 		 * if the kernel zeroes the trailing bytes.
783 		 */
784 		test_cmd_get_hw_info(self->device_id,
785 				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
786 				     sizeof(buffer_larger));
787 		/*
788 		 * Provide a user_buffer with a size smaller than the exact size to check
789 		 * that the fields within the size range still get updated.
790 		 */
791 		test_cmd_get_hw_info(self->device_id,
792 				     IOMMU_HW_INFO_TYPE_DEFAULT,
793 				     &buffer_smaller, sizeof(buffer_smaller));
794 		test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
795 		ASSERT_EQ(0, max_pasid);
796 		if (variant->pasid_capable) {
797 			test_cmd_get_hw_info_pasid(self->device_pasid_id,
798 						   &max_pasid);
799 			ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid);
800 		}
801 	} else {
802 		test_err_get_hw_info(ENOENT, self->device_id,
803 				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
804 				     sizeof(buffer_exact));
805 		test_err_get_hw_info(ENOENT, self->device_id,
806 				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
807 				     sizeof(buffer_larger));
808 	}
809 }
810 
811 TEST_F(iommufd_ioas, area)
812 {
813 	int i;
814 
815 	/* Unmap fails if nothing is mapped */
816 	for (i = 0; i != 10; i++)
817 		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
818 
819 	/* Unmap works */
820 	for (i = 0; i != 10; i++)
821 		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
822 					  self->base_iova + i * PAGE_SIZE);
823 	for (i = 0; i != 10; i++)
824 		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
825 				      PAGE_SIZE);
826 
827 	/* Split fails */
828 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
829 				  self->base_iova + 16 * PAGE_SIZE);
830 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
831 				  PAGE_SIZE);
832 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
833 				  PAGE_SIZE);
834 
835 	/* Over map fails */
836 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
837 				      self->base_iova + 16 * PAGE_SIZE);
838 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
839 				      self->base_iova + 16 * PAGE_SIZE);
840 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
841 				      self->base_iova + 17 * PAGE_SIZE);
842 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
843 				      self->base_iova + 15 * PAGE_SIZE);
844 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
845 				      self->base_iova + 15 * PAGE_SIZE);
846 
847 	/* unmap all works */
848 	test_ioctl_ioas_unmap(0, UINT64_MAX);
849 
850 	/* Unmap all succeeds on an empty IOAS */
851 	test_ioctl_ioas_unmap(0, UINT64_MAX);
852 }
853 
854 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
855 {
856 	uint64_t unmap_len;
857 	int i;
858 
859 	/* Give no_domain some space to rewind base_iova */
860 	self->base_iova += 4 * PAGE_SIZE;
861 
862 	for (i = 0; i != 4; i++)
863 		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
864 					  self->base_iova + i * 16 * PAGE_SIZE);
865 
866 	/* Unmapping an area that is not fully contained doesn't work */
867 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
868 				  8 * PAGE_SIZE);
869 	test_err_ioctl_ioas_unmap(ENOENT,
870 				  self->base_iova + 3 * 16 * PAGE_SIZE +
871 					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
872 				  8 * PAGE_SIZE);
873 
874 	/* Unmapping fully contained areas works */
875 	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
876 					    self->base_iova - 4 * PAGE_SIZE,
877 					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
878 						    4 * PAGE_SIZE,
879 					    &unmap_len));
880 	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
881 }
882 
883 TEST_F(iommufd_ioas, area_auto_iova)
884 {
885 	struct iommu_test_cmd test_cmd = {
886 		.size = sizeof(test_cmd),
887 		.op = IOMMU_TEST_OP_ADD_RESERVED,
888 		.id = self->ioas_id,
889 		.add_reserved = { .start = PAGE_SIZE * 4,
890 				  .length = PAGE_SIZE * 100 },
891 	};
892 	struct iommu_iova_range ranges[1] = {};
893 	struct iommu_ioas_allow_iovas allow_cmd = {
894 		.size = sizeof(allow_cmd),
895 		.ioas_id = self->ioas_id,
896 		.num_iovas = 1,
897 		.allowed_iovas = (uintptr_t)ranges,
898 	};
899 	__u64 iovas[10];
900 	int i;
901 
902 	/* Simple 4k pages */
903 	for (i = 0; i != 10; i++)
904 		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
905 	for (i = 0; i != 10; i++)
906 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
907 
908 	/* Kernel automatically aligns IOVAs properly */
909 	for (i = 0; i != 10; i++) {
910 		size_t length = PAGE_SIZE * (i + 1);
911 
912 		if (self->stdev_id) {
913 			test_ioctl_ioas_map(buffer, length, &iovas[i]);
914 		} else {
915 			test_ioctl_ioas_map((void *)(1UL << 31), length,
916 					    &iovas[i]);
917 		}
918 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
919 	}
920 	for (i = 0; i != 10; i++)
921 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
922 
923 	/* Avoids a reserved region */
924 	ASSERT_EQ(0,
925 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
926 			&test_cmd));
927 	for (i = 0; i != 10; i++) {
928 		size_t length = PAGE_SIZE * (i + 1);
929 
930 		test_ioctl_ioas_map(buffer, length, &iovas[i]);
931 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
932 		EXPECT_EQ(false,
933 			  iovas[i] > test_cmd.add_reserved.start &&
934 				  iovas[i] <
935 					  test_cmd.add_reserved.start +
936 						  test_cmd.add_reserved.length);
937 	}
938 	for (i = 0; i != 10; i++)
939 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
940 
941 	/* Allowed region intersects with a reserved region */
942 	ranges[0].start = PAGE_SIZE;
943 	ranges[0].last = PAGE_SIZE * 600;
944 	EXPECT_ERRNO(EADDRINUSE,
945 		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
946 
947 	/* Allocate from an allowed region */
948 	if (self->stdev_id) {
949 		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
950 		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
951 	} else {
952 		ranges[0].start = PAGE_SIZE * 200;
953 		ranges[0].last = PAGE_SIZE * 600 - 1;
954 	}
955 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
956 	for (i = 0; i != 10; i++) {
957 		size_t length = PAGE_SIZE * (i + 1);
958 
959 		test_ioctl_ioas_map(buffer, length, &iovas[i]);
960 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
961 		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
962 		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
963 		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
964 		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
965 	}
966 	for (i = 0; i != 10; i++)
967 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
968 }
969 
970 /*  https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com */
971 TEST_F(iommufd_ioas, reserved_overflow)
972 {
973 	struct iommu_test_cmd test_cmd = {
974 		.size = sizeof(test_cmd),
975 		.op = IOMMU_TEST_OP_ADD_RESERVED,
976 		.id = self->ioas_id,
977 		.add_reserved.start = 6,
978 	};
979 	unsigned int map_len;
980 	__u64 iova;
981 
982 	if (PAGE_SIZE == 4096) {
983 		test_cmd.add_reserved.length = 0xffffffffffff8001;
984 		map_len = 0x5000;
985 	} else {
986 		test_cmd.add_reserved.length =
987 			0xffffffffffffffff - MOCK_PAGE_SIZE * 16;
988 		map_len = MOCK_PAGE_SIZE * 10;
989 	}
990 
991 	ASSERT_EQ(0,
992 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
993 			&test_cmd));
994 	test_err_ioctl_ioas_map(ENOSPC, buffer, map_len, &iova);
995 }
996 
997 TEST_F(iommufd_ioas, area_allowed)
998 {
999 	struct iommu_test_cmd test_cmd = {
1000 		.size = sizeof(test_cmd),
1001 		.op = IOMMU_TEST_OP_ADD_RESERVED,
1002 		.id = self->ioas_id,
1003 		.add_reserved = { .start = PAGE_SIZE * 4,
1004 				  .length = PAGE_SIZE * 100 },
1005 	};
1006 	struct iommu_iova_range ranges[1] = {};
1007 	struct iommu_ioas_allow_iovas allow_cmd = {
1008 		.size = sizeof(allow_cmd),
1009 		.ioas_id = self->ioas_id,
1010 		.num_iovas = 1,
1011 		.allowed_iovas = (uintptr_t)ranges,
1012 	};
1013 
1014 	/* Reserved intersects an allowed */
1015 	allow_cmd.num_iovas = 1;
1016 	ranges[0].start = self->base_iova;
1017 	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
1018 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1019 	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
1020 	test_cmd.add_reserved.length = PAGE_SIZE;
1021 	EXPECT_ERRNO(EADDRINUSE,
1022 		     ioctl(self->fd,
1023 			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1024 			   &test_cmd));
1025 	allow_cmd.num_iovas = 0;
1026 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1027 
1028 	/* Allowed intersects a reserved */
1029 	ASSERT_EQ(0,
1030 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1031 			&test_cmd));
1032 	allow_cmd.num_iovas = 1;
1033 	ranges[0].start = self->base_iova;
1034 	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
1035 	EXPECT_ERRNO(EADDRINUSE,
1036 		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
1037 }
1038 
1039 TEST_F(iommufd_ioas, copy_area)
1040 {
1041 	struct iommu_ioas_copy copy_cmd = {
1042 		.size = sizeof(copy_cmd),
1043 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1044 		.dst_ioas_id = self->ioas_id,
1045 		.src_ioas_id = self->ioas_id,
1046 		.length = PAGE_SIZE,
1047 	};
1048 
1049 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
1050 
1051 	/* Copy inside a single IOAS */
1052 	copy_cmd.src_iova = self->base_iova;
1053 	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
1054 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1055 
1056 	/* Copy between IOAS's */
1057 	copy_cmd.src_iova = self->base_iova;
1058 	copy_cmd.dst_iova = 0;
1059 	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
1060 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1061 }
1062 
1063 TEST_F(iommufd_ioas, iova_ranges)
1064 {
1065 	struct iommu_test_cmd test_cmd = {
1066 		.size = sizeof(test_cmd),
1067 		.op = IOMMU_TEST_OP_ADD_RESERVED,
1068 		.id = self->ioas_id,
1069 		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
1070 	};
1071 	struct iommu_iova_range *ranges = buffer;
1072 	struct iommu_ioas_iova_ranges ranges_cmd = {
1073 		.size = sizeof(ranges_cmd),
1074 		.ioas_id = self->ioas_id,
1075 		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
1076 		.allowed_iovas = (uintptr_t)ranges,
1077 	};
1078 
1079 	/* Range can be read */
1080 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1081 	EXPECT_EQ(1, ranges_cmd.num_iovas);
1082 	if (!self->stdev_id) {
1083 		EXPECT_EQ(0, ranges[0].start);
1084 		EXPECT_EQ(SIZE_MAX, ranges[0].last);
1085 		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
1086 	} else {
1087 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1088 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1089 		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
1090 	}
1091 
1092 	/* Buffer too small */
1093 	memset(ranges, 0, BUFFER_SIZE);
1094 	ranges_cmd.num_iovas = 0;
1095 	EXPECT_ERRNO(EMSGSIZE,
1096 		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1097 	EXPECT_EQ(1, ranges_cmd.num_iovas);
1098 	EXPECT_EQ(0, ranges[0].start);
1099 	EXPECT_EQ(0, ranges[0].last);
1100 
1101 	/* 2 ranges */
1102 	ASSERT_EQ(0,
1103 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1104 			&test_cmd));
1105 	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
1106 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1107 	if (!self->stdev_id) {
1108 		EXPECT_EQ(2, ranges_cmd.num_iovas);
1109 		EXPECT_EQ(0, ranges[0].start);
1110 		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1111 		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
1112 		EXPECT_EQ(SIZE_MAX, ranges[1].last);
1113 	} else {
1114 		EXPECT_EQ(1, ranges_cmd.num_iovas);
1115 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1116 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1117 	}
1118 
1119 	/* Buffer too small */
1120 	memset(ranges, 0, BUFFER_SIZE);
1121 	ranges_cmd.num_iovas = 1;
1122 	if (!self->stdev_id) {
1123 		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
1124 					     &ranges_cmd));
1125 		EXPECT_EQ(2, ranges_cmd.num_iovas);
1126 		EXPECT_EQ(0, ranges[0].start);
1127 		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1128 	} else {
1129 		ASSERT_EQ(0,
1130 			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1131 		EXPECT_EQ(1, ranges_cmd.num_iovas);
1132 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1133 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1134 	}
1135 	EXPECT_EQ(0, ranges[1].start);
1136 	EXPECT_EQ(0, ranges[1].last);
1137 }
1138 
1139 TEST_F(iommufd_ioas, access_domain_destory)
1140 {
1141 	struct iommu_test_cmd access_cmd = {
1142 		.size = sizeof(access_cmd),
1143 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1144 		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
1145 				  .length = PAGE_SIZE},
1146 	};
1147 	size_t buf_size = 2 * HUGEPAGE_SIZE;
1148 	uint8_t *buf;
1149 
1150 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
1151 		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
1152 		   0);
1153 	ASSERT_NE(MAP_FAILED, buf);
1154 	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
1155 
1156 	test_cmd_create_access(self->ioas_id, &access_cmd.id,
1157 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1158 	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
1159 	ASSERT_EQ(0,
1160 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1161 			&access_cmd));
1162 
1163 	/* Causes a complicated unpin across a huge page boundary */
1164 	if (self->stdev_id)
1165 		test_ioctl_destroy(self->stdev_id);
1166 
1167 	test_cmd_destroy_access_pages(
1168 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1169 	test_cmd_destroy_access(access_cmd.id);
1170 	ASSERT_EQ(0, munmap(buf, buf_size));
1171 }
1172 
1173 TEST_F(iommufd_ioas, access_pin)
1174 {
1175 	struct iommu_test_cmd access_cmd = {
1176 		.size = sizeof(access_cmd),
1177 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1178 		.access_pages = { .iova = MOCK_APERTURE_START,
1179 				  .length = BUFFER_SIZE,
1180 				  .uptr = (uintptr_t)buffer },
1181 	};
1182 	struct iommu_test_cmd check_map_cmd = {
1183 		.size = sizeof(check_map_cmd),
1184 		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
1185 		.check_map = { .iova = MOCK_APERTURE_START,
1186 			       .length = BUFFER_SIZE,
1187 			       .uptr = (uintptr_t)buffer },
1188 	};
1189 	uint32_t access_pages_id;
1190 	unsigned int npages;
1191 
1192 	test_cmd_create_access(self->ioas_id, &access_cmd.id,
1193 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1194 
1195 	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
1196 		uint32_t mock_stdev_id;
1197 		uint32_t mock_hwpt_id;
1198 
1199 		access_cmd.access_pages.length = npages * PAGE_SIZE;
1200 
1201 		/* Single map/unmap */
1202 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1203 					  MOCK_APERTURE_START);
1204 		ASSERT_EQ(0, ioctl(self->fd,
1205 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1206 				   &access_cmd));
1207 		test_cmd_destroy_access_pages(
1208 			access_cmd.id,
1209 			access_cmd.access_pages.out_access_pages_id);
1210 
1211 		/* Double user */
1212 		ASSERT_EQ(0, ioctl(self->fd,
1213 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1214 				   &access_cmd));
1215 		access_pages_id = access_cmd.access_pages.out_access_pages_id;
1216 		ASSERT_EQ(0, ioctl(self->fd,
1217 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1218 				   &access_cmd));
1219 		test_cmd_destroy_access_pages(
1220 			access_cmd.id,
1221 			access_cmd.access_pages.out_access_pages_id);
1222 		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
1223 
1224 		/* Add/remove a domain with a user */
1225 		ASSERT_EQ(0, ioctl(self->fd,
1226 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1227 				   &access_cmd));
1228 		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1229 				     &mock_hwpt_id, NULL);
1230 		check_map_cmd.id = mock_hwpt_id;
1231 		ASSERT_EQ(0, ioctl(self->fd,
1232 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
1233 				   &check_map_cmd));
1234 
1235 		test_ioctl_destroy(mock_stdev_id);
1236 		test_cmd_destroy_access_pages(
1237 			access_cmd.id,
1238 			access_cmd.access_pages.out_access_pages_id);
1239 
1240 		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1241 	}
1242 	test_cmd_destroy_access(access_cmd.id);
1243 }
1244 
1245 TEST_F(iommufd_ioas, access_pin_unmap)
1246 {
1247 	struct iommu_test_cmd access_pages_cmd = {
1248 		.size = sizeof(access_pages_cmd),
1249 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1250 		.access_pages = { .iova = MOCK_APERTURE_START,
1251 				  .length = BUFFER_SIZE,
1252 				  .uptr = (uintptr_t)buffer },
1253 	};
1254 
1255 	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1256 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1257 	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1258 	ASSERT_EQ(0,
1259 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1260 			&access_pages_cmd));
1261 
1262 	/* Trigger the unmap op */
1263 	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1264 
1265 	/* kernel removed the item for us */
1266 	test_err_destroy_access_pages(
1267 		ENOENT, access_pages_cmd.id,
1268 		access_pages_cmd.access_pages.out_access_pages_id);
1269 }
1270 
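/*
 * Exercise IOMMU_TEST_OP_ACCESS_RW with small lengths at offsets straddling
 * the first page boundary of the mapping, in both read and write directions,
 * then once more across the whole multi-page buffer.
 */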
1271 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1272 			    unsigned int access_id, uint64_t iova,
1273 			    unsigned int def_flags)
1274 {
1275 	uint16_t tmp[32];
1276 	struct iommu_test_cmd access_cmd = {
1277 		.size = sizeof(access_cmd),
1278 		.op = IOMMU_TEST_OP_ACCESS_RW,
1279 		.id = access_id,
1280 		.access_rw = { .uptr = (uintptr_t)tmp },
1281 	};
1282 	uint16_t *buffer16 = buffer;
1283 	unsigned int i;
1284 	void *tmp2;
1285 
1286 	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1287 		buffer16[i] = rand();
1288 
1289 	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1290 	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1291 	     access_cmd.access_rw.iova++) {
1292 		for (access_cmd.access_rw.length = 1;
1293 		     access_cmd.access_rw.length < sizeof(tmp);
1294 		     access_cmd.access_rw.length++) {
1295 			access_cmd.access_rw.flags = def_flags;
1296 			ASSERT_EQ(0, ioctl(fd,
1297 					   _IOMMU_TEST_CMD(
1298 						   IOMMU_TEST_OP_ACCESS_RW),
1299 					   &access_cmd));
1300 			ASSERT_EQ(0,
1301 				  memcmp(buffer + (access_cmd.access_rw.iova -
1302 						   iova),
1303 					 tmp, access_cmd.access_rw.length));
1304 
1305 			for (i = 0; i != ARRAY_SIZE(tmp); i++)
1306 				tmp[i] = rand();
1307 			access_cmd.access_rw.flags = def_flags |
1308 						     MOCK_ACCESS_RW_WRITE;
1309 			ASSERT_EQ(0, ioctl(fd,
1310 					   _IOMMU_TEST_CMD(
1311 						   IOMMU_TEST_OP_ACCESS_RW),
1312 					   &access_cmd));
1313 			ASSERT_EQ(0,
1314 				  memcmp(buffer + (access_cmd.access_rw.iova -
1315 						   iova),
1316 					 tmp, access_cmd.access_rw.length));
1317 		}
1318 	}
1319 
1320 	/* Multi-page test */
1321 	tmp2 = malloc(BUFFER_SIZE);
1322 	ASSERT_NE(NULL, tmp2);
1323 	access_cmd.access_rw.iova = iova;
1324 	access_cmd.access_rw.length = BUFFER_SIZE;
1325 	access_cmd.access_rw.flags = def_flags;
1326 	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1327 	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1328 			   &access_cmd));
1329 	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1330 	free(tmp2);
1331 }
1332 
1333 TEST_F(iommufd_ioas, access_rw)
1334 {
1335 	__u32 access_id;
1336 	__u64 iova;
1337 
1338 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1339 	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1340 	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1341 	check_access_rw(_metadata, self->fd, access_id, iova,
1342 			MOCK_ACCESS_RW_SLOW_PATH);
1343 	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1344 	test_cmd_destroy_access(access_id);
1345 }
1346 
1347 TEST_F(iommufd_ioas, access_rw_unaligned)
1348 {
1349 	__u32 access_id;
1350 	__u64 iova;
1351 
1352 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1353 
1354 	/* Unaligned pages */
1355 	iova = self->base_iova + MOCK_PAGE_SIZE;
1356 	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1357 	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1358 	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1359 	test_cmd_destroy_access(access_id);
1360 }
1361 
1362 TEST_F(iommufd_ioas, fork_gone)
1363 {
1364 	__u32 access_id;
1365 	pid_t child;
1366 
1367 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1368 
1369 	/* Create a mapping with a different mm */
1370 	child = fork();
1371 	if (!child) {
1372 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1373 					  MOCK_APERTURE_START);
1374 		exit(0);
1375 	}
1376 	ASSERT_NE(-1, child);
1377 	ASSERT_EQ(child, waitpid(child, NULL, 0));
1378 
1379 	if (self->stdev_id) {
1380 		/*
1381 		 * If a domain already existed then everything was pinned within
1382 		 * the fork, so this copies from one domain to another.
1383 		 */
1384 		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1385 		check_access_rw(_metadata, self->fd, access_id,
1386 				MOCK_APERTURE_START, 0);
1387 
1388 	} else {
1389 		/*
1390 		 * Otherwise we need to actually pin pages which can't happen
1391 		 * since the fork is gone.
1392 		 */
1393 		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1394 	}
1395 
1396 	test_cmd_destroy_access(access_id);
1397 }
1398 
1399 TEST_F(iommufd_ioas, fork_present)
1400 {
1401 	__u32 access_id;
1402 	int pipefds[2];
1403 	uint64_t tmp;
1404 	pid_t child;
1405 	int efd;
1406 
1407 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1408 
1409 	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1410 	efd = eventfd(0, EFD_CLOEXEC);
1411 	ASSERT_NE(-1, efd);
1412 
1413 	/* Create a mapping with a different mm */
1414 	child = fork();
1415 	if (!child) {
1416 		__u64 iova;
1417 		uint64_t one = 1;
1418 
1419 		close(pipefds[1]);
1420 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1421 					  MOCK_APERTURE_START);
1422 		if (write(efd, &one, sizeof(one)) != sizeof(one))
1423 			exit(100);
1424 		if (read(pipefds[0], &iova, 1) != 1)
1425 			exit(100);
1426 		exit(0);
1427 	}
1428 	close(pipefds[0]);
1429 	ASSERT_NE(-1, child);
1430 	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1431 
1432 	/* Read pages from the remote process */
1433 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1434 	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1435 
1436 	ASSERT_EQ(0, close(pipefds[1]));
1437 	ASSERT_EQ(child, waitpid(child, NULL, 0));
1438 
1439 	test_cmd_destroy_access(access_id);
1440 }
1441 
1442 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1443 {
1444 	struct iommu_option cmd = {
1445 		.size = sizeof(cmd),
1446 		.option_id = IOMMU_OPTION_HUGE_PAGES,
1447 		.op = IOMMU_OPTION_OP_GET,
1448 		.val64 = 3,
1449 		.object_id = self->ioas_id,
1450 	};
1451 
1452 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1453 	ASSERT_EQ(1, cmd.val64);
1454 
1455 	cmd.op = IOMMU_OPTION_OP_SET;
1456 	cmd.val64 = 0;
1457 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1458 
1459 	cmd.op = IOMMU_OPTION_OP_GET;
1460 	cmd.val64 = 3;
1461 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1462 	ASSERT_EQ(0, cmd.val64);
1463 
1464 	cmd.op = IOMMU_OPTION_OP_SET;
1465 	cmd.val64 = 2;
1466 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1467 
1468 	cmd.op = IOMMU_OPTION_OP_SET;
1469 	cmd.val64 = 1;
1470 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1471 }
1472 
1473 TEST_F(iommufd_ioas, ioas_iova_alloc)
1474 {
1475 	unsigned int length;
1476 	__u64 iova;
1477 
1478 	for (length = 1; length != PAGE_SIZE * 2; length++) {
1479 		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1480 			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1481 		} else {
1482 			test_ioctl_ioas_map(buffer, length, &iova);
1483 			test_ioctl_ioas_unmap(iova, length);
1484 		}
1485 	}
1486 }
1487 
1488 TEST_F(iommufd_ioas, ioas_align_change)
1489 {
1490 	struct iommu_option cmd = {
1491 		.size = sizeof(cmd),
1492 		.option_id = IOMMU_OPTION_HUGE_PAGES,
1493 		.op = IOMMU_OPTION_OP_SET,
1494 		.object_id = self->ioas_id,
1495 		/* 0 means everything must be aligned to PAGE_SIZE */
1496 		.val64 = 0,
1497 	};
1498 
1499 	/*
1500 	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1501 	 * and map are present.
1502 	 */
1503 	if (variant->mock_domains)
1504 		return;
1505 
1506 	/*
1507 	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1508 	 */
1509 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1510 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1511 
1512 	/* Misalignment is rejected at map time */
1513 	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1514 				      PAGE_SIZE,
1515 				      MOCK_APERTURE_START + PAGE_SIZE);
1516 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1517 
1518 	/* Reduce alignment */
1519 	cmd.val64 = 1;
1520 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1521 
1522 	/* Confirm misalignment is rejected during alignment upgrade */
1523 	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1524 				  MOCK_APERTURE_START + PAGE_SIZE);
1525 	cmd.val64 = 0;
1526 	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1527 
1528 	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1529 	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1530 }
1531 
1532 TEST_F(iommufd_ioas, copy_sweep)
1533 {
1534 	struct iommu_ioas_copy copy_cmd = {
1535 		.size = sizeof(copy_cmd),
1536 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1537 		.src_ioas_id = self->ioas_id,
1538 		.dst_iova = MOCK_APERTURE_START,
1539 		.length = MOCK_PAGE_SIZE,
1540 	};
1541 	unsigned int dst_ioas_id;
1542 	uint64_t last_iova;
1543 	uint64_t iova;
1544 
1545 	test_ioctl_ioas_alloc(&dst_ioas_id);
1546 	copy_cmd.dst_ioas_id = dst_ioas_id;
1547 
1548 	if (variant->mock_domains)
1549 		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1550 	else
1551 		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1552 
1553 	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1554 				  MOCK_APERTURE_START);
1555 
1556 	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1557 	     iova += 511) {
1558 		copy_cmd.src_iova = iova;
1559 		if (iova < MOCK_APERTURE_START ||
1560 		    iova + copy_cmd.length - 1 > last_iova) {
1561 			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1562 						   &copy_cmd));
1563 		} else {
1564 			ASSERT_EQ(0,
1565 				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1566 			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1567 						 copy_cmd.length);
1568 		}
1569 	}
1570 
1571 	test_ioctl_destroy(dst_ioas_id);
1572 }
1573 
1574 TEST_F(iommufd_ioas, dmabuf_simple)
1575 {
1576 	size_t buf_size = PAGE_SIZE * 4;
1577 	__u64 iova;
1578 	int dfd;
1579 
1580 	test_cmd_get_dmabuf(buf_size, &dfd);
1581 	test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, 0, &iova);
1582 	test_err_ioctl_ioas_map_file(EINVAL, dfd, buf_size, buf_size, &iova);
1583 	test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, buf_size + 1, &iova);
1584 	test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
1585 
1586 	close(dfd);
1587 }
1588 
1589 TEST_F(iommufd_ioas, dmabuf_revoke)
1590 {
1591 	size_t buf_size = PAGE_SIZE * 4;
1592 	__u32 hwpt_id;
1593 	__u64 iova;
1594 	__u64 iova2;
1595 	int dfd;
1596 
1597 	test_cmd_get_dmabuf(buf_size, &dfd);
1598 	test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
1599 	test_cmd_revoke_dmabuf(dfd, true);
1600 
1601 	if (variant->mock_domains)
1602 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
1603 				    &hwpt_id);
1604 
1605 	test_err_ioctl_ioas_map_file(ENODEV, dfd, 0, buf_size, &iova2);
1606 
1607 	test_cmd_revoke_dmabuf(dfd, false);
1608 	test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova2);
1609 
1610 	/* Restore the mapping at the original iova */
1611 	test_ioctl_ioas_unmap(iova, buf_size);
1612 	test_ioctl_ioas_map_fixed_file(dfd, 0, buf_size, iova);
1613 
1614 	close(dfd);
1615 }
1616 
1617 FIXTURE(iommufd_mock_domain)
1618 {
1619 	int fd;
1620 	uint32_t ioas_id;
1621 	uint32_t hwpt_id;
1622 	uint32_t hwpt_ids[2];
1623 	uint32_t stdev_ids[2];
1624 	uint32_t idev_ids[2];
1625 	int mmap_flags;
1626 	size_t mmap_buf_size;
1627 };
1628 
1629 FIXTURE_VARIANT(iommufd_mock_domain)
1630 {
1631 	unsigned int mock_domains;
1632 	bool hugepages;
1633 	bool file;
1634 };
1635 
1636 FIXTURE_SETUP(iommufd_mock_domain)
1637 {
1638 	unsigned int i;
1639 
1640 	self->fd = open("/dev/iommu", O_RDWR);
1641 	ASSERT_NE(-1, self->fd);
1642 	test_ioctl_ioas_alloc(&self->ioas_id);
1643 
1644 	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1645 
1646 	for (i = 0; i != variant->mock_domains; i++) {
1647 		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1648 				     &self->hwpt_ids[i], &self->idev_ids[i]);
1649 		test_cmd_dev_check_cache_all(self->idev_ids[0],
1650 					     IOMMU_TEST_DEV_CACHE_DEFAULT);
1651 	}
1652 	self->hwpt_id = self->hwpt_ids[0];
1653 
1654 	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1655 	self->mmap_buf_size = PAGE_SIZE * 8;
1656 	if (variant->hugepages) {
1657 		/*
1658 		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1659 		 * not available.
1660 		 */
1661 		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1662 		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1663 	}
1664 }
1665 
1666 FIXTURE_TEARDOWN(iommufd_mock_domain)
1667 {
1668 	teardown_iommufd(self->fd, _metadata);
1669 }
1670 
1671 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1672 {
1673 	.mock_domains = 1,
1674 	.hugepages = false,
1675 	.file = false,
1676 };
1677 
1678 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1679 {
1680 	.mock_domains = 2,
1681 	.hugepages = false,
1682 	.file = false,
1683 };
1684 
1685 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1686 {
1687 	.mock_domains = 1,
1688 	.hugepages = true,
1689 	.file = false,
1690 };
1691 
1692 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1693 {
1694 	.mock_domains = 2,
1695 	.hugepages = true,
1696 	.file = false,
1697 };
1698 
1699 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
1700 {
1701 	.mock_domains = 1,
1702 	.hugepages = false,
1703 	.file = true,
1704 };
1705 
1706 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
1707 {
1708 	.mock_domains = 1,
1709 	.hugepages = true,
1710 	.file = true,
1711 };
1712 
1714 /* Have the kernel check that the user pages made it to the iommu_domain */
1715 #define check_mock_iova(_ptr, _iova, _length)                                \
1716 	({                                                                   \
1717 		struct iommu_test_cmd check_map_cmd = {                      \
1718 			.size = sizeof(check_map_cmd),                       \
1719 			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
1720 			.id = self->hwpt_id,                                 \
1721 			.check_map = { .iova = _iova,                        \
1722 				       .length = _length,                    \
1723 				       .uptr = (uintptr_t)(_ptr) },          \
1724 		};                                                           \
1725 		ASSERT_EQ(0,                                                 \
1726 			  ioctl(self->fd,                                    \
1727 				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1728 				&check_map_cmd));                            \
1729 		if (self->hwpt_ids[1]) {                                     \
1730 			check_map_cmd.id = self->hwpt_ids[1];                \
1731 			ASSERT_EQ(0,                                         \
1732 				  ioctl(self->fd,                            \
1733 					_IOMMU_TEST_CMD(                     \
1734 						IOMMU_TEST_OP_MD_CHECK_MAP), \
1735 					&check_map_cmd));                    \
1736 		}                                                            \
1737 	})
1738 
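/*
 * mmap flavour of the basic test: map one page, then punch holes in the VMA
 * and check that mapping across the holes fails with EFAULT.
 */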
1739 static void
1740 test_basic_mmap(struct __test_metadata *_metadata,
1741 		struct _test_data_iommufd_mock_domain *self,
1742 		const struct _fixture_variant_iommufd_mock_domain *variant)
1743 {
1744 	size_t buf_size = self->mmap_buf_size;
1745 	uint8_t *buf;
1746 	__u64 iova;
1747 
1748 	/* Simple one page map */
1749 	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1750 	check_mock_iova(buffer, iova, PAGE_SIZE);
1751 
1752 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1753 		   0);
1754 	ASSERT_NE(MAP_FAILED, buf);
1755 
1756 	/* EFAULT halfway through the mapping */
1757 	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1758 	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1759 
1760 	/* EFAULT on first page */
1761 	ASSERT_EQ(0, munmap(buf, buf_size / 2));
1762 	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1763 }
1764 
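/*
 * memfd flavour of the basic test: map one page of the global memfd, then
 * check EINVAL for a length larger than the file and for a file truncated
 * to zero.
 */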
1765 static void
1766 test_basic_file(struct __test_metadata *_metadata,
1767 		struct _test_data_iommufd_mock_domain *self,
1768 		const struct _fixture_variant_iommufd_mock_domain *variant)
1769 {
1770 	size_t buf_size = self->mmap_buf_size;
1771 	uint8_t *buf;
1772 	__u64 iova;
1773 	int mfd_tmp;
1774 	int prot = PROT_READ | PROT_WRITE;
1775 
1776 	/* Simple one page map */
1777 	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
1778 	check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
1779 
1780 	buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
1781 	ASSERT_NE(MAP_FAILED, buf);
1782 
1783 	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);
1784 
1785 	ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
1786 	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);
1787 
1788 	close(mfd_tmp);
1789 }
1790 
1791 TEST_F(iommufd_mock_domain, basic)
1792 {
1793 	if (variant->file)
1794 		test_basic_file(_metadata, self, variant);
1795 	else
1796 		test_basic_mmap(_metadata, self, variant);
1797 }
1798 
1799 TEST_F(iommufd_mock_domain, ro_unshare)
1800 {
1801 	uint8_t *buf;
1802 	__u64 iova;
1803 	int fd;
1804 
1805 	fd = open("/proc/self/exe", O_RDONLY);
1806 	ASSERT_NE(-1, fd);
1807 
1808 	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1809 	ASSERT_NE(MAP_FAILED, buf);
1810 	close(fd);
1811 
1812 	/*
1813 	 * There have been lots of changes to the "unshare" mechanism in
1814 	 * get_user_pages(); make sure it works right. Writing to the page
1815 	 * after mapping it read-only should not change the assigned PFN.
1816 	 */
1817 	ASSERT_EQ(0,
1818 		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1819 				       &iova, IOMMU_IOAS_MAP_READABLE));
1820 	check_mock_iova(buf, iova, PAGE_SIZE);
1821 	memset(buf, 1, PAGE_SIZE);
1822 	check_mock_iova(buf, iova, PAGE_SIZE);
1823 	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1824 }
1825 
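/*
 * Sweep every start/length combination at MOCK_PAGE_SIZE granularity
 * (coarser steps for hugepages), checking the domain mapping with
 * check_mock_iova and the page pin counts with check_refs.
 */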
1826 TEST_F(iommufd_mock_domain, all_aligns)
1827 {
1828 	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1829 						MOCK_PAGE_SIZE;
1830 	size_t buf_size = self->mmap_buf_size;
1831 	unsigned int start;
1832 	unsigned int end;
1833 	uint8_t *buf;
1834 	int prot = PROT_READ | PROT_WRITE;
1835 	int mfd = -1;
1836 
1837 	if (variant->file)
1838 		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1839 	else
1840 		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1841 	ASSERT_NE(MAP_FAILED, buf);
1842 	if (variant->file)
1843 		ASSERT_GT(mfd, 0);
1844 	check_refs(buf, buf_size, 0);
1845 
1846 	/*
1847 	 * Map every combination of size and alignment within a big region,
1848 	 * with fewer combinations for the hugepage case as it takes so long.
1849 	 */
1850 	for (start = 0; start < buf_size; start += test_step) {
1851 		if (variant->hugepages)
1852 			end = buf_size;
1853 		else
1854 			end = start + MOCK_PAGE_SIZE;
1855 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1856 			size_t length = end - start;
1857 			__u64 iova;
1858 
1859 			if (variant->file) {
1860 				test_ioctl_ioas_map_file(mfd, start, length,
1861 							 &iova);
1862 			} else {
1863 				test_ioctl_ioas_map(buf + start, length, &iova);
1864 			}
1865 			check_mock_iova(buf + start, iova, length);
1866 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1867 				   end / PAGE_SIZE * PAGE_SIZE -
1868 					   start / PAGE_SIZE * PAGE_SIZE,
1869 				   1);
1870 
1871 			test_ioctl_ioas_unmap(iova, length);
1872 		}
1873 	}
1874 	check_refs(buf, buf_size, 0);
1875 	ASSERT_EQ(0, munmap(buf, buf_size));
1876 	if (variant->file)
1877 		close(mfd);
1878 }
1879 
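/*
 * Same sweep as all_aligns, but a temporary second mock domain is attached
 * while each area is mapped so the already-pinned pages are copied into a
 * fresh domain before it is destroyed again.
 */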
1880 TEST_F(iommufd_mock_domain, all_aligns_copy)
1881 {
1882 	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1883 						MOCK_PAGE_SIZE;
1884 	size_t buf_size = self->mmap_buf_size;
1885 	unsigned int start;
1886 	unsigned int end;
1887 	uint8_t *buf;
1888 	int prot = PROT_READ | PROT_WRITE;
1889 	int mfd = -1;
1890 
1891 	if (variant->file)
1892 		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1893 	else
1894 		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1895 	ASSERT_NE(MAP_FAILED, buf);
1896 	if (variant->file)
1897 		ASSERT_GT(mfd, 0);
1898 	check_refs(buf, buf_size, 0);
1899 
1900 	/*
1901 	 * Map every combination of size and alignment within a big region,
1902 	 * with fewer combinations for the hugepage case as it takes so long.
1903 	 */
1904 	for (start = 0; start < buf_size; start += test_step) {
1905 		if (variant->hugepages)
1906 			end = buf_size;
1907 		else
1908 			end = start + MOCK_PAGE_SIZE;
1909 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1910 			size_t length = end - start;
1911 			unsigned int old_id;
1912 			uint32_t mock_stdev_id;
1913 			__u64 iova;
1914 
1915 			if (variant->file) {
1916 				test_ioctl_ioas_map_file(mfd, start, length,
1917 							 &iova);
1918 			} else {
1919 				test_ioctl_ioas_map(buf + start, length, &iova);
1920 			}
1921 
1922 			/* Add and destroy a domain while the area exists */
1923 			old_id = self->hwpt_ids[1];
1924 			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1925 					     &self->hwpt_ids[1], NULL);
1926 
1927 			check_mock_iova(buf + start, iova, length);
1928 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1929 				   end / PAGE_SIZE * PAGE_SIZE -
1930 					   start / PAGE_SIZE * PAGE_SIZE,
1931 				   1);
1932 
1933 			test_ioctl_destroy(mock_stdev_id);
1934 			self->hwpt_ids[1] = old_id;
1935 
1936 			test_ioctl_ioas_unmap(iova, length);
1937 		}
1938 	}
1939 	check_refs(buf, buf_size, 0);
1940 	ASSERT_EQ(0, munmap(buf, buf_size));
1941 	if (variant->file)
1942 		close(mfd);
1943 }
1944 
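/*
 * Copy pinned pages between IOASes: pin through an access object,
 * IOMMU_IOAS_COPY into the domain-backed IOAS, then replace the access's
 * IOAS with a new one and repeat the copy.
 */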
1945 TEST_F(iommufd_mock_domain, user_copy)
1946 {
1947 	void *buf = variant->file ? mfd_buffer : buffer;
1948 	struct iommu_test_cmd access_cmd = {
1949 		.size = sizeof(access_cmd),
1950 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1951 		.access_pages = { .length = BUFFER_SIZE,
1952 				  .uptr = (uintptr_t)buf },
1953 	};
1954 	struct iommu_ioas_copy copy_cmd = {
1955 		.size = sizeof(copy_cmd),
1956 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1957 		.dst_ioas_id = self->ioas_id,
1958 		.dst_iova = MOCK_APERTURE_START,
1959 		.length = BUFFER_SIZE,
1960 	};
1961 	struct iommu_ioas_unmap unmap_cmd = {
1962 		.size = sizeof(unmap_cmd),
1963 		.ioas_id = self->ioas_id,
1964 		.iova = MOCK_APERTURE_START,
1965 		.length = BUFFER_SIZE,
1966 	};
1967 	unsigned int new_ioas_id, ioas_id;
1968 
1969 	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1970 	test_ioctl_ioas_alloc(&ioas_id);
1971 	if (variant->file) {
1972 		test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
1973 					    &copy_cmd.src_iova);
1974 	} else {
1975 		test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
1976 				       &copy_cmd.src_iova);
1977 	}
1978 	test_cmd_create_access(ioas_id, &access_cmd.id,
1979 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1980 
1981 	access_cmd.access_pages.iova = copy_cmd.src_iova;
1982 	ASSERT_EQ(0,
1983 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1984 			&access_cmd));
1985 	copy_cmd.src_ioas_id = ioas_id;
1986 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1987 	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1988 
1989 	/* Now replace the ioas with a new one */
1990 	test_ioctl_ioas_alloc(&new_ioas_id);
1991 	if (variant->file) {
1992 		test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
1993 					    &copy_cmd.src_iova);
1994 	} else {
1995 		test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
1996 				       &copy_cmd.src_iova);
1997 	}
1998 	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1999 
2000 	/* Destroy the old ioas and cleanup copied mapping */
2001 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
2002 	test_ioctl_destroy(ioas_id);
2003 
2004 	/* Then run the same test again with the new ioas */
2005 	access_cmd.access_pages.iova = copy_cmd.src_iova;
2006 	ASSERT_EQ(0,
2007 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
2008 			&access_cmd));
2009 	copy_cmd.src_ioas_id = new_ioas_id;
2010 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
2011 	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
2012 
2013 	test_cmd_destroy_access_pages(
2014 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
2015 	test_cmd_destroy_access(access_cmd.id);
2016 
2017 	test_ioctl_destroy(new_ioas_id);
2018 }
2019 
2020 TEST_F(iommufd_mock_domain, replace)
2021 {
2022 	uint32_t ioas_id;
2023 
2024 	test_ioctl_ioas_alloc(&ioas_id);
2025 
2026 	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
2027 
2028 	/*
2029 	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
2030 	 * should get ENOENT when we try to use it.
2031 	 */
2032 	if (variant->mock_domains == 1)
2033 		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
2034 					     self->hwpt_ids[0]);
2035 
2036 	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
2037 	if (variant->mock_domains >= 2) {
2038 		test_cmd_mock_domain_replace(self->stdev_ids[0],
2039 					     self->hwpt_ids[1]);
2040 		test_cmd_mock_domain_replace(self->stdev_ids[0],
2041 					     self->hwpt_ids[1]);
2042 		test_cmd_mock_domain_replace(self->stdev_ids[0],
2043 					     self->hwpt_ids[0]);
2044 	}
2045 
2046 	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
2047 	test_ioctl_destroy(ioas_id);
2048 }
2049 
2050 TEST_F(iommufd_mock_domain, alloc_hwpt)
2051 {
2052 	int i;
2053 
2054 	for (i = 0; i != variant->mock_domains; i++) {
2055 		uint32_t hwpt_id[2];
2056 		uint32_t stddev_id;
2057 
2058 		test_err_hwpt_alloc(EOPNOTSUPP,
2059 				    self->idev_ids[i], self->ioas_id,
2060 				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
2061 		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
2062 				    0, &hwpt_id[0]);
2063 		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
2064 				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
2065 
2066 		/* Do a hw_pagetable rotation test */
2067 		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
2068 		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
2069 		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
2070 		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
2071 		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
2072 		test_ioctl_destroy(hwpt_id[1]);
2073 
2074 		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
2075 		test_ioctl_destroy(stddev_id);
2076 		test_ioctl_destroy(hwpt_id[0]);
2077 	}
2078 }
2079 
2080 FIXTURE(iommufd_dirty_tracking)
2081 {
2082 	int fd;
2083 	uint32_t ioas_id;
2084 	uint32_t hwpt_id;
2085 	uint32_t stdev_id;
2086 	uint32_t idev_id;
2087 	unsigned long page_size;
2088 	unsigned long bitmap_size;
2089 	void *bitmap;
2090 	void *buffer;
2091 };
2092 
2093 FIXTURE_VARIANT(iommufd_dirty_tracking)
2094 {
2095 	unsigned long buffer_size;
2096 	bool hugepages;
2097 };
2098 
2099 FIXTURE_SETUP(iommufd_dirty_tracking)
2100 {
2101 	struct iommu_option cmd = {
2102 		.size = sizeof(cmd),
2103 		.option_id = IOMMU_OPTION_HUGE_PAGES,
2104 		.op = IOMMU_OPTION_OP_SET,
2105 		.val64 = 0,
2106 	};
2107 	size_t mmap_buffer_size;
2108 	unsigned long size;
2109 	int mmap_flags;
2110 	void *vrc;
2111 	int rc;
2112 
2113 	if (variant->buffer_size < MOCK_PAGE_SIZE) {
2114 		SKIP(return,
2115 		     "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%u",
2116 		     variant->buffer_size, MOCK_PAGE_SIZE);
2117 	}
2118 
2119 	self->fd = open("/dev/iommu", O_RDWR);
2120 	ASSERT_NE(-1, self->fd);
2121 
2122 	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
2123 	mmap_buffer_size = variant->buffer_size;
2124 	if (variant->hugepages) {
2125 		/*
2126 		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
2127 		 * not available.
2128 		 */
2129 		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
2130 
2131 		/*
2132 		 * Allocation must be aligned to HUGEPAGE_SIZE, because the
2133 		 * following mmap() will automatically align the length to be a
2134 		 * multiple of the underlying huge page size. Failing to do the
2135 		 * same at this allocation will result in a memory overwrite by
2136 		 * the mmap().
2137 		 */
2138 		if (mmap_buffer_size < HUGEPAGE_SIZE)
2139 			mmap_buffer_size = HUGEPAGE_SIZE;
2140 	}
2141 
2142 	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
2143 	if (rc || !self->buffer) {
2144 		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
2145 			   mmap_buffer_size, rc);
2146 	}
2147 	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
2148 	vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
2149 		   mmap_flags, -1, 0);
2150 	assert(vrc == self->buffer);
2151 
2152 	self->page_size = MOCK_PAGE_SIZE;
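	/* bitmap_size is in bits: one dirty bit per mock page of the buffer */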
2153 	self->bitmap_size = variant->buffer_size / self->page_size;
2154 
2155 	/* Provision with an extra (PAGE_SIZE) for the unaligned case */
2156 	size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
2157 	rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
2158 	assert(!rc);
2159 	assert(self->bitmap);
2160 	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
2161 
2162 	test_ioctl_ioas_alloc(&self->ioas_id);
2163 
2164 	/*
2165 	 * For dirty testing it is important that the page size fed into
2166 	 * the iommu page tables matches the size the dirty logic
2167 	 * expects, or set_dirty can mark more pages dirty than intended.
2168 	 */
2169 	cmd.object_id = self->ioas_id;
2170 	if (!variant->hugepages)
2171 		ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2172 
2173 	test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
2174 			     &self->idev_id);
2175 }
2176 
2177 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
2178 {
2179 	free(self->buffer);
2180 	free(self->bitmap);
2181 	teardown_iommufd(self->fd, _metadata);
2182 }
2183 
2184 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
2185 {
2186 	/* half of a u8 index bitmap */
2187 	.buffer_size = 8UL * 1024UL,
2188 };
2189 
2190 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
2191 {
2192 	/* one u8 index bitmap */
2193 	.buffer_size = 16UL * 1024UL,
2194 };
2195 
2196 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
2197 {
2198 	/* one u32 index bitmap */
2199 	.buffer_size = 64UL * 1024UL,
2200 };
2201 
2202 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
2203 {
2204 	/* one u64 index bitmap */
2205 	.buffer_size = 128UL * 1024UL,
2206 };
2207 
2208 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
2209 {
2210 	/* two u64 indexes and a trailing end bitmap */
2211 	.buffer_size = 320UL * 1024UL,
2212 };
2213 
2214 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
2215 {
2216 	/* 4K bitmap (64M IOVA range) */
2217 	.buffer_size = 64UL * 1024UL * 1024UL,
2218 };
2219 
2220 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
2221 {
2222 	/* 4K bitmap (64M IOVA range) */
2223 	.buffer_size = 64UL * 1024UL * 1024UL,
2224 	.hugepages = true,
2225 };
2226 
2227 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
2228 {
2229 	/* 8K bitmap (128M IOVA range) */
2230 	.buffer_size = 128UL * 1024UL * 1024UL,
2231 };
2232 
2233 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
2234 {
2235 	/* 8K bitmap (128M IOVA range) */
2236 	.buffer_size = 128UL * 1024UL * 1024UL,
2237 	.hugepages = true,
2238 };
2239 
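/*
 * A dirty tracking hwpt must refuse a mock device created with
 * MOCK_FLAGS_DEVICE_NO_DIRTY, and such a device must not be able to allocate
 * a dirty tracking hwpt at all.
 */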
2240 TEST_F(iommufd_dirty_tracking, enforce_dirty)
2241 {
2242 	uint32_t ioas_id, stddev_id, idev_id;
2243 	uint32_t hwpt_id, _hwpt_id;
2244 	uint32_t dev_flags;
2245 
2246 	/* Regular case */
2247 	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
2248 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2249 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2250 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2251 	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
2252 				   NULL);
2253 	test_ioctl_destroy(stddev_id);
2254 	test_ioctl_destroy(hwpt_id);
2255 
2256 	/* IOMMU device does not support dirty tracking */
2257 	test_ioctl_ioas_alloc(&ioas_id);
2258 	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
2259 				   &idev_id);
2260 	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
2261 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2262 	test_ioctl_destroy(stddev_id);
2263 }
2264 
2265 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
2266 {
2267 	uint32_t stddev_id;
2268 	uint32_t hwpt_id;
2269 
2270 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2271 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2272 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2273 	test_cmd_set_dirty_tracking(hwpt_id, true);
2274 	test_cmd_set_dirty_tracking(hwpt_id, false);
2275 
2276 	test_ioctl_destroy(stddev_id);
2277 	test_ioctl_destroy(hwpt_id);
2278 }
2279 
2280 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
2281 {
2282 	uint32_t caps = 0;
2283 	uint32_t stddev_id;
2284 	uint32_t hwpt_id;
2285 
2286 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
2287 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2288 	test_cmd_get_hw_capabilities(self->idev_id, caps);
2289 	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
2290 		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);
2291 
2292 	test_ioctl_destroy(stddev_id);
2293 	test_ioctl_destroy(hwpt_id);
2294 }
2295 
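/*
 * Map the whole test buffer at MOCK_APERTURE_START, enable dirty tracking
 * and read the dirty bitmap back into aligned, PAGE_SIZE unaligned and u64
 * unaligned user bitmaps.
 */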
2296 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
2297 {
2298 	uint32_t page_size = MOCK_PAGE_SIZE;
2299 	uint32_t ioas_id = self->ioas_id;
2300 	uint32_t hwpt_id;
2301 
2302 	if (variant->hugepages)
2303 		page_size = MOCK_HUGE_PAGE_SIZE;
2304 
2305 	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2306 				     variant->buffer_size, MOCK_APERTURE_START);
2307 
2308 	if (variant->hugepages)
2309 		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2310 					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2311 					    MOCK_IOMMUPT_HUGE, &hwpt_id);
2312 	else
2313 		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2314 					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2315 					    MOCK_IOMMUPT_DEFAULT, &hwpt_id);
2316 
2317 	test_cmd_set_dirty_tracking(hwpt_id, true);
2318 
2319 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2320 				MOCK_APERTURE_START, self->page_size, page_size,
2321 				self->bitmap, self->bitmap_size, 0, _metadata);
2322 
2323 	/* PAGE_SIZE unaligned bitmap */
2324 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2325 				MOCK_APERTURE_START, self->page_size, page_size,
2326 				self->bitmap + MOCK_PAGE_SIZE,
2327 				self->bitmap_size, 0, _metadata);
2328 
2329 	/* u64 unaligned bitmap */
2330 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2331 				MOCK_APERTURE_START, self->page_size, page_size,
2332 				self->bitmap + 0xff1, self->bitmap_size, 0,
2333 				_metadata);
2334 
2335 	test_ioctl_destroy(hwpt_id);
2336 }
2337 
2338 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
2339 {
2340 	uint32_t page_size = MOCK_PAGE_SIZE;
2341 	uint32_t ioas_id = self->ioas_id;
2342 	uint32_t hwpt_id;
2343 
2344 	if (variant->hugepages)
2345 		page_size = MOCK_HUGE_PAGE_SIZE;
2346 
2347 	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2348 				     variant->buffer_size, MOCK_APERTURE_START);
2349 
2351 	if (variant->hugepages)
2352 		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2353 					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2354 					    MOCK_IOMMUPT_HUGE, &hwpt_id);
2355 	else
2356 		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
2357 					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
2358 					    MOCK_IOMMUPT_DEFAULT, &hwpt_id);
2359 
2360 	test_cmd_set_dirty_tracking(hwpt_id, true);
2361 
2362 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2363 				MOCK_APERTURE_START, self->page_size, page_size,
2364 				self->bitmap, self->bitmap_size,
2365 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2366 				_metadata);
2367 
2368 	/* Unaligned bitmap */
2369 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2370 				MOCK_APERTURE_START, self->page_size, page_size,
2371 				self->bitmap + MOCK_PAGE_SIZE,
2372 				self->bitmap_size,
2373 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2374 				_metadata);
2375 
2376 	/* u64 unaligned bitmap */
2377 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2378 				MOCK_APERTURE_START, self->page_size, page_size,
2379 				self->bitmap + 0xff1, self->bitmap_size,
2380 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2381 				_metadata);
2382 
2383 	test_ioctl_destroy(hwpt_id);
2384 }
2385 
2386 /* VFIO compatibility IOCTLs */
2387 
2388 TEST_F(iommufd, simple_ioctls)
2389 {
2390 	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
2391 	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
2392 }
2393 
2394 TEST_F(iommufd, unmap_cmd)
2395 {
2396 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2397 		.iova = MOCK_APERTURE_START,
2398 		.size = PAGE_SIZE,
2399 	};
2400 
2401 	unmap_cmd.argsz = 1;
2402 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2403 
2404 	unmap_cmd.argsz = sizeof(unmap_cmd);
2405 	unmap_cmd.flags = 1 << 31;
2406 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2407 
2408 	unmap_cmd.flags = 0;
2409 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2410 }
2411 
2412 TEST_F(iommufd, map_cmd)
2413 {
2414 	struct vfio_iommu_type1_dma_map map_cmd = {
2415 		.iova = MOCK_APERTURE_START,
2416 		.size = PAGE_SIZE,
2417 		.vaddr = (__u64)buffer,
2418 	};
2419 
2420 	map_cmd.argsz = 1;
2421 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2422 
2423 	map_cmd.argsz = sizeof(map_cmd);
2424 	map_cmd.flags = 1 << 31;
2425 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2426 
2427 	/* Requires a domain to be attached */
2428 	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2429 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2430 }
2431 
2432 TEST_F(iommufd, info_cmd)
2433 {
2434 	struct vfio_iommu_type1_info info_cmd = {};
2435 
2436 	/* Invalid argsz */
2437 	info_cmd.argsz = 1;
2438 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2439 
2440 	info_cmd.argsz = sizeof(info_cmd);
2441 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2442 }
2443 
2444 TEST_F(iommufd, set_iommu_cmd)
2445 {
2446 	/* Requires a domain to be attached */
2447 	EXPECT_ERRNO(ENODEV,
2448 		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2449 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2450 }
2451 
2452 TEST_F(iommufd, vfio_ioas)
2453 {
2454 	struct iommu_vfio_ioas vfio_ioas_cmd = {
2455 		.size = sizeof(vfio_ioas_cmd),
2456 		.op = IOMMU_VFIO_IOAS_GET,
2457 	};
2458 	__u32 ioas_id;
2459 
2460 	/* ENODEV if there is no compat ioas */
2461 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2462 
2463 	/* Invalid id for set */
2464 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2465 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2466 
2467 	/* Valid id for set */
2468 	test_ioctl_ioas_alloc(&ioas_id);
2469 	vfio_ioas_cmd.ioas_id = ioas_id;
2470 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2471 
2472 	/* Same id comes back from get */
2473 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2474 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2475 	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2476 
2477 	/* Clear works */
2478 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2479 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2480 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2481 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2482 }
2483 
2484 FIXTURE(vfio_compat_mock_domain)
2485 {
2486 	int fd;
2487 	uint32_t ioas_id;
2488 };
2489 
2490 FIXTURE_VARIANT(vfio_compat_mock_domain)
2491 {
2492 	unsigned int version;
2493 };
2494 
2495 FIXTURE_SETUP(vfio_compat_mock_domain)
2496 {
2497 	struct iommu_vfio_ioas vfio_ioas_cmd = {
2498 		.size = sizeof(vfio_ioas_cmd),
2499 		.op = IOMMU_VFIO_IOAS_SET,
2500 	};
2501 
2502 	self->fd = open("/dev/iommu", O_RDWR);
2503 	ASSERT_NE(-1, self->fd);
2504 
2505 	/* Create what VFIO would consider a group */
2506 	test_ioctl_ioas_alloc(&self->ioas_id);
2507 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2508 
2509 	/* Attach it to the vfio compat */
2510 	vfio_ioas_cmd.ioas_id = self->ioas_id;
2511 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2512 	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2513 }
2514 
2515 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2516 {
2517 	teardown_iommufd(self->fd, _metadata);
2518 }
2519 
2520 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2521 {
2522 	.version = VFIO_TYPE1v2_IOMMU,
2523 };
2524 
2525 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2526 {
2527 	.version = VFIO_TYPE1_IOMMU,
2528 };
2529 
2530 TEST_F(vfio_compat_mock_domain, simple_close)
2531 {
2532 }
2533 
2534 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2535 {
2536 	struct iommu_option cmd = {
2537 		.size = sizeof(cmd),
2538 		.option_id = IOMMU_OPTION_HUGE_PAGES,
2539 		.op = IOMMU_OPTION_OP_GET,
2540 		.val64 = 3,
2541 		.object_id = self->ioas_id,
2542 	};
2543 
2544 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2545 	if (variant->version == VFIO_TYPE1_IOMMU) {
2546 		ASSERT_EQ(0, cmd.val64);
2547 	} else {
2548 		ASSERT_EQ(1, cmd.val64);
2549 	}
2550 }
2551 
2552 /*
2553  * Execute an ioctl command stored in buffer and check that the result does not
2554  * overflow memory.
2555  */
2556 static bool is_filled(const void *buf, uint8_t c, size_t len)
2557 {
2558 	const uint8_t *cbuf = buf;
2559 
2560 	for (; len; cbuf++, len--)
2561 		if (*cbuf != c)
2562 			return false;
2563 	return true;
2564 }
2565 
2566 #define ioctl_check_buf(fd, cmd)                                         \
2567 	({                                                               \
2568 		size_t _cmd_len = *(__u32 *)buffer;                      \
2569 									 \
2570 		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2571 		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
2572 		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
2573 					  BUFFER_SIZE - _cmd_len));      \
2574 	})
2575 
2576 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2577 				      struct vfio_iommu_type1_info *info_cmd)
2578 {
2579 	const struct vfio_info_cap_header *cap;
2580 
2581 	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2582 	cap = buffer + info_cmd->cap_offset;
2583 	while (true) {
2584 		size_t cap_size;
2585 
2586 		if (cap->next)
2587 			cap_size = (buffer + cap->next) - (void *)cap;
2588 		else
2589 			cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2590 
2591 		switch (cap->id) {
2592 		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2593 			struct vfio_iommu_type1_info_cap_iova_range *data =
2594 				(void *)cap;
2595 
2596 			ASSERT_EQ(1, data->header.version);
2597 			ASSERT_EQ(1, data->nr_iovas);
2598 			EXPECT_EQ(MOCK_APERTURE_START,
2599 				  data->iova_ranges[0].start);
2600 			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2601 			break;
2602 		}
2603 		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2604 			struct vfio_iommu_type1_info_dma_avail *data =
2605 				(void *)cap;
2606 
2607 			ASSERT_EQ(1, data->header.version);
2608 			ASSERT_EQ(sizeof(*data), cap_size);
2609 			break;
2610 		}
2611 		default:
2612 			ASSERT_EQ(false, true);
2613 			break;
2614 		}
2615 		if (!cap->next)
2616 			break;
2617 
2618 		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2619 		ASSERT_GE(buffer + cap->next, (void *)cap);
2620 		cap = buffer + cap->next;
2621 	}
2622 }
2623 
2624 TEST_F(vfio_compat_mock_domain, get_info)
2625 {
2626 	struct vfio_iommu_type1_info *info_cmd = buffer;
2627 	unsigned int i;
2628 	size_t caplen;
2629 
2630 	/* Pre-cap ABI */
2631 	*info_cmd = (struct vfio_iommu_type1_info){
2632 		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2633 	};
2634 	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2635 	ASSERT_NE(0, info_cmd->iova_pgsizes);
2636 	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2637 		  info_cmd->flags);
2638 
2639 	/* Read the cap chain size */
2640 	*info_cmd = (struct vfio_iommu_type1_info){
2641 		.argsz = sizeof(*info_cmd),
2642 	};
2643 	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2644 	ASSERT_NE(0, info_cmd->iova_pgsizes);
2645 	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2646 		  info_cmd->flags);
2647 	ASSERT_EQ(0, info_cmd->cap_offset);
2648 	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2649 
2650 	/* Read the caps; the kernel should never create corrupted caps */
2651 	caplen = info_cmd->argsz;
2652 	for (i = sizeof(*info_cmd); i < caplen; i++) {
2653 		*info_cmd = (struct vfio_iommu_type1_info){
2654 			.argsz = i,
2655 		};
2656 		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2657 		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2658 			  info_cmd->flags);
2659 		if (!info_cmd->cap_offset)
2660 			continue;
2661 		check_vfio_info_cap_chain(_metadata, info_cmd);
2662 	}
2663 }
2664 
2665 static void shuffle_array(unsigned long *array, size_t nelms)
2666 {
2667 	unsigned int i;
2668 
2669 	/* Shuffle */
2670 	for (i = 0; i != nelms; i++) {
2671 		unsigned long tmp = array[i];
2672 		unsigned int other = rand() % (nelms - i);
2673 
2674 		array[i] = array[other];
2675 		array[other] = tmp;
2676 	}
2677 }
2678 
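/*
 * VFIO type1 compat: map/unmap the whole buffer, check the UNMAP_FLAG_ALL
 * argument rules, then map each page separately and unmap them in a shuffled
 * order.
 */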
2679 TEST_F(vfio_compat_mock_domain, map)
2680 {
2681 	struct vfio_iommu_type1_dma_map map_cmd = {
2682 		.argsz = sizeof(map_cmd),
2683 		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2684 		.vaddr = (uintptr_t)buffer,
2685 		.size = BUFFER_SIZE,
2686 		.iova = MOCK_APERTURE_START,
2687 	};
2688 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2689 		.argsz = sizeof(unmap_cmd),
2690 		.size = BUFFER_SIZE,
2691 		.iova = MOCK_APERTURE_START,
2692 	};
2693 	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2694 	unsigned int i;
2695 
2696 	/* Simple map/unmap */
2697 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2698 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2699 	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2700 	/* Unmapping an already-empty range succeeds */
2701 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2702 
2703 	/* UNMAP_FLAG_ALL requires 0 iova/size */
2704 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2705 	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2706 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2707 
2708 	unmap_cmd.iova = 0;
2709 	unmap_cmd.size = 0;
2710 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2711 	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2712 
2713 	/* Small pages */
2714 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2715 		map_cmd.iova = pages_iova[i] =
2716 			MOCK_APERTURE_START + i * PAGE_SIZE;
2717 		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2718 		map_cmd.size = PAGE_SIZE;
2719 		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2720 	}
2721 	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2722 
2723 	unmap_cmd.flags = 0;
2724 	unmap_cmd.size = PAGE_SIZE;
2725 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2726 		unmap_cmd.iova = pages_iova[i];
2727 		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2728 	}
2729 }
2730 
2731 TEST_F(vfio_compat_mock_domain, huge_map)
2732 {
2733 	size_t buf_size = HUGEPAGE_SIZE * 2;
2734 	struct vfio_iommu_type1_dma_map map_cmd = {
2735 		.argsz = sizeof(map_cmd),
2736 		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2737 		.size = buf_size,
2738 		.iova = MOCK_APERTURE_START,
2739 	};
2740 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2741 		.argsz = sizeof(unmap_cmd),
2742 	};
2743 	unsigned long pages_iova[16];
2744 	unsigned int i;
2745 	void *buf;
2746 
2747 	/* Test huge pages and splitting */
2748 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2749 		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2750 		   0);
2751 	ASSERT_NE(MAP_FAILED, buf);
2752 	map_cmd.vaddr = (uintptr_t)buf;
2753 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2754 
2755 	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2756 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2757 		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2758 	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2759 
2760 	/* type1 mode can cut up larger mappings, type1v2 always fails */
2761 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2762 		unmap_cmd.iova = pages_iova[i];
2763 		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2764 		if (variant->version == VFIO_TYPE1_IOMMU) {
2765 			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2766 					   &unmap_cmd));
2767 		} else {
2768 			EXPECT_ERRNO(ENOENT,
2769 				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2770 					   &unmap_cmd));
2771 		}
2772 	}
2773 }
2774 
2775 FIXTURE(iommufd_viommu)
2776 {
2777 	int fd;
2778 	uint32_t ioas_id;
2779 	uint32_t stdev_id;
2780 	uint32_t hwpt_id;
2781 	uint32_t nested_hwpt_id;
2782 	uint32_t device_id;
2783 	uint32_t viommu_id;
2784 };
2785 
2786 FIXTURE_VARIANT(iommufd_viommu)
2787 {
2788 	unsigned int viommu;
2789 };
2790 
2791 FIXTURE_SETUP(iommufd_viommu)
2792 {
2793 	self->fd = open("/dev/iommu", O_RDWR);
2794 	ASSERT_NE(-1, self->fd);
2795 	test_ioctl_ioas_alloc(&self->ioas_id);
2796 	test_ioctl_set_default_memory_limit();
2797 
2798 	if (variant->viommu) {
2799 		struct iommu_hwpt_selftest data = {
2800 			.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2801 		};
2802 
2803 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
2804 				     &self->device_id);
2805 
2806 		/* Allocate a nesting parent hwpt */
2807 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
2808 				    IOMMU_HWPT_ALLOC_NEST_PARENT,
2809 				    &self->hwpt_id);
2810 
2811 		/* Allocate a vIOMMU taking refcount of the parent hwpt */
2812 		test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2813 				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2814 				      &self->viommu_id);
2815 
2816 		/* Allocate a regular nested hwpt */
2817 		test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
2818 					   &self->nested_hwpt_id,
2819 					   IOMMU_HWPT_DATA_SELFTEST, &data,
2820 					   sizeof(data));
2821 	}
2822 }
2823 
2824 FIXTURE_TEARDOWN(iommufd_viommu)
2825 {
2826 	teardown_iommufd(self->fd, _metadata);
2827 }
2828 
2829 FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
2830 {
2831 	.viommu = 0,
2832 };
2833 
2834 FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
2835 {
2836 	.viommu = 1,
2837 };
2838 
2839 TEST_F(iommufd_viommu, viommu_auto_destroy)
2840 {
2841 }
2842 
2843 TEST_F(iommufd_viommu, viommu_negative_tests)
2844 {
2845 	uint32_t device_id = self->device_id;
2846 	uint32_t ioas_id = self->ioas_id;
2847 	uint32_t hwpt_id;
2848 
2849 	if (self->device_id) {
2850 		/* Negative test -- invalid hwpt (hwpt_id=0) */
2851 		test_err_viommu_alloc(ENOENT, device_id, 0,
2852 				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2853 				      NULL);
2854 
2855 		/* Negative test -- not a nesting parent hwpt */
2856 		test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
2857 		test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
2858 				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2859 				      NULL);
2860 		test_ioctl_destroy(hwpt_id);
2861 
2862 		/* Negative test -- unsupported viommu type */
2863 		test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
2864 				      0xdead, NULL, 0, NULL);
2865 		EXPECT_ERRNO(EBUSY,
2866 			     _test_ioctl_destroy(self->fd, self->hwpt_id));
2867 		EXPECT_ERRNO(EBUSY,
2868 			     _test_ioctl_destroy(self->fd, self->viommu_id));
2869 	} else {
2870 		test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
2871 				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
2872 				      NULL);
2873 	}
2874 }
2875 
2876 TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
2877 {
2878 	struct iommu_hwpt_selftest data = {
2879 		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2880 	};
2881 	uint32_t viommu_id = self->viommu_id;
2882 	uint32_t dev_id = self->device_id;
2883 	uint32_t iopf_hwpt_id;
2884 	uint32_t fault_id;
2885 	uint32_t fault_fd;
2886 	uint32_t vdev_id;
2887 
2888 	if (!dev_id)
2889 		SKIP(return, "Skipping test for variant no_viommu");
2890 
2891 	test_ioctl_fault_alloc(&fault_id, &fault_fd);
2892 	test_err_hwpt_alloc_iopf(ENOENT, dev_id, viommu_id, UINT32_MAX,
2893 				 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2894 				 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2895 	test_err_hwpt_alloc_iopf(EOPNOTSUPP, dev_id, viommu_id, fault_id,
2896 				 IOMMU_HWPT_FAULT_ID_VALID | (1 << 31),
2897 				 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
2898 				 sizeof(data));
2899 	test_cmd_hwpt_alloc_iopf(dev_id, viommu_id, fault_id,
2900 				 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2901 				 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2902 
2903 	/* Must allocate vdevice before attaching to a nested hwpt */
2904 	test_err_mock_domain_replace(ENOENT, self->stdev_id, iopf_hwpt_id);
2905 	test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2906 	test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
2907 	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, iopf_hwpt_id));
2908 	test_cmd_trigger_iopf(dev_id, fault_fd);
2909 
2910 	test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2911 	test_ioctl_destroy(iopf_hwpt_id);
2912 	close(fault_fd);
2913 	test_ioctl_destroy(fault_id);
2914 }
2915 
2916 TEST_F(iommufd_viommu, viommu_alloc_with_data)
2917 {
2918 	struct iommu_viommu_selftest data = {
2919 		.in_data = 0xbeef,
2920 	};
2921 	uint32_t *test;
2922 
2923 	if (!self->device_id)
2924 		SKIP(return, "Skipping test for variant no_viommu");
2925 
2926 	test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2927 			      IOMMU_VIOMMU_TYPE_SELFTEST, &data, sizeof(data),
2928 			      &self->viommu_id);
2929 	ASSERT_EQ(data.out_data, data.in_data);
2930 
2931 	/* Negative mmap tests -- offset and length cannot be changed */
2932 	test_err_mmap(ENXIO, data.out_mmap_length,
2933 		      data.out_mmap_offset + PAGE_SIZE);
2934 	test_err_mmap(ENXIO, data.out_mmap_length,
2935 		      data.out_mmap_offset + PAGE_SIZE * 2);
2936 	test_err_mmap(ENXIO, data.out_mmap_length / 2, data.out_mmap_offset);
2937 	test_err_mmap(ENXIO, data.out_mmap_length * 2, data.out_mmap_offset);
2938 
2939 	/* Now do a correct mmap for a loopback test */
2940 	test = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
2941 		    MAP_SHARED, self->fd, data.out_mmap_offset);
2942 	ASSERT_NE(MAP_FAILED, test);
2943 	ASSERT_EQ(data.in_data, *test);
2944 
2945 	/* While the region is mapped, destroying its owner object is blocked */
2946 	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->viommu_id));
2947 	munmap(test, data.out_mmap_length);
2948 }
2949 
2950 TEST_F(iommufd_viommu, vdevice_alloc)
2951 {
2952 	uint32_t viommu_id = self->viommu_id;
2953 	uint32_t dev_id = self->device_id;
2954 	uint32_t vdev_id = 0;
2955 	uint32_t veventq_id;
2956 	uint32_t veventq_fd;
2957 	int prev_seq = -1;
2958 
2959 	if (dev_id) {
2960 		/* Must allocate vdevice before attaching to a nested hwpt */
2961 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
2962 					     self->nested_hwpt_id);
2963 
2964 		/* Allocate a vEVENTQ with veventq_depth=2 */
2965 		test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
2966 				       &veventq_id, &veventq_fd);
2967 		test_err_veventq_alloc(EEXIST, viommu_id,
2968 				       IOMMU_VEVENTQ_TYPE_SELFTEST, NULL, NULL);
2969 		/* Set vdev_id to 0x99, unset it, and set to 0x88 */
2970 		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2971 		test_cmd_mock_domain_replace(self->stdev_id,
2972 					     self->nested_hwpt_id);
2973 		test_cmd_trigger_vevents(dev_id, 1);
2974 		test_cmd_read_vevents(veventq_fd, 1, 0x99, &prev_seq);
2975 		test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
2976 				       &vdev_id);
2977 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2978 		test_ioctl_destroy(vdev_id);
2979 
2980 		/* Try again with 0x88 */
2981 		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
2982 		test_cmd_mock_domain_replace(self->stdev_id,
2983 					     self->nested_hwpt_id);
2984 		/* Trigger an overflow with three events */
2985 		test_cmd_trigger_vevents(dev_id, 3);
2986 		test_err_read_vevents(EOVERFLOW, veventq_fd, 3, 0x88,
2987 				      &prev_seq);
2988 		/* Overflow must be gone after the previous reads */
2989 		test_cmd_trigger_vevents(dev_id, 1);
2990 		test_cmd_read_vevents(veventq_fd, 1, 0x88, &prev_seq);
2991 		close(veventq_fd);
2992 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2993 		test_ioctl_destroy(vdev_id);
2994 		test_ioctl_destroy(veventq_id);
2995 	} else {
2996 		test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
2997 	}
2998 }
2999 
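/*
 * Walk the invalidation ioctl error paths (bad data_type, entry size, flags,
 * pointers and IDs), then invalidate the mock device cache entries one by
 * one, in a batch and with IOMMU_TEST_INVALIDATE_FLAG_ALL.
 */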
3000 TEST_F(iommufd_viommu, vdevice_cache)
3001 {
3002 	struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
3003 	uint32_t viommu_id = self->viommu_id;
3004 	uint32_t dev_id = self->device_id;
3005 	uint32_t vdev_id = 0;
3006 	uint32_t num_inv;
3007 
3008 	if (!dev_id)
3009 		SKIP(return, "Skipping test for variant no_viommu");
3010 
3011 	test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
3012 
3013 	test_cmd_dev_check_cache_all(dev_id, IOMMU_TEST_DEV_CACHE_DEFAULT);
3014 
3015 	/* Check data_type by passing zero-length array */
3016 	num_inv = 0;
3017 	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3018 				   &num_inv);
3019 	assert(!num_inv);
3020 
3021 	/* Negative test: Invalid data_type */
3022 	num_inv = 1;
3023 	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3024 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
3025 				   sizeof(*inv_reqs), &num_inv);
3026 	assert(!num_inv);
3027 
3028 	/* Negative test: structure size sanity */
3029 	num_inv = 1;
3030 	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3031 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3032 				   sizeof(*inv_reqs) + 1, &num_inv);
3033 	assert(!num_inv);
3034 
3035 	num_inv = 1;
3036 	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3037 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 1,
3038 				   &num_inv);
3039 	assert(!num_inv);
3040 
3041 	/* Negative test: invalid flag is passed */
3042 	num_inv = 1;
3043 	inv_reqs[0].flags = 0xffffffff;
3044 	inv_reqs[0].vdev_id = 0x99;
3045 	test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
3046 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3047 				   sizeof(*inv_reqs), &num_inv);
3048 	assert(!num_inv);
3049 
3050 	/* Negative test: invalid data_uptr when array is not empty */
3051 	num_inv = 1;
3052 	inv_reqs[0].flags = 0;
3053 	inv_reqs[0].vdev_id = 0x99;
3054 	test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
3055 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3056 				   sizeof(*inv_reqs), &num_inv);
3057 	assert(!num_inv);
3058 
3059 	/* Negative test: invalid entry_len when array is not empty */
3060 	num_inv = 1;
3061 	inv_reqs[0].flags = 0;
3062 	inv_reqs[0].vdev_id = 0x99;
3063 	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3064 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 0,
3065 				   &num_inv);
3066 	assert(!num_inv);
3067 
3068 	/* Negative test: invalid cache_id */
3069 	num_inv = 1;
3070 	inv_reqs[0].flags = 0;
3071 	inv_reqs[0].vdev_id = 0x99;
3072 	inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
3073 	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3074 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3075 				   sizeof(*inv_reqs), &num_inv);
3076 	assert(!num_inv);
3077 
3078 	/* Negative test: invalid vdev_id */
3079 	num_inv = 1;
3080 	inv_reqs[0].flags = 0;
3081 	inv_reqs[0].vdev_id = 0x9;
3082 	inv_reqs[0].cache_id = 0;
3083 	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3084 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3085 				   sizeof(*inv_reqs), &num_inv);
3086 	assert(!num_inv);
3087 
3088 	/*
3089 	 * Invalidate the 1st cache entry but fail the 2nd request
3090 	 * due to invalid flags configuration in the 2nd request.
3091 	 */
3092 	num_inv = 2;
3093 	inv_reqs[0].flags = 0;
3094 	inv_reqs[0].vdev_id = 0x99;
3095 	inv_reqs[0].cache_id = 0;
3096 	inv_reqs[1].flags = 0xffffffff;
3097 	inv_reqs[1].vdev_id = 0x99;
3098 	inv_reqs[1].cache_id = 1;
3099 	test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
3100 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3101 				   sizeof(*inv_reqs), &num_inv);
3102 	assert(num_inv == 1);
3103 	test_cmd_dev_check_cache(dev_id, 0, 0);
3104 	test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
3105 	test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3106 	test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3107 
3108 	/*
3109 	 * Invalidate the 1st cache entry but fail the 2nd request
3110 	 * due to invalid cache_id configuration in the 2nd request.
3111 	 */
3112 	num_inv = 2;
3113 	inv_reqs[0].flags = 0;
3114 	inv_reqs[0].vdev_id = 0x99;
3115 	inv_reqs[0].cache_id = 0;
3116 	inv_reqs[1].flags = 0;
3117 	inv_reqs[1].vdev_id = 0x99;
3118 	inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
3119 	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
3120 				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
3121 				   sizeof(*inv_reqs), &num_inv);
3122 	assert(num_inv == 1);
3123 	test_cmd_dev_check_cache(dev_id, 0, 0);
3124 	test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
3125 	test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3126 	test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3127 
3128 	/* Invalidate the 2nd cache entry and verify */
3129 	num_inv = 1;
3130 	inv_reqs[0].flags = 0;
3131 	inv_reqs[0].vdev_id = 0x99;
3132 	inv_reqs[0].cache_id = 1;
3133 	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3134 				   &num_inv);
3135 	assert(num_inv == 1);
3136 	test_cmd_dev_check_cache(dev_id, 0, 0);
3137 	test_cmd_dev_check_cache(dev_id, 1, 0);
3138 	test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
3139 	test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
3140 
3141 	/* Invalidate the 3rd and 4th cache entries and verify */
3142 	num_inv = 2;
3143 	inv_reqs[0].flags = 0;
3144 	inv_reqs[0].vdev_id = 0x99;
3145 	inv_reqs[0].cache_id = 2;
3146 	inv_reqs[1].flags = 0;
3147 	inv_reqs[1].vdev_id = 0x99;
3148 	inv_reqs[1].cache_id = 3;
3149 	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3150 				   &num_inv);
3151 	assert(num_inv == 2);
3152 	test_cmd_dev_check_cache_all(dev_id, 0);
3153 
3154 	/* Invalidate all cache entries for the vdevice (vdev_id 0x99) and verify */
3155 	num_inv = 1;
3156 	inv_reqs[0].vdev_id = 0x99;
3157 	inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
3158 	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
3159 				   &num_inv);
3160 	assert(num_inv == 1);
3161 	test_cmd_dev_check_cache_all(dev_id, 0);
3162 	test_ioctl_destroy(vdev_id);
3163 }
3164 
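/*
 * HW queues must be allocated starting at index 0 and their backing IOVA
 * must already be mapped; the IOVA stays pinned (unmap fails with EBUSY)
 * until the queue is destroyed, and destruction must go in reverse index
 * order.
 */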
3165 TEST_F(iommufd_viommu, hw_queue)
3166 {
3167 	__u64 iova = MOCK_APERTURE_START, iova2;
3168 	uint32_t viommu_id = self->viommu_id;
3169 	uint32_t hw_queue_id[2];
3170 
3171 	if (!viommu_id)
3172 		SKIP(return, "Skipping test for variant no_viommu");
3173 
3174 	/* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
3175 	test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
3176 				IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
3177 				&hw_queue_id[0]);
3178 	/* Fail queue addr and length */
3179 	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3180 				0, iova, 0, &hw_queue_id[0]);
3181 	test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
3182 				IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
3183 				PAGE_SIZE, &hw_queue_id[0]);
3184 	/* Fail missing iova */
3185 	test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3186 				0, iova, PAGE_SIZE, &hw_queue_id[0]);
3187 
3188 	/* Map iova */
3189 	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
3190 	test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
3191 
3192 	/* Fail index=1 and =MAX; must start from index=0 */
3193 	test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3194 				iova, PAGE_SIZE, &hw_queue_id[0]);
3195 	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3196 				IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
3197 				&hw_queue_id[0]);
3198 
3199 	/* Allocate index=0, declare ownership of the iova */
3200 	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
3201 				iova, PAGE_SIZE, &hw_queue_id[0]);
3202 	/* Fail duplicated index */
3203 	test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3204 				0, iova, PAGE_SIZE, &hw_queue_id[0]);
3205 	/* Fail unmap, due to iova ownership */
3206 	test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
3207 	/* The 2nd page is not pinned, so it can be unmapped */
3208 	test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
3209 
3210 	/* Allocate index=1, with an unaligned case */
3211 	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3212 				iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
3213 				&hw_queue_id[1]);
3214 	/* Fail to destroy, due to dependency */
3215 	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));
3216 
3217 	/* Destroy in descending order */
3218 	test_ioctl_destroy(hw_queue_id[1]);
3219 	test_ioctl_destroy(hw_queue_id[0]);
3220 	/* Now the first page can be unmapped */
3221 	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
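	/*
	 * Editorial note (hedged): the mock driver models each hw_queue as
	 * depending on the previous index and as pinning its backing iova,
	 * which is why index=0 cannot be destroyed while index=1 exists and
	 * why the first page only becomes unmappable once both queues are
	 * gone.
	 */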
3222 }
3223 
3224 TEST_F(iommufd_viommu, vdevice_tombstone)
3225 {
3226 	uint32_t viommu_id = self->viommu_id;
3227 	uint32_t dev_id = self->device_id;
3228 	uint32_t vdev_id = 0;
3229 
3230 	if (!dev_id)
3231 		SKIP(return, "Skipping test for variant no_viommu");
3232 
3233 	test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
3234 	test_ioctl_destroy(self->stdev_id);
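	/*
	 * Editorial note (hedged): destroying the backing stdev tears the
	 * vdevice down with it, leaving only a tombstoned ID; a later destroy
	 * of that ID is therefore expected to fail with ENOENT rather than
	 * free anything twice.
	 */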
3235 	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, vdev_id));
3236 }
3237 
3238 FIXTURE(iommufd_device_pasid)
3239 {
3240 	int fd;
3241 	uint32_t ioas_id;
3242 	uint32_t hwpt_id;
3243 	uint32_t stdev_id;
3244 	uint32_t device_id;
3245 	uint32_t no_pasid_stdev_id;
3246 	uint32_t no_pasid_device_id;
3247 };
3248 
3249 FIXTURE_VARIANT(iommufd_device_pasid)
3250 {
3251 	bool pasid_capable;
3252 };
3253 
3254 FIXTURE_SETUP(iommufd_device_pasid)
3255 {
3256 	self->fd = open("/dev/iommu", O_RDWR);
3257 	ASSERT_NE(-1, self->fd);
3258 	test_ioctl_ioas_alloc(&self->ioas_id);
3259 
3260 	test_cmd_mock_domain_flags(self->ioas_id,
3261 				   MOCK_FLAGS_DEVICE_PASID,
3262 				   &self->stdev_id, &self->hwpt_id,
3263 				   &self->device_id);
3264 	if (!variant->pasid_capable)
3265 		test_cmd_mock_domain_flags(self->ioas_id, 0,
3266 					   &self->no_pasid_stdev_id, NULL,
3267 					   &self->no_pasid_device_id);
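	/*
	 * Editorial note: the first mock domain is created with
	 * MOCK_FLAGS_DEVICE_PASID, so self->device_id is always
	 * PASID-capable; the no_pasid variant additionally creates a second
	 * device without PASID support for the negative attach tests.
	 */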
3268 }
3269 
3270 FIXTURE_TEARDOWN(iommufd_device_pasid)
3271 {
3272 	teardown_iommufd(self->fd, _metadata);
3273 }
3274 
3275 FIXTURE_VARIANT_ADD(iommufd_device_pasid, no_pasid)
3276 {
3277 	.pasid_capable = false,
3278 };
3279 
3280 FIXTURE_VARIANT_ADD(iommufd_device_pasid, has_pasid)
3281 {
3282 	.pasid_capable = true,
3283 };
3284 
3285 TEST_F(iommufd_device_pasid, pasid_attach)
3286 {
3287 	struct iommu_hwpt_selftest data = {
3288 		.iotlb =  IOMMU_TEST_IOTLB_DEFAULT,
3289 	};
3290 	uint32_t nested_hwpt_id[3] = {};
3291 	uint32_t parent_hwpt_id = 0;
3292 	uint32_t fault_id, fault_fd;
3293 	uint32_t s2_hwpt_id = 0;
3294 	uint32_t iopf_hwpt_id;
3295 	uint32_t pasid = 100;
3296 	uint32_t viommu_id;
3297 
3298 	/*
3299 	 * Negative test: detach a pasid that was never attached. This is not
3300 	 * expected usage, but it should not fail either.
3301 	 */
3302 	test_cmd_pasid_detach(pasid);
3303 
3304 	/* Allocate two nested hwpts sharing one common parent hwpt */
3305 	test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3306 			    IOMMU_HWPT_ALLOC_NEST_PARENT,
3307 			    &parent_hwpt_id);
3308 	test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3309 				   IOMMU_HWPT_ALLOC_PASID,
3310 				   &nested_hwpt_id[0],
3311 				   IOMMU_HWPT_DATA_SELFTEST,
3312 				   &data, sizeof(data));
3313 	test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3314 				   IOMMU_HWPT_ALLOC_PASID,
3315 				   &nested_hwpt_id[1],
3316 				   IOMMU_HWPT_DATA_SELFTEST,
3317 				   &data, sizeof(data));
3318 
3319 	/* Fault related preparation */
3320 	test_ioctl_fault_alloc(&fault_id, &fault_fd);
3321 	test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
3322 				 IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID,
3323 				 &iopf_hwpt_id,
3324 				 IOMMU_HWPT_DATA_SELFTEST, &data,
3325 				 sizeof(data));
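	/*
	 * Editorial note (hedged): fault_id names an iommufd fault object and
	 * fault_fd is the file descriptor used to read and answer I/O page
	 * fault events; the hwpt allocated with IOMMU_HWPT_FAULT_ID_VALID
	 * routes its faults to that object.
	 */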
3326 
3327 	/* Allocate a regular nested hwpt based on viommu */
3328 	test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
3329 			      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0, &viommu_id);
3330 	test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
3331 				   IOMMU_HWPT_ALLOC_PASID,
3332 				   &nested_hwpt_id[2],
3333 				   IOMMU_HWPT_DATA_SELFTEST, &data,
3334 				   sizeof(data));
3335 
3336 	test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3337 			    IOMMU_HWPT_ALLOC_PASID,
3338 			    &s2_hwpt_id);
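	/*
	 * Editorial note: hwpts allocated with IOMMU_HWPT_ALLOC_PASID are the
	 * "pasid compat" domains the comments below refer to; the parent hwpt
	 * was allocated without that flag and serves as the non-compat case.
	 */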
3339 
3340 	/* Attach RID to non-pasid compat domain, */
3341 	test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
3342 	/* then attaching to the pasid should fail */
3343 	test_err_pasid_attach(EINVAL, pasid, s2_hwpt_id);
3344 
3345 	/* Attach RID to pasid compat domain, */
3346 	test_cmd_mock_domain_replace(self->stdev_id, s2_hwpt_id);
3347 	/* then attach to pasid should succeed, */
3348 	test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3349 	/* but attach RID to non-pasid compat domain should fail now. */
3350 	test_err_mock_domain_replace(EINVAL, self->stdev_id, parent_hwpt_id);
3351 	/*
3352 	 * Detach hwpt from pasid 100, and check if the pasid 100
3353 	 * has null domain.
3354 	 */
3355 	test_cmd_pasid_detach(pasid);
3356 	ASSERT_EQ(0,
3357 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3358 					    pasid, 0));
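	/*
	 * Editorial note: test_cmd_pasid_check_hwpt() asks the mock driver
	 * which hwpt is currently installed for the pasid; an expected id of
	 * 0 means the pasid has no domain attached.
	 */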
3359 	/* RID is attached to a pasid-compat domain, pasid path is not used */
3360 
3361 	if (!variant->pasid_capable) {
3362 		/*
3363 		 * PASID-compatible domain can be used by non-PASID-capable
3364 		 * device.
3365 		 */
3366 		test_cmd_mock_domain_replace(self->no_pasid_stdev_id, nested_hwpt_id[0]);
3367 		test_cmd_mock_domain_replace(self->no_pasid_stdev_id, self->ioas_id);
3368 		/*
3369 		 * Attaching a hwpt to pasid 100 of a non-PASID-capable device
3370 		 * should fail, no matter whether the domain is pasid-compat or not.
3371 		 */
3372 		EXPECT_ERRNO(EINVAL,
3373 			     _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3374 						    pasid, parent_hwpt_id));
3375 		EXPECT_ERRNO(EINVAL,
3376 			     _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3377 						    pasid, s2_hwpt_id));
3378 	}
3379 
3380 	/*
3381 	 * Attaching a non-pasid-compat hwpt to a pasid-capable device should
3382 	 * fail, leaving the pasid with a null domain.
3383 	 */
3384 	test_err_pasid_attach(EINVAL, pasid, parent_hwpt_id);
3385 	ASSERT_EQ(0,
3386 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3387 					    pasid, 0));
3388 
3389 	/*
3390 	 * Attach ioas to pasid 100, should fail, domain should
3391 	 * be null.
3392 	 */
3393 	test_err_pasid_attach(EINVAL, pasid, self->ioas_id);
3394 	ASSERT_EQ(0,
3395 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3396 					    pasid, 0));
3397 
3398 	/*
3399 	 * Attach the s2_hwpt to pasid 100, should succeed, domain should
3400 	 * be valid.
3401 	 */
3402 	test_cmd_pasid_attach(pasid, s2_hwpt_id);
3403 	ASSERT_EQ(0,
3404 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3405 					    pasid, s2_hwpt_id));
3406 
3407 	/*
3408 	 * Trying to attach another hwpt to pasid 100 should FAIL, since
3409 	 * attach does not allow overwrite; use REPLACE instead.
3410 	 */
3411 	test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3412 
3413 	/*
3414 	 * Detach hwpt from pasid 100 for next test, should succeed,
3415 	 * and have null domain.
3416 	 */
3417 	test_cmd_pasid_detach(pasid);
3418 	ASSERT_EQ(0,
3419 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3420 					    pasid, 0));
3421 
3422 	/*
3423 	 * Attach nested hwpt to pasid 100, should succeed, domain
3424 	 * should be valid.
3425 	 */
3426 	test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3427 	ASSERT_EQ(0,
3428 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3429 					    pasid, nested_hwpt_id[0]));
3430 
3431 	/* Attach to pasid 100 which has been attached, should fail. */
3432 	test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3433 
3434 	/* cleanup pasid 100 */
3435 	test_cmd_pasid_detach(pasid);
3436 
3437 	/* Replace tests */
3438 
3439 	pasid = 200;
3440 	/*
3441 	 * Replace pasid 200 without attaching it, should fail
3442 	 * with -EINVAL.
3443 	 */
3444 	test_err_pasid_replace(EINVAL, pasid, s2_hwpt_id);
3445 
3446 	/*
3447 	 * Attach the s2 hwpt to pasid 200, should succeed, domain should
3448 	 * be valid.
3449 	 */
3450 	test_cmd_pasid_attach(pasid, s2_hwpt_id);
3451 	ASSERT_EQ(0,
3452 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3453 					    pasid, s2_hwpt_id));
3454 
3455 	/*
3456 	 * Replace pasid 200 with self->ioas_id, should fail
3457 	 * and domain should be the prior s2 hwpt.
3458 	 */
3459 	test_err_pasid_replace(EINVAL, pasid, self->ioas_id);
3460 	ASSERT_EQ(0,
3461 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3462 					    pasid, s2_hwpt_id));
3463 
3464 	/*
3465 	 * Replace a nested hwpt for pasid 200, should succeed,
3466 	 * and have valid domain.
3467 	 */
3468 	test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);
3469 	ASSERT_EQ(0,
3470 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3471 					    pasid, nested_hwpt_id[0]));
3472 
3473 	/*
3474 	 * Replace with another nested hwpt for pasid 200, should
3475 	 * succeed, and have valid domain.
3476 	 */
3477 	test_cmd_pasid_replace(pasid, nested_hwpt_id[1]);
3478 	ASSERT_EQ(0,
3479 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3480 					    pasid, nested_hwpt_id[1]));
3481 
3482 	/* cleanup pasid 200 */
3483 	test_cmd_pasid_detach(pasid);
3484 
3485 	/* Negative Tests for pasid replace, use pasid 1024 */
3486 
3487 	/*
3488 	 * Attach the s2 hwpt to pasid 1024, should succeed, domain should
3489 	 * be valid.
3490 	 */
3491 	pasid = 1024;
3492 	test_cmd_pasid_attach(pasid, s2_hwpt_id);
3493 	ASSERT_EQ(0,
3494 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3495 					    pasid, s2_hwpt_id));
3496 
3497 	/*
3498 	 * Replace pasid 1024 with nested_hwpt_id[0]. This should fail but
3499 	 * keep the old valid domain. It is a designed negative case;
3500 	 * normally such a replace would succeed.
3501 	 */
3502 	test_err_pasid_replace(ENOMEM, pasid, nested_hwpt_id[0]);
3503 	ASSERT_EQ(0,
3504 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3505 					    pasid, s2_hwpt_id));
3506 
3507 	/* cleanup pasid 1024 */
3508 	test_cmd_pasid_detach(pasid);
3509 
3510 	/* Attach to iopf-capable hwpt */
3511 
3512 	/*
3513 	 * Attach an iopf hwpt to pasid 2048, should succeed, domain should
3514 	 * be valid.
3515 	 */
3516 	pasid = 2048;
3517 	test_cmd_pasid_attach(pasid, iopf_hwpt_id);
3518 	ASSERT_EQ(0,
3519 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3520 					    pasid, iopf_hwpt_id));
3521 
3522 	test_cmd_trigger_iopf_pasid(self->device_id, pasid, fault_fd);
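	/*
	 * Editorial note (hedged): the trigger helper makes the mock device
	 * raise a page fault tagged with this pasid, then reads the event
	 * from fault_fd and sends the response, exercising the iopf path for
	 * a pasid attachment.
	 */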
3523 
3524 	/*
3525 	 * Replace with s2_hwpt_id for pasid 2048, should
3526 	 * succeed, and have valid domain.
3527 	 */
3528 	test_cmd_pasid_replace(pasid, s2_hwpt_id);
3529 	ASSERT_EQ(0,
3530 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3531 					    pasid, s2_hwpt_id));
3532 
3533 	/* cleanup pasid 2048 */
3534 	test_cmd_pasid_detach(pasid);
3535 
3536 	test_ioctl_destroy(iopf_hwpt_id);
3537 	close(fault_fd);
3538 	test_ioctl_destroy(fault_id);
3539 
3540 	/* Detach the s2_hwpt_id from RID */
3541 	test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
3542 }
3543 
3544 TEST_HARNESS_MAIN
3545