xref: /linux/tools/testing/selftests/iommu/iommufd.c (revision 3c894cb29bbf4e36c5f2497cf8ea6fb09e157920)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <asm/unistd.h>
4 #include <stdlib.h>
5 #include <sys/capability.h>
6 #include <sys/mman.h>
7 #include <sys/eventfd.h>
8 
9 #define __EXPORTED_HEADERS__
10 #include <linux/vfio.h>
11 
12 #include "iommufd_utils.h"
13 
14 static unsigned long HUGEPAGE_SIZE;
15 
16 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
17 #define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
18 
19 static unsigned long get_huge_page_size(void)
20 {
21 	char buf[80];
22 	int ret;
23 	int fd;
24 
25 	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
26 		  O_RDONLY);
27 	if (fd < 0)
28 		return 2 * 1024 * 1024;
29 
30 	ret = read(fd, buf, sizeof(buf));
31 	close(fd);
32 	if (ret <= 0 || ret == sizeof(buf))
33 		return 2 * 1024 * 1024;
34 	buf[ret] = 0;
35 	return strtoul(buf, NULL, 10);
36 }
37 
38 static __attribute__((constructor)) void setup_sizes(void)
39 {
40 	void *vrc;
41 	int rc;
42 
43 	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
44 	HUGEPAGE_SIZE = get_huge_page_size();
45 
46 	BUFFER_SIZE = PAGE_SIZE * 16;
47 	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
48 	assert(!rc);
49 	assert(buffer);
50 	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
51 	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
52 		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
53 	assert(vrc == buffer);
54 
55 	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
56 				&mfd);
57 	assert(mfd_buffer != MAP_FAILED);
58 	assert(mfd > 0);
59 }
60 
61 FIXTURE(iommufd)
62 {
63 	int fd;
64 };
65 
66 FIXTURE_SETUP(iommufd)
67 {
68 	self->fd = open("/dev/iommu", O_RDWR);
69 	ASSERT_NE(-1, self->fd);
70 }
71 
72 FIXTURE_TEARDOWN(iommufd)
73 {
74 	teardown_iommufd(self->fd, _metadata);
75 }
76 
77 TEST_F(iommufd, simple_close)
78 {
79 }
80 
81 TEST_F(iommufd, cmd_fail)
82 {
83 	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
84 
85 	/* object id is invalid */
86 	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
87 	/* Bad pointer */
88 	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
89 	/* Unknown ioctl */
90 	EXPECT_ERRNO(ENOTTY,
91 		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
92 			   &cmd));
93 }
94 
95 TEST_F(iommufd, cmd_length)
96 {
97 #define TEST_LENGTH(_struct, _ioctl, _last)                              \
98 	{                                                                \
99 		size_t min_size = offsetofend(struct _struct, _last);    \
100 		struct {                                                 \
101 			struct _struct cmd;                              \
102 			uint8_t extra;                                   \
103 		} cmd = { .cmd = { .size = min_size - 1 },               \
104 			  .extra = UINT8_MAX };                          \
105 		int old_errno;                                           \
106 		int rc;                                                  \
107 									 \
108 		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
109 		cmd.cmd.size = sizeof(struct _struct) + 1;               \
110 		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
111 		cmd.cmd.size = sizeof(struct _struct);                   \
112 		rc = ioctl(self->fd, _ioctl, &cmd);                      \
113 		old_errno = errno;                                       \
114 		cmd.cmd.size = sizeof(struct _struct) + 1;               \
115 		cmd.extra = 0;                                           \
116 		if (rc) {                                                \
117 			EXPECT_ERRNO(old_errno,                          \
118 				     ioctl(self->fd, _ioctl, &cmd));     \
119 		} else {                                                 \
120 			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
121 		}                                                        \
122 	}
123 
124 	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
125 	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
126 	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
127 	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
128 	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
129 	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
130 		    out_iova_alignment);
131 	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
132 		    allowed_iovas);
133 	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
134 	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
135 	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
136 	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
137 	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
138 	TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
139 	TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
140 	TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
141 	TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
142 		    __reserved);
143 #undef TEST_LENGTH
144 }
145 
146 TEST_F(iommufd, cmd_ex_fail)
147 {
148 	struct {
149 		struct iommu_destroy cmd;
150 		__u64 future;
151 	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
152 
153 	/* object id is invalid and command is longer */
154 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
155 	/* future area is non-zero */
156 	cmd.future = 1;
157 	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
158 	/* Original command "works" */
159 	cmd.cmd.size = sizeof(cmd.cmd);
160 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
161 	/* Short command fails */
162 	cmd.cmd.size = sizeof(cmd.cmd) - 1;
163 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
164 }
165 
166 TEST_F(iommufd, global_options)
167 {
168 	struct iommu_option cmd = {
169 		.size = sizeof(cmd),
170 		.option_id = IOMMU_OPTION_RLIMIT_MODE,
171 		.op = IOMMU_OPTION_OP_GET,
172 		.val64 = 1,
173 	};
174 
175 	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
176 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
177 	ASSERT_EQ(0, cmd.val64);
178 
179 	/* This requires root */
180 	cmd.op = IOMMU_OPTION_OP_SET;
181 	cmd.val64 = 1;
182 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
183 	cmd.val64 = 2;
184 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
185 
186 	cmd.op = IOMMU_OPTION_OP_GET;
187 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
188 	ASSERT_EQ(1, cmd.val64);
189 
190 	cmd.op = IOMMU_OPTION_OP_SET;
191 	cmd.val64 = 0;
192 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
193 
194 	cmd.op = IOMMU_OPTION_OP_GET;
195 	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
196 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
197 	cmd.op = IOMMU_OPTION_OP_SET;
198 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
199 }
200 
201 static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
202 {
203 	cap_t caps;
204 	cap_value_t cap_list[1] = { CAP_IPC_LOCK };
205 
206 	caps = cap_get_proc();
207 	ASSERT_NE(caps, NULL);
208 	ASSERT_NE(-1,
209 		  cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
210 	ASSERT_NE(-1, cap_set_proc(caps));
211 	cap_free(caps);
212 }
213 
214 static long get_proc_status_value(pid_t pid, const char *var)
215 {
216 	FILE *fp;
217 	char buf[80], tag[80];
218 	long val = -1;
219 
220 	snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
221 	fp = fopen(buf, "r");
222 	if (!fp)
223 		return val;
224 
225 	while (fgets(buf, sizeof(buf), fp))
226 		if (fscanf(fp, "%s %ld\n", tag, &val) == 2 && !strcmp(tag, var))
227 			break;
228 
229 	fclose(fp);
230 	return val;
231 }
232 
233 static long get_vm_pinned(pid_t pid)
234 {
235 	return get_proc_status_value(pid, "VmPin:");
236 }
237 
238 static long get_vm_locked(pid_t pid)
239 {
240 	return get_proc_status_value(pid, "VmLck:");
241 }
242 
243 FIXTURE(change_process)
244 {
245 	int fd;
246 	uint32_t ioas_id;
247 };
248 
249 FIXTURE_VARIANT(change_process)
250 {
251 	int accounting;
252 };
253 
254 FIXTURE_SETUP(change_process)
255 {
256 	self->fd = open("/dev/iommu", O_RDWR);
257 	ASSERT_NE(-1, self->fd);
258 
259 	drop_cap_ipc_lock(_metadata);
260 	if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
261 		struct iommu_option set_limit_cmd = {
262 			.size = sizeof(set_limit_cmd),
263 			.option_id = IOMMU_OPTION_RLIMIT_MODE,
264 			.op = IOMMU_OPTION_OP_SET,
265 			.val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
266 		};
267 		ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
268 	}
269 
270 	test_ioctl_ioas_alloc(&self->ioas_id);
271 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
272 }
273 
274 FIXTURE_TEARDOWN(change_process)
275 {
276 	teardown_iommufd(self->fd, _metadata);
277 }
278 
279 FIXTURE_VARIANT_ADD(change_process, account_none)
280 {
281 	.accounting = IOPT_PAGES_ACCOUNT_NONE,
282 };
283 
284 FIXTURE_VARIANT_ADD(change_process, account_user)
285 {
286 	.accounting = IOPT_PAGES_ACCOUNT_USER,
287 };
288 
289 FIXTURE_VARIANT_ADD(change_process, account_mm)
290 {
291 	.accounting = IOPT_PAGES_ACCOUNT_MM,
292 };
293 
294 TEST_F(change_process, basic)
295 {
296 	pid_t parent = getpid();
297 	pid_t child;
298 	__u64 iova;
299 	struct iommu_ioas_change_process cmd = {
300 		.size = sizeof(cmd),
301 	};
302 
303 	/* Expect failure if non-file maps exist */
304 	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
305 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
306 	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
307 
308 	/* Change process works in current process. */
309 	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
310 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
311 
312 	/* Change process works in another process */
313 	child = fork();
314 	if (!child) {
315 		int nlock = PAGE_SIZE / 1024;
316 
317 		/* Parent accounts for locked memory before */
318 		ASSERT_EQ(nlock, get_vm_pinned(parent));
319 		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
320 			ASSERT_EQ(nlock, get_vm_locked(parent));
321 		ASSERT_EQ(0, get_vm_pinned(getpid()));
322 		ASSERT_EQ(0, get_vm_locked(getpid()));
323 
324 		ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
325 
326 		/* Child accounts for locked memory after */
327 		ASSERT_EQ(0, get_vm_pinned(parent));
328 		ASSERT_EQ(0, get_vm_locked(parent));
329 		ASSERT_EQ(nlock, get_vm_pinned(getpid()));
330 		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
331 			ASSERT_EQ(nlock, get_vm_locked(getpid()));
332 
333 		exit(0);
334 	}
335 	ASSERT_NE(-1, child);
336 	ASSERT_EQ(child, waitpid(child, NULL, 0));
337 }
338 
339 FIXTURE(iommufd_ioas)
340 {
341 	int fd;
342 	uint32_t ioas_id;
343 	uint32_t stdev_id;
344 	uint32_t hwpt_id;
345 	uint32_t device_id;
346 	uint64_t base_iova;
347 	uint32_t device_pasid_id;
348 };
349 
350 FIXTURE_VARIANT(iommufd_ioas)
351 {
352 	unsigned int mock_domains;
353 	unsigned int memory_limit;
354 	bool pasid_capable;
355 };
356 
357 FIXTURE_SETUP(iommufd_ioas)
358 {
359 	unsigned int i;
360 
361 
362 	self->fd = open("/dev/iommu", O_RDWR);
363 	ASSERT_NE(-1, self->fd);
364 	test_ioctl_ioas_alloc(&self->ioas_id);
365 
366 	if (!variant->memory_limit) {
367 		test_ioctl_set_default_memory_limit();
368 	} else {
369 		test_ioctl_set_temp_memory_limit(variant->memory_limit);
370 	}
371 
372 	for (i = 0; i != variant->mock_domains; i++) {
373 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
374 				     &self->hwpt_id, &self->device_id);
375 		test_cmd_dev_check_cache_all(self->device_id,
376 					     IOMMU_TEST_DEV_CACHE_DEFAULT);
377 		self->base_iova = MOCK_APERTURE_START;
378 	}
379 
380 	if (variant->pasid_capable)
381 		test_cmd_mock_domain_flags(self->ioas_id,
382 					   MOCK_FLAGS_DEVICE_PASID,
383 					   NULL, NULL,
384 					   &self->device_pasid_id);
385 }
386 
387 FIXTURE_TEARDOWN(iommufd_ioas)
388 {
389 	test_ioctl_set_default_memory_limit();
390 	teardown_iommufd(self->fd, _metadata);
391 }
392 
393 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
394 {
395 };
396 
397 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
398 {
399 	.mock_domains = 1,
400 	.pasid_capable = true,
401 };
402 
403 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
404 {
405 	.mock_domains = 2,
406 };
407 
408 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
409 {
410 	.mock_domains = 1,
411 	.memory_limit = 16,
412 };
413 
414 TEST_F(iommufd_ioas, ioas_auto_destroy)
415 {
416 }
417 
418 TEST_F(iommufd_ioas, ioas_destroy)
419 {
420 	if (self->stdev_id) {
421 		/* IOAS cannot be freed while a device has a HWPT using it */
422 		EXPECT_ERRNO(EBUSY,
423 			     _test_ioctl_destroy(self->fd, self->ioas_id));
424 	} else {
425 		/* Can allocate and manually free an IOAS table */
426 		test_ioctl_destroy(self->ioas_id);
427 	}
428 }
429 
430 TEST_F(iommufd_ioas, alloc_hwpt_nested)
431 {
432 	const uint32_t min_data_len =
433 		offsetofend(struct iommu_hwpt_selftest, iotlb);
434 	struct iommu_hwpt_selftest data = {
435 		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
436 	};
437 	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
438 	uint32_t nested_hwpt_id[2] = {};
439 	uint32_t num_inv;
440 	uint32_t parent_hwpt_id = 0;
441 	uint32_t parent_hwpt_id_not_work = 0;
442 	uint32_t test_hwpt_id = 0;
443 	uint32_t iopf_hwpt_id;
444 	uint32_t fault_id;
445 	uint32_t fault_fd;
446 
447 	if (self->device_id) {
448 		/* Negative tests */
449 		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
450 				    &test_hwpt_id);
451 		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
452 				    &test_hwpt_id);
453 		test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
454 				    IOMMU_HWPT_ALLOC_NEST_PARENT |
455 						IOMMU_HWPT_FAULT_ID_VALID,
456 				    &test_hwpt_id);
457 
458 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
459 				    IOMMU_HWPT_ALLOC_NEST_PARENT,
460 				    &parent_hwpt_id);
461 
462 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
463 				    &parent_hwpt_id_not_work);
464 
465 		/* Negative nested tests */
466 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
467 					   parent_hwpt_id, 0,
468 					   &nested_hwpt_id[0],
469 					   IOMMU_HWPT_DATA_NONE, &data,
470 					   sizeof(data));
471 		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
472 					   parent_hwpt_id, 0,
473 					   &nested_hwpt_id[0],
474 					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
475 					   sizeof(data));
476 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
477 					   parent_hwpt_id, 0,
478 					   &nested_hwpt_id[0],
479 					   IOMMU_HWPT_DATA_SELFTEST, &data,
480 					   min_data_len - 1);
481 		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
482 					   parent_hwpt_id, 0,
483 					   &nested_hwpt_id[0],
484 					   IOMMU_HWPT_DATA_SELFTEST, NULL,
485 					   sizeof(data));
486 		test_err_hwpt_alloc_nested(
487 			EOPNOTSUPP, self->device_id, parent_hwpt_id,
488 			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
489 			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
490 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
491 					   parent_hwpt_id_not_work, 0,
492 					   &nested_hwpt_id[0],
493 					   IOMMU_HWPT_DATA_SELFTEST, &data,
494 					   sizeof(data));
495 
496 		/* Allocate two nested hwpts sharing one common parent hwpt */
497 		test_ioctl_fault_alloc(&fault_id, &fault_fd);
498 		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
499 					   &nested_hwpt_id[0],
500 					   IOMMU_HWPT_DATA_SELFTEST, &data,
501 					   sizeof(data));
502 		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
503 					   &nested_hwpt_id[1],
504 					   IOMMU_HWPT_DATA_SELFTEST, &data,
505 					   sizeof(data));
506 		test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
507 					 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
508 					 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
509 					 &data, sizeof(data));
510 		test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
511 					 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
512 					 IOMMU_HWPT_DATA_SELFTEST, &data,
513 					 sizeof(data));
514 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
515 					      IOMMU_TEST_IOTLB_DEFAULT);
516 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
517 					      IOMMU_TEST_IOTLB_DEFAULT);
518 
519 		/* Negative test: a nested hwpt on top of a nested hwpt */
520 		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
521 					   nested_hwpt_id[0], 0, &test_hwpt_id,
522 					   IOMMU_HWPT_DATA_SELFTEST, &data,
523 					   sizeof(data));
524 		/* Negative test: parent hwpt now cannot be freed */
525 		EXPECT_ERRNO(EBUSY,
526 			     _test_ioctl_destroy(self->fd, parent_hwpt_id));
527 
528 		/* hwpt_invalidate does not support a parent hwpt */
529 		num_inv = 1;
530 		test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
531 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
532 					 sizeof(*inv_reqs), &num_inv);
533 		assert(!num_inv);
534 
535 		/* Check data_type by passing zero-length array */
536 		num_inv = 0;
537 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
538 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
539 					 sizeof(*inv_reqs), &num_inv);
540 		assert(!num_inv);
541 
542 		/* Negative test: Invalid data_type */
543 		num_inv = 1;
544 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
545 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
546 					 sizeof(*inv_reqs), &num_inv);
547 		assert(!num_inv);
548 
549 		/* Negative test: structure size sanity */
550 		num_inv = 1;
551 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
552 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
553 					 sizeof(*inv_reqs) + 1, &num_inv);
554 		assert(!num_inv);
555 
556 		num_inv = 1;
557 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
558 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
559 					 1, &num_inv);
560 		assert(!num_inv);
561 
562 		/* Negative test: invalid flag is passed */
563 		num_inv = 1;
564 		inv_reqs[0].flags = 0xffffffff;
565 		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
566 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
567 					 sizeof(*inv_reqs), &num_inv);
568 		assert(!num_inv);
569 
570 		/* Negative test: invalid data_uptr when array is not empty */
571 		num_inv = 1;
572 		inv_reqs[0].flags = 0;
573 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
574 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
575 					 sizeof(*inv_reqs), &num_inv);
576 		assert(!num_inv);
577 
578 		/* Negative test: invalid entry_len when array is not empty */
579 		num_inv = 1;
580 		inv_reqs[0].flags = 0;
581 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
582 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
583 					 0, &num_inv);
584 		assert(!num_inv);
585 
586 		/* Negative test: invalid iotlb_id */
587 		num_inv = 1;
588 		inv_reqs[0].flags = 0;
589 		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
590 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
591 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
592 					 sizeof(*inv_reqs), &num_inv);
593 		assert(!num_inv);
594 
595 		/*
596 		 * Invalidate the 1st iotlb entry but fail the 2nd request
597 		 * due to invalid flags configuration in the 2nd request.
598 		 */
599 		num_inv = 2;
600 		inv_reqs[0].flags = 0;
601 		inv_reqs[0].iotlb_id = 0;
602 		inv_reqs[1].flags = 0xffffffff;
603 		inv_reqs[1].iotlb_id = 1;
604 		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
605 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
606 					 sizeof(*inv_reqs), &num_inv);
607 		assert(num_inv == 1);
608 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
609 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
610 					  IOMMU_TEST_IOTLB_DEFAULT);
611 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
612 					  IOMMU_TEST_IOTLB_DEFAULT);
613 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
614 					  IOMMU_TEST_IOTLB_DEFAULT);
615 
616 		/*
617 		 * Invalidate the 1st iotlb entry but fail the 2nd request
618 		 * due to invalid iotlb_id configuration in the 2nd request.
619 		 */
620 		num_inv = 2;
621 		inv_reqs[0].flags = 0;
622 		inv_reqs[0].iotlb_id = 0;
623 		inv_reqs[1].flags = 0;
624 		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
625 		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
626 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
627 					 sizeof(*inv_reqs), &num_inv);
628 		assert(num_inv == 1);
629 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
630 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
631 					  IOMMU_TEST_IOTLB_DEFAULT);
632 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
633 					  IOMMU_TEST_IOTLB_DEFAULT);
634 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
635 					  IOMMU_TEST_IOTLB_DEFAULT);
636 
637 		/* Invalidate the 2nd iotlb entry and verify */
638 		num_inv = 1;
639 		inv_reqs[0].flags = 0;
640 		inv_reqs[0].iotlb_id = 1;
641 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
642 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
643 					 sizeof(*inv_reqs), &num_inv);
644 		assert(num_inv == 1);
645 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
646 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
647 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
648 					  IOMMU_TEST_IOTLB_DEFAULT);
649 		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
650 					  IOMMU_TEST_IOTLB_DEFAULT);
651 
652 		/* Invalidate the 3rd and 4th iotlb entries and verify */
653 		num_inv = 2;
654 		inv_reqs[0].flags = 0;
655 		inv_reqs[0].iotlb_id = 2;
656 		inv_reqs[1].flags = 0;
657 		inv_reqs[1].iotlb_id = 3;
658 		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
659 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
660 					 sizeof(*inv_reqs), &num_inv);
661 		assert(num_inv == 2);
662 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
663 
664 		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
665 		num_inv = 1;
666 		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
667 		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
668 					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
669 					 sizeof(*inv_reqs), &num_inv);
670 		assert(num_inv == 1);
671 		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
672 
673 		/* Attach device to nested_hwpt_id[0] that then will be busy */
674 		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
675 		EXPECT_ERRNO(EBUSY,
676 			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
677 
678 		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
679 		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
680 		EXPECT_ERRNO(EBUSY,
681 			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
682 		test_ioctl_destroy(nested_hwpt_id[0]);
683 
684 		/* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
685 		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
686 		EXPECT_ERRNO(EBUSY,
687 			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
688 		/* Trigger an IOPF on the device */
689 		test_cmd_trigger_iopf(self->device_id, fault_fd);
690 
691 		/* Detach from nested_hwpt_id[1] and destroy it */
692 		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
693 		test_ioctl_destroy(nested_hwpt_id[1]);
694 		test_ioctl_destroy(iopf_hwpt_id);
695 
696 		/* Detach from the parent hw_pagetable and destroy it */
697 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
698 		test_ioctl_destroy(parent_hwpt_id);
699 		test_ioctl_destroy(parent_hwpt_id_not_work);
700 		close(fault_fd);
701 		test_ioctl_destroy(fault_id);
702 	} else {
703 		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
704 				    &parent_hwpt_id);
705 		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
706 					   parent_hwpt_id, 0,
707 					   &nested_hwpt_id[0],
708 					   IOMMU_HWPT_DATA_SELFTEST, &data,
709 					   sizeof(data));
710 		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
711 					   parent_hwpt_id, 0,
712 					   &nested_hwpt_id[1],
713 					   IOMMU_HWPT_DATA_SELFTEST, &data,
714 					   sizeof(data));
715 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
716 					     nested_hwpt_id[0]);
717 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
718 					     nested_hwpt_id[1]);
719 	}
720 }
721 
722 TEST_F(iommufd_ioas, hwpt_attach)
723 {
724 	/* Create a device attached directly to a hwpt */
725 	if (self->stdev_id) {
726 		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
727 	} else {
728 		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
729 	}
730 }
731 
732 TEST_F(iommufd_ioas, ioas_area_destroy)
733 {
734 	/* Adding an area does not change ability to destroy */
735 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
736 	if (self->stdev_id)
737 		EXPECT_ERRNO(EBUSY,
738 			     _test_ioctl_destroy(self->fd, self->ioas_id));
739 	else
740 		test_ioctl_destroy(self->ioas_id);
741 }
742 
743 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
744 {
745 	int i;
746 
747 	/* Can allocate and automatically free an IOAS table with many areas */
748 	for (i = 0; i != 10; i++) {
749 		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
750 					  self->base_iova + i * PAGE_SIZE);
751 	}
752 }
753 
754 TEST_F(iommufd_ioas, get_hw_info)
755 {
756 	struct iommu_test_hw_info buffer_exact;
757 	struct iommu_test_hw_info_buffer_larger {
758 		struct iommu_test_hw_info info;
759 		uint64_t trailing_bytes;
760 	} buffer_larger;
761 	struct iommu_test_hw_info_buffer_smaller {
762 		__u32 flags;
763 	} buffer_smaller;
764 
765 	if (self->device_id) {
766 		uint8_t max_pasid = 0;
767 
768 		/* Provide a zero-size user_buffer */
769 		test_cmd_get_hw_info(self->device_id, NULL, 0);
770 		/* Provide a user_buffer with exact size */
771 		test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
772 		/*
773 		 * Provide a user_buffer with size larger than the exact size to check if
774 		 * the kernel zeroes the trailing bytes.
775 		 */
776 		test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
777 		/*
778 		 * Provide a user_buffer with size smaller than the exact size to check if
779 		 * the fields within the size range still get updated.
780 		 */
781 		test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
782 		test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
783 		ASSERT_EQ(0, max_pasid);
784 		if (variant->pasid_capable) {
785 			test_cmd_get_hw_info_pasid(self->device_pasid_id,
786 						   &max_pasid);
787 			ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid);
788 		}
789 	} else {
790 		test_err_get_hw_info(ENOENT, self->device_id,
791 				     &buffer_exact, sizeof(buffer_exact));
792 		test_err_get_hw_info(ENOENT, self->device_id,
793 				     &buffer_larger, sizeof(buffer_larger));
794 	}
795 }
796 
797 TEST_F(iommufd_ioas, area)
798 {
799 	int i;
800 
801 	/* Unmap fails if nothing is mapped */
802 	for (i = 0; i != 10; i++)
803 		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
804 
805 	/* Unmap works */
806 	for (i = 0; i != 10; i++)
807 		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
808 					  self->base_iova + i * PAGE_SIZE);
809 	for (i = 0; i != 10; i++)
810 		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
811 				      PAGE_SIZE);
812 
813 	/* Split fails */
814 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
815 				  self->base_iova + 16 * PAGE_SIZE);
816 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
817 				  PAGE_SIZE);
818 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
819 				  PAGE_SIZE);
820 
821 	/* Over map fails */
822 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
823 				      self->base_iova + 16 * PAGE_SIZE);
824 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
825 				      self->base_iova + 16 * PAGE_SIZE);
826 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
827 				      self->base_iova + 17 * PAGE_SIZE);
828 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
829 				      self->base_iova + 15 * PAGE_SIZE);
830 	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
831 				      self->base_iova + 15 * PAGE_SIZE);
832 
833 	/* unmap all works */
834 	test_ioctl_ioas_unmap(0, UINT64_MAX);
835 
836 	/* Unmap all succeeds on an empty IOAS */
837 	test_ioctl_ioas_unmap(0, UINT64_MAX);
838 }
839 
840 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
841 {
842 	uint64_t unmap_len;
843 	int i;
844 
845 	/* Give no_domain some space to rewind base_iova */
846 	self->base_iova += 4 * PAGE_SIZE;
847 
848 	for (i = 0; i != 4; i++)
849 		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
850 					  self->base_iova + i * 16 * PAGE_SIZE);
851 
852 	/* Unmapping an area that is not fully contained doesn't work */
853 	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
854 				  8 * PAGE_SIZE);
855 	test_err_ioctl_ioas_unmap(ENOENT,
856 				  self->base_iova + 3 * 16 * PAGE_SIZE +
857 					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
858 				  8 * PAGE_SIZE);
859 
860 	/* Unmap fully contained areas works */
861 	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
862 					    self->base_iova - 4 * PAGE_SIZE,
863 					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
864 						    4 * PAGE_SIZE,
865 					    &unmap_len));
866 	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
867 }
868 
869 TEST_F(iommufd_ioas, area_auto_iova)
870 {
871 	struct iommu_test_cmd test_cmd = {
872 		.size = sizeof(test_cmd),
873 		.op = IOMMU_TEST_OP_ADD_RESERVED,
874 		.id = self->ioas_id,
875 		.add_reserved = { .start = PAGE_SIZE * 4,
876 				  .length = PAGE_SIZE * 100 },
877 	};
878 	struct iommu_iova_range ranges[1] = {};
879 	struct iommu_ioas_allow_iovas allow_cmd = {
880 		.size = sizeof(allow_cmd),
881 		.ioas_id = self->ioas_id,
882 		.num_iovas = 1,
883 		.allowed_iovas = (uintptr_t)ranges,
884 	};
885 	__u64 iovas[10];
886 	int i;
887 
888 	/* Simple 4k pages */
889 	for (i = 0; i != 10; i++)
890 		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
891 	for (i = 0; i != 10; i++)
892 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
893 
894 	/* Kernel automatically aligns IOVAs properly */
895 	for (i = 0; i != 10; i++) {
896 		size_t length = PAGE_SIZE * (i + 1);
897 
898 		if (self->stdev_id) {
899 			test_ioctl_ioas_map(buffer, length, &iovas[i]);
900 		} else {
901 			test_ioctl_ioas_map((void *)(1UL << 31), length,
902 					    &iovas[i]);
903 		}
904 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
905 	}
906 	for (i = 0; i != 10; i++)
907 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
908 
909 	/* Avoids a reserved region */
910 	ASSERT_EQ(0,
911 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
912 			&test_cmd));
913 	for (i = 0; i != 10; i++) {
914 		size_t length = PAGE_SIZE * (i + 1);
915 
916 		test_ioctl_ioas_map(buffer, length, &iovas[i]);
917 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
918 		EXPECT_EQ(false,
919 			  iovas[i] > test_cmd.add_reserved.start &&
920 				  iovas[i] <
921 					  test_cmd.add_reserved.start +
922 						  test_cmd.add_reserved.length);
923 	}
924 	for (i = 0; i != 10; i++)
925 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
926 
927 	/* Allowed region intersects with a reserved region */
928 	ranges[0].start = PAGE_SIZE;
929 	ranges[0].last = PAGE_SIZE * 600;
930 	EXPECT_ERRNO(EADDRINUSE,
931 		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
932 
933 	/* Allocate from an allowed region */
934 	if (self->stdev_id) {
935 		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
936 		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
937 	} else {
938 		ranges[0].start = PAGE_SIZE * 200;
939 		ranges[0].last = PAGE_SIZE * 600 - 1;
940 	}
941 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
942 	for (i = 0; i != 10; i++) {
943 		size_t length = PAGE_SIZE * (i + 1);
944 
945 		test_ioctl_ioas_map(buffer, length, &iovas[i]);
946 		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
947 		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
948 		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
949 		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
950 		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
951 	}
952 	for (i = 0; i != 10; i++)
953 		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
954 }
955 
956 TEST_F(iommufd_ioas, area_allowed)
957 {
958 	struct iommu_test_cmd test_cmd = {
959 		.size = sizeof(test_cmd),
960 		.op = IOMMU_TEST_OP_ADD_RESERVED,
961 		.id = self->ioas_id,
962 		.add_reserved = { .start = PAGE_SIZE * 4,
963 				  .length = PAGE_SIZE * 100 },
964 	};
965 	struct iommu_iova_range ranges[1] = {};
966 	struct iommu_ioas_allow_iovas allow_cmd = {
967 		.size = sizeof(allow_cmd),
968 		.ioas_id = self->ioas_id,
969 		.num_iovas = 1,
970 		.allowed_iovas = (uintptr_t)ranges,
971 	};
972 
973 	/* Reserved intersects an allowed */
974 	allow_cmd.num_iovas = 1;
975 	ranges[0].start = self->base_iova;
976 	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
977 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
978 	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
979 	test_cmd.add_reserved.length = PAGE_SIZE;
980 	EXPECT_ERRNO(EADDRINUSE,
981 		     ioctl(self->fd,
982 			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
983 			   &test_cmd));
984 	allow_cmd.num_iovas = 0;
985 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
986 
987 	/* Allowed intersects a reserved */
988 	ASSERT_EQ(0,
989 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
990 			&test_cmd));
991 	allow_cmd.num_iovas = 1;
992 	ranges[0].start = self->base_iova;
993 	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
994 	EXPECT_ERRNO(EADDRINUSE,
995 		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
996 }
997 
998 TEST_F(iommufd_ioas, copy_area)
999 {
1000 	struct iommu_ioas_copy copy_cmd = {
1001 		.size = sizeof(copy_cmd),
1002 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1003 		.dst_ioas_id = self->ioas_id,
1004 		.src_ioas_id = self->ioas_id,
1005 		.length = PAGE_SIZE,
1006 	};
1007 
1008 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
1009 
1010 	/* Copy inside a single IOAS */
1011 	copy_cmd.src_iova = self->base_iova;
1012 	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
1013 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1014 
1015 	/* Copy between IOAS's */
1016 	copy_cmd.src_iova = self->base_iova;
1017 	copy_cmd.dst_iova = 0;
1018 	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
1019 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1020 }
1021 
1022 TEST_F(iommufd_ioas, iova_ranges)
1023 {
1024 	struct iommu_test_cmd test_cmd = {
1025 		.size = sizeof(test_cmd),
1026 		.op = IOMMU_TEST_OP_ADD_RESERVED,
1027 		.id = self->ioas_id,
1028 		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
1029 	};
1030 	struct iommu_iova_range *ranges = buffer;
1031 	struct iommu_ioas_iova_ranges ranges_cmd = {
1032 		.size = sizeof(ranges_cmd),
1033 		.ioas_id = self->ioas_id,
1034 		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
1035 		.allowed_iovas = (uintptr_t)ranges,
1036 	};
1037 
1038 	/* Range can be read */
1039 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1040 	EXPECT_EQ(1, ranges_cmd.num_iovas);
1041 	if (!self->stdev_id) {
1042 		EXPECT_EQ(0, ranges[0].start);
1043 		EXPECT_EQ(SIZE_MAX, ranges[0].last);
1044 		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
1045 	} else {
1046 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1047 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1048 		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
1049 	}
1050 
1051 	/* Buffer too small */
1052 	memset(ranges, 0, BUFFER_SIZE);
1053 	ranges_cmd.num_iovas = 0;
1054 	EXPECT_ERRNO(EMSGSIZE,
1055 		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1056 	EXPECT_EQ(1, ranges_cmd.num_iovas);
1057 	EXPECT_EQ(0, ranges[0].start);
1058 	EXPECT_EQ(0, ranges[0].last);
1059 
1060 	/* 2 ranges */
1061 	ASSERT_EQ(0,
1062 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
1063 			&test_cmd));
1064 	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
1065 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1066 	if (!self->stdev_id) {
1067 		EXPECT_EQ(2, ranges_cmd.num_iovas);
1068 		EXPECT_EQ(0, ranges[0].start);
1069 		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1070 		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
1071 		EXPECT_EQ(SIZE_MAX, ranges[1].last);
1072 	} else {
1073 		EXPECT_EQ(1, ranges_cmd.num_iovas);
1074 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1075 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1076 	}
1077 
1078 	/* Buffer too small */
1079 	memset(ranges, 0, BUFFER_SIZE);
1080 	ranges_cmd.num_iovas = 1;
1081 	if (!self->stdev_id) {
1082 		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
1083 					     &ranges_cmd));
1084 		EXPECT_EQ(2, ranges_cmd.num_iovas);
1085 		EXPECT_EQ(0, ranges[0].start);
1086 		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
1087 	} else {
1088 		ASSERT_EQ(0,
1089 			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
1090 		EXPECT_EQ(1, ranges_cmd.num_iovas);
1091 		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
1092 		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
1093 	}
1094 	EXPECT_EQ(0, ranges[1].start);
1095 	EXPECT_EQ(0, ranges[1].last);
1096 }
1097 
1098 TEST_F(iommufd_ioas, access_domain_destory)
1099 {
1100 	struct iommu_test_cmd access_cmd = {
1101 		.size = sizeof(access_cmd),
1102 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1103 		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
1104 				  .length = PAGE_SIZE},
1105 	};
1106 	size_t buf_size = 2 * HUGEPAGE_SIZE;
1107 	uint8_t *buf;
1108 
1109 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
1110 		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
1111 		   0);
1112 	ASSERT_NE(MAP_FAILED, buf);
1113 	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
1114 
1115 	test_cmd_create_access(self->ioas_id, &access_cmd.id,
1116 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1117 	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
1118 	ASSERT_EQ(0,
1119 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1120 			&access_cmd));
1121 
1122 	/* Causes a complicated unpin across a huge page boundary */
1123 	if (self->stdev_id)
1124 		test_ioctl_destroy(self->stdev_id);
1125 
1126 	test_cmd_destroy_access_pages(
1127 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1128 	test_cmd_destroy_access(access_cmd.id);
1129 	ASSERT_EQ(0, munmap(buf, buf_size));
1130 }
1131 
1132 TEST_F(iommufd_ioas, access_pin)
1133 {
1134 	struct iommu_test_cmd access_cmd = {
1135 		.size = sizeof(access_cmd),
1136 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1137 		.access_pages = { .iova = MOCK_APERTURE_START,
1138 				  .length = BUFFER_SIZE,
1139 				  .uptr = (uintptr_t)buffer },
1140 	};
1141 	struct iommu_test_cmd check_map_cmd = {
1142 		.size = sizeof(check_map_cmd),
1143 		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
1144 		.check_map = { .iova = MOCK_APERTURE_START,
1145 			       .length = BUFFER_SIZE,
1146 			       .uptr = (uintptr_t)buffer },
1147 	};
1148 	uint32_t access_pages_id;
1149 	unsigned int npages;
1150 
1151 	test_cmd_create_access(self->ioas_id, &access_cmd.id,
1152 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1153 
1154 	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
1155 		uint32_t mock_stdev_id;
1156 		uint32_t mock_hwpt_id;
1157 
1158 		access_cmd.access_pages.length = npages * PAGE_SIZE;
1159 
1160 		/* Single map/unmap */
1161 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1162 					  MOCK_APERTURE_START);
1163 		ASSERT_EQ(0, ioctl(self->fd,
1164 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1165 				   &access_cmd));
1166 		test_cmd_destroy_access_pages(
1167 			access_cmd.id,
1168 			access_cmd.access_pages.out_access_pages_id);
1169 
1170 		/* Double user */
1171 		ASSERT_EQ(0, ioctl(self->fd,
1172 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1173 				   &access_cmd));
1174 		access_pages_id = access_cmd.access_pages.out_access_pages_id;
1175 		ASSERT_EQ(0, ioctl(self->fd,
1176 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1177 				   &access_cmd));
1178 		test_cmd_destroy_access_pages(
1179 			access_cmd.id,
1180 			access_cmd.access_pages.out_access_pages_id);
1181 		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
1182 
1183 		/* Add/remove a domain with a user */
1184 		ASSERT_EQ(0, ioctl(self->fd,
1185 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1186 				   &access_cmd));
1187 		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1188 				     &mock_hwpt_id, NULL);
1189 		check_map_cmd.id = mock_hwpt_id;
1190 		ASSERT_EQ(0, ioctl(self->fd,
1191 				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
1192 				   &check_map_cmd));
1193 
1194 		test_ioctl_destroy(mock_stdev_id);
1195 		test_cmd_destroy_access_pages(
1196 			access_cmd.id,
1197 			access_cmd.access_pages.out_access_pages_id);
1198 
1199 		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1200 	}
1201 	test_cmd_destroy_access(access_cmd.id);
1202 }
1203 
1204 TEST_F(iommufd_ioas, access_pin_unmap)
1205 {
1206 	struct iommu_test_cmd access_pages_cmd = {
1207 		.size = sizeof(access_pages_cmd),
1208 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1209 		.access_pages = { .iova = MOCK_APERTURE_START,
1210 				  .length = BUFFER_SIZE,
1211 				  .uptr = (uintptr_t)buffer },
1212 	};
1213 
1214 	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1215 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1216 	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1217 	ASSERT_EQ(0,
1218 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1219 			&access_pages_cmd));
1220 
1221 	/* Trigger the unmap op */
1222 	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1223 
1224 	/* kernel removed the item for us */
1225 	test_err_destroy_access_pages(
1226 		ENOENT, access_pages_cmd.id,
1227 		access_pages_cmd.access_pages.out_access_pages_id);
1228 }
1229 
1230 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1231 			    unsigned int access_id, uint64_t iova,
1232 			    unsigned int def_flags)
1233 {
1234 	uint16_t tmp[32];
1235 	struct iommu_test_cmd access_cmd = {
1236 		.size = sizeof(access_cmd),
1237 		.op = IOMMU_TEST_OP_ACCESS_RW,
1238 		.id = access_id,
1239 		.access_rw = { .uptr = (uintptr_t)tmp },
1240 	};
1241 	uint16_t *buffer16 = buffer;
1242 	unsigned int i;
1243 	void *tmp2;
1244 
1245 	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1246 		buffer16[i] = rand();
1247 
1248 	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1249 	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1250 	     access_cmd.access_rw.iova++) {
1251 		for (access_cmd.access_rw.length = 1;
1252 		     access_cmd.access_rw.length < sizeof(tmp);
1253 		     access_cmd.access_rw.length++) {
1254 			access_cmd.access_rw.flags = def_flags;
1255 			ASSERT_EQ(0, ioctl(fd,
1256 					   _IOMMU_TEST_CMD(
1257 						   IOMMU_TEST_OP_ACCESS_RW),
1258 					   &access_cmd));
1259 			ASSERT_EQ(0,
1260 				  memcmp(buffer + (access_cmd.access_rw.iova -
1261 						   iova),
1262 					 tmp, access_cmd.access_rw.length));
1263 
1264 			for (i = 0; i != ARRAY_SIZE(tmp); i++)
1265 				tmp[i] = rand();
1266 			access_cmd.access_rw.flags = def_flags |
1267 						     MOCK_ACCESS_RW_WRITE;
1268 			ASSERT_EQ(0, ioctl(fd,
1269 					   _IOMMU_TEST_CMD(
1270 						   IOMMU_TEST_OP_ACCESS_RW),
1271 					   &access_cmd));
1272 			ASSERT_EQ(0,
1273 				  memcmp(buffer + (access_cmd.access_rw.iova -
1274 						   iova),
1275 					 tmp, access_cmd.access_rw.length));
1276 		}
1277 	}
1278 
1279 	/* Multi-page test */
1280 	tmp2 = malloc(BUFFER_SIZE);
1281 	ASSERT_NE(NULL, tmp2);
1282 	access_cmd.access_rw.iova = iova;
1283 	access_cmd.access_rw.length = BUFFER_SIZE;
1284 	access_cmd.access_rw.flags = def_flags;
1285 	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1286 	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1287 			   &access_cmd));
1288 	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1289 	free(tmp2);
1290 }
1291 
1292 TEST_F(iommufd_ioas, access_rw)
1293 {
1294 	__u32 access_id;
1295 	__u64 iova;
1296 
1297 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1298 	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1299 	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1300 	check_access_rw(_metadata, self->fd, access_id, iova,
1301 			MOCK_ACCESS_RW_SLOW_PATH);
1302 	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1303 	test_cmd_destroy_access(access_id);
1304 }
1305 
1306 TEST_F(iommufd_ioas, access_rw_unaligned)
1307 {
1308 	__u32 access_id;
1309 	__u64 iova;
1310 
1311 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1312 
1313 	/* Unaligned pages */
1314 	iova = self->base_iova + MOCK_PAGE_SIZE;
1315 	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1316 	check_access_rw(_metadata, self->fd, access_id, iova, 0);
1317 	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1318 	test_cmd_destroy_access(access_id);
1319 }
1320 
1321 TEST_F(iommufd_ioas, fork_gone)
1322 {
1323 	__u32 access_id;
1324 	pid_t child;
1325 
1326 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1327 
1328 	/* Create a mapping with a different mm */
1329 	child = fork();
1330 	if (!child) {
1331 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1332 					  MOCK_APERTURE_START);
1333 		exit(0);
1334 	}
1335 	ASSERT_NE(-1, child);
1336 	ASSERT_EQ(child, waitpid(child, NULL, 0));
1337 
1338 	if (self->stdev_id) {
1339 		/*
1340 		 * If a domain already existed then everything was pinned within
1341 		 * the fork, so this copies from one domain to another.
1342 		 */
1343 		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1344 		check_access_rw(_metadata, self->fd, access_id,
1345 				MOCK_APERTURE_START, 0);
1346 
1347 	} else {
1348 		/*
1349 		 * Otherwise we need to actually pin pages which can't happen
1350 		 * since the fork is gone.
1351 		 */
1352 		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1353 	}
1354 
1355 	test_cmd_destroy_access(access_id);
1356 }
1357 
1358 TEST_F(iommufd_ioas, fork_present)
1359 {
1360 	__u32 access_id;
1361 	int pipefds[2];
1362 	uint64_t tmp;
1363 	pid_t child;
1364 	int efd;
1365 
1366 	test_cmd_create_access(self->ioas_id, &access_id, 0);
1367 
1368 	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1369 	efd = eventfd(0, EFD_CLOEXEC);
1370 	ASSERT_NE(-1, efd);
1371 
1372 	/* Create a mapping with a different mm */
1373 	child = fork();
1374 	if (!child) {
1375 		__u64 iova;
1376 		uint64_t one = 1;
1377 
1378 		close(pipefds[1]);
1379 		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1380 					  MOCK_APERTURE_START);
1381 		if (write(efd, &one, sizeof(one)) != sizeof(one))
1382 			exit(100);
1383 		if (read(pipefds[0], &iova, 1) != 1)
1384 			exit(100);
1385 		exit(0);
1386 	}
1387 	close(pipefds[0]);
1388 	ASSERT_NE(-1, child);
1389 	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
1390 
1391 	/* Read pages from the remote process */
1392 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1393 	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1394 
1395 	ASSERT_EQ(0, close(pipefds[1]));
1396 	ASSERT_EQ(child, waitpid(child, NULL, 0));
1397 
1398 	test_cmd_destroy_access(access_id);
1399 }
1400 
1401 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1402 {
1403 	struct iommu_option cmd = {
1404 		.size = sizeof(cmd),
1405 		.option_id = IOMMU_OPTION_HUGE_PAGES,
1406 		.op = IOMMU_OPTION_OP_GET,
1407 		.val64 = 3,
1408 		.object_id = self->ioas_id,
1409 	};
1410 
1411 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1412 	ASSERT_EQ(1, cmd.val64);
1413 
1414 	cmd.op = IOMMU_OPTION_OP_SET;
1415 	cmd.val64 = 0;
1416 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1417 
1418 	cmd.op = IOMMU_OPTION_OP_GET;
1419 	cmd.val64 = 3;
1420 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1421 	ASSERT_EQ(0, cmd.val64);
1422 
1423 	cmd.op = IOMMU_OPTION_OP_SET;
1424 	cmd.val64 = 2;
1425 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1426 
1427 	cmd.op = IOMMU_OPTION_OP_SET;
1428 	cmd.val64 = 1;
1429 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1430 }
1431 
1432 TEST_F(iommufd_ioas, ioas_iova_alloc)
1433 {
1434 	unsigned int length;
1435 	__u64 iova;
1436 
1437 	for (length = 1; length != PAGE_SIZE * 2; length++) {
1438 		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1439 			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1440 		} else {
1441 			test_ioctl_ioas_map(buffer, length, &iova);
1442 			test_ioctl_ioas_unmap(iova, length);
1443 		}
1444 	}
1445 }
1446 
1447 TEST_F(iommufd_ioas, ioas_align_change)
1448 {
1449 	struct iommu_option cmd = {
1450 		.size = sizeof(cmd),
1451 		.option_id = IOMMU_OPTION_HUGE_PAGES,
1452 		.op = IOMMU_OPTION_OP_SET,
1453 		.object_id = self->ioas_id,
1454 		/* 0 means everything must be aligned to PAGE_SIZE */
1455 		.val64 = 0,
1456 	};
1457 
1458 	/*
1459 	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1460 	 * and map are present.
1461 	 */
1462 	if (variant->mock_domains)
1463 		return;
1464 
1465 	/*
1466 	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1467 	 */
1468 	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1469 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1470 
1471 	/* Misalignment is rejected at map time */
1472 	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1473 				      PAGE_SIZE,
1474 				      MOCK_APERTURE_START + PAGE_SIZE);
1475 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1476 
1477 	/* Reduce alignment */
1478 	cmd.val64 = 1;
1479 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1480 
1481 	/* Confirm misalignment is rejected during alignment upgrade */
1482 	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1483 				  MOCK_APERTURE_START + PAGE_SIZE);
1484 	cmd.val64 = 0;
1485 	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1486 
1487 	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1488 	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1489 }
1490 
1491 TEST_F(iommufd_ioas, copy_sweep)
1492 {
1493 	struct iommu_ioas_copy copy_cmd = {
1494 		.size = sizeof(copy_cmd),
1495 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1496 		.src_ioas_id = self->ioas_id,
1497 		.dst_iova = MOCK_APERTURE_START,
1498 		.length = MOCK_PAGE_SIZE,
1499 	};
1500 	unsigned int dst_ioas_id;
1501 	uint64_t last_iova;
1502 	uint64_t iova;
1503 
1504 	test_ioctl_ioas_alloc(&dst_ioas_id);
1505 	copy_cmd.dst_ioas_id = dst_ioas_id;
1506 
1507 	if (variant->mock_domains)
1508 		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1509 	else
1510 		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1511 
1512 	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1513 				  MOCK_APERTURE_START);
1514 
1515 	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1516 	     iova += 511) {
1517 		copy_cmd.src_iova = iova;
1518 		if (iova < MOCK_APERTURE_START ||
1519 		    iova + copy_cmd.length - 1 > last_iova) {
1520 			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1521 						   &copy_cmd));
1522 		} else {
1523 			ASSERT_EQ(0,
1524 				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1525 			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1526 						 copy_cmd.length);
1527 		}
1528 	}
1529 
1530 	test_ioctl_destroy(dst_ioas_id);
1531 }
1532 
1533 FIXTURE(iommufd_mock_domain)
1534 {
1535 	int fd;
1536 	uint32_t ioas_id;
1537 	uint32_t hwpt_id;
1538 	uint32_t hwpt_ids[2];
1539 	uint32_t stdev_ids[2];
1540 	uint32_t idev_ids[2];
1541 	int mmap_flags;
1542 	size_t mmap_buf_size;
1543 };
1544 
1545 FIXTURE_VARIANT(iommufd_mock_domain)
1546 {
1547 	unsigned int mock_domains;
1548 	bool hugepages;
1549 	bool file;
1550 };
1551 
1552 FIXTURE_SETUP(iommufd_mock_domain)
1553 {
1554 	unsigned int i;
1555 
1556 	self->fd = open("/dev/iommu", O_RDWR);
1557 	ASSERT_NE(-1, self->fd);
1558 	test_ioctl_ioas_alloc(&self->ioas_id);
1559 
1560 	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1561 
1562 	for (i = 0; i != variant->mock_domains; i++) {
1563 		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1564 				     &self->hwpt_ids[i], &self->idev_ids[i]);
1565 		test_cmd_dev_check_cache_all(self->idev_ids[0],
1566 					     IOMMU_TEST_DEV_CACHE_DEFAULT);
1567 	}
1568 	self->hwpt_id = self->hwpt_ids[0];
1569 
1570 	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1571 	self->mmap_buf_size = PAGE_SIZE * 8;
1572 	if (variant->hugepages) {
1573 		/*
1574 		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1575 		 * not available.
1576 		 */
1577 		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1578 		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1579 	}
1580 }
1581 
1582 FIXTURE_TEARDOWN(iommufd_mock_domain)
1583 {
1584 	teardown_iommufd(self->fd, _metadata);
1585 }
1586 
1587 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1588 {
1589 	.mock_domains = 1,
1590 	.hugepages = false,
1591 	.file = false,
1592 };
1593 
1594 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1595 {
1596 	.mock_domains = 2,
1597 	.hugepages = false,
1598 	.file = false,
1599 };
1600 
1601 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1602 {
1603 	.mock_domains = 1,
1604 	.hugepages = true,
1605 	.file = false,
1606 };
1607 
1608 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1609 {
1610 	.mock_domains = 2,
1611 	.hugepages = true,
1612 	.file = false,
1613 };
1614 
1615 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
1616 {
1617 	.mock_domains = 1,
1618 	.hugepages = false,
1619 	.file = true,
1620 };
1621 
1622 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
1623 {
1624 	.mock_domains = 1,
1625 	.hugepages = true,
1626 	.file = true,
1627 };
1628 
1629 
1630 /* Have the kernel check that the user pages made it to the iommu_domain */
1631 #define check_mock_iova(_ptr, _iova, _length)                                \
1632 	({                                                                   \
1633 		struct iommu_test_cmd check_map_cmd = {                      \
1634 			.size = sizeof(check_map_cmd),                       \
1635 			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
1636 			.id = self->hwpt_id,                                 \
1637 			.check_map = { .iova = _iova,                        \
1638 				       .length = _length,                    \
1639 				       .uptr = (uintptr_t)(_ptr) },          \
1640 		};                                                           \
1641 		ASSERT_EQ(0,                                                 \
1642 			  ioctl(self->fd,                                    \
1643 				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1644 				&check_map_cmd));                            \
1645 		if (self->hwpt_ids[1]) {                                     \
1646 			check_map_cmd.id = self->hwpt_ids[1];                \
1647 			ASSERT_EQ(0,                                         \
1648 				  ioctl(self->fd,                            \
1649 					_IOMMU_TEST_CMD(                     \
1650 						IOMMU_TEST_OP_MD_CHECK_MAP), \
1651 					&check_map_cmd));                    \
1652 		}                                                            \
1653 	})
1654 
1655 static void
1656 test_basic_mmap(struct __test_metadata *_metadata,
1657 		struct _test_data_iommufd_mock_domain *self,
1658 		const struct _fixture_variant_iommufd_mock_domain *variant)
1659 {
1660 	size_t buf_size = self->mmap_buf_size;
1661 	uint8_t *buf;
1662 	__u64 iova;
1663 
1664 	/* Simple one page map */
1665 	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1666 	check_mock_iova(buffer, iova, PAGE_SIZE);
1667 
1668 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1669 		   0);
1670 	ASSERT_NE(MAP_FAILED, buf);
1671 
1672 	/* EFAULT half way through mapping */
1673 	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1674 	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1675 
1676 	/* EFAULT on first page */
1677 	ASSERT_EQ(0, munmap(buf, buf_size / 2));
1678 	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1679 }
1680 
1681 static void
1682 test_basic_file(struct __test_metadata *_metadata,
1683 		struct _test_data_iommufd_mock_domain *self,
1684 		const struct _fixture_variant_iommufd_mock_domain *variant)
1685 {
1686 	size_t buf_size = self->mmap_buf_size;
1687 	uint8_t *buf;
1688 	__u64 iova;
1689 	int mfd_tmp;
1690 	int prot = PROT_READ | PROT_WRITE;
1691 
1692 	/* Simple one page map */
1693 	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
1694 	check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
1695 
1696 	buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
1697 	ASSERT_NE(MAP_FAILED, buf);
1698 
1699 	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);
1700 
1701 	ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
1702 	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);
1703 
1704 	close(mfd_tmp);
1705 }
1706 
1707 TEST_F(iommufd_mock_domain, basic)
1708 {
1709 	if (variant->file)
1710 		test_basic_file(_metadata, self, variant);
1711 	else
1712 		test_basic_mmap(_metadata, self, variant);
1713 }
1714 
1715 TEST_F(iommufd_mock_domain, ro_unshare)
1716 {
1717 	uint8_t *buf;
1718 	__u64 iova;
1719 	int fd;
1720 
1721 	fd = open("/proc/self/exe", O_RDONLY);
1722 	ASSERT_NE(-1, fd);
1723 
1724 	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1725 	ASSERT_NE(MAP_FAILED, buf);
1726 	close(fd);
1727 
1728 	/*
1729 	 * There have been lots of changes to the "unshare" mechanism in
1730 	 * get_user_pages(); make sure it works right. The write to the page
1731 	 * after we map it for reading should not change the assigned PFN.
1732 	 */
1733 	ASSERT_EQ(0,
1734 		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1735 				       &iova, IOMMU_IOAS_MAP_READABLE));
1736 	check_mock_iova(buf, iova, PAGE_SIZE);
1737 	memset(buf, 1, PAGE_SIZE);
1738 	check_mock_iova(buf, iova, PAGE_SIZE);
1739 	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1740 }
1741 
1742 TEST_F(iommufd_mock_domain, all_aligns)
1743 {
1744 	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1745 						MOCK_PAGE_SIZE;
1746 	size_t buf_size = self->mmap_buf_size;
1747 	unsigned int start;
1748 	unsigned int end;
1749 	uint8_t *buf;
1750 	int prot = PROT_READ | PROT_WRITE;
1751 	int mfd = -1;
1752 
1753 	if (variant->file)
1754 		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1755 	else
1756 		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1757 	ASSERT_NE(MAP_FAILED, buf);
1758 	if (variant->file)
1759 		ASSERT_GT(mfd, 0);
1760 	check_refs(buf, buf_size, 0);
1761 
1762 	/*
1763 	 * Map every combination of page size and alignment within a big region,
1764 	 * with fewer combinations for the hugepage case as it takes so long to finish.
1765 	 */
1766 	for (start = 0; start < buf_size; start += test_step) {
1767 		if (variant->hugepages)
1768 			end = buf_size;
1769 		else
1770 			end = start + MOCK_PAGE_SIZE;
1771 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1772 			size_t length = end - start;
1773 			__u64 iova;
1774 
1775 			if (variant->file) {
1776 				test_ioctl_ioas_map_file(mfd, start, length,
1777 							 &iova);
1778 			} else {
1779 				test_ioctl_ioas_map(buf + start, length, &iova);
1780 			}
1781 			check_mock_iova(buf + start, iova, length);
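			/* Each whole PAGE_SIZE page covered by the mapping should hold exactly one pin */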
1782 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1783 				   end / PAGE_SIZE * PAGE_SIZE -
1784 					   start / PAGE_SIZE * PAGE_SIZE,
1785 				   1);
1786 
1787 			test_ioctl_ioas_unmap(iova, length);
1788 		}
1789 	}
1790 	check_refs(buf, buf_size, 0);
1791 	ASSERT_EQ(0, munmap(buf, buf_size));
1792 	if (variant->file)
1793 		close(mfd);
1794 }
1795 
1796 TEST_F(iommufd_mock_domain, all_aligns_copy)
1797 {
1798 	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1799 						MOCK_PAGE_SIZE;
1800 	size_t buf_size = self->mmap_buf_size;
1801 	unsigned int start;
1802 	unsigned int end;
1803 	uint8_t *buf;
1804 	int prot = PROT_READ | PROT_WRITE;
1805 	int mfd = -1;
1806 
1807 	if (variant->file)
1808 		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
1809 	else
1810 		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
1811 	ASSERT_NE(MAP_FAILED, buf);
1812 	if (variant->file)
1813 		ASSERT_GT(mfd, 0);
1814 	check_refs(buf, buf_size, 0);
1815 
1816 	/*
1817 	 * Map every combination of page size and alignment within a big region,
1818 	 * with fewer combinations for the hugepage case as it takes so long to finish.
1819 	 */
1820 	for (start = 0; start < buf_size; start += test_step) {
1821 		if (variant->hugepages)
1822 			end = buf_size;
1823 		else
1824 			end = start + MOCK_PAGE_SIZE;
1825 		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1826 			size_t length = end - start;
1827 			unsigned int old_id;
1828 			uint32_t mock_stdev_id;
1829 			__u64 iova;
1830 
1831 			if (variant->file) {
1832 				test_ioctl_ioas_map_file(mfd, start, length,
1833 							 &iova);
1834 			} else {
1835 				test_ioctl_ioas_map(buf + start, length, &iova);
1836 			}
1837 
1838 			/* Add and destroy a domain while the area exists */
1839 			old_id = self->hwpt_ids[1];
1840 			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1841 					     &self->hwpt_ids[1], NULL);
1842 
1843 			check_mock_iova(buf + start, iova, length);
1844 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1845 				   end / PAGE_SIZE * PAGE_SIZE -
1846 					   start / PAGE_SIZE * PAGE_SIZE,
1847 				   1);
1848 
1849 			test_ioctl_destroy(mock_stdev_id);
1850 			self->hwpt_ids[1] = old_id;
1851 
1852 			test_ioctl_ioas_unmap(iova, length);
1853 		}
1854 	}
1855 	check_refs(buf, buf_size, 0);
1856 	ASSERT_EQ(0, munmap(buf, buf_size));
1857 	if (variant->file)
1858 		close(mfd);
1859 }
1860 
1861 TEST_F(iommufd_mock_domain, user_copy)
1862 {
1863 	void *buf = variant->file ? mfd_buffer : buffer;
1864 	struct iommu_test_cmd access_cmd = {
1865 		.size = sizeof(access_cmd),
1866 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
1867 		.access_pages = { .length = BUFFER_SIZE,
1868 				  .uptr = (uintptr_t)buf },
1869 	};
1870 	struct iommu_ioas_copy copy_cmd = {
1871 		.size = sizeof(copy_cmd),
1872 		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1873 		.dst_ioas_id = self->ioas_id,
1874 		.dst_iova = MOCK_APERTURE_START,
1875 		.length = BUFFER_SIZE,
1876 	};
1877 	struct iommu_ioas_unmap unmap_cmd = {
1878 		.size = sizeof(unmap_cmd),
1879 		.ioas_id = self->ioas_id,
1880 		.iova = MOCK_APERTURE_START,
1881 		.length = BUFFER_SIZE,
1882 	};
1883 	unsigned int new_ioas_id, ioas_id;
1884 
1885 	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1886 	test_ioctl_ioas_alloc(&ioas_id);
1887 	if (variant->file) {
1888 		test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
1889 					    &copy_cmd.src_iova);
1890 	} else {
1891 		test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
1892 				       &copy_cmd.src_iova);
1893 	}
1894 	test_cmd_create_access(ioas_id, &access_cmd.id,
1895 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1896 
1897 	access_cmd.access_pages.iova = copy_cmd.src_iova;
1898 	ASSERT_EQ(0,
1899 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1900 			&access_cmd));
1901 	copy_cmd.src_ioas_id = ioas_id;
1902 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1903 	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1904 
1905 	/* Now replace the ioas with a new one */
1906 	test_ioctl_ioas_alloc(&new_ioas_id);
1907 	if (variant->file) {
1908 		test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
1909 					    &copy_cmd.src_iova);
1910 	} else {
1911 		test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
1912 				       &copy_cmd.src_iova);
1913 	}
1914 	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1915 
1916 	/* Destroy the old ioas and cleanup copied mapping */
1917 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1918 	test_ioctl_destroy(ioas_id);
1919 
1920 	/* Then run the same test again with the new ioas */
1921 	access_cmd.access_pages.iova = copy_cmd.src_iova;
1922 	ASSERT_EQ(0,
1923 		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1924 			&access_cmd));
1925 	copy_cmd.src_ioas_id = new_ioas_id;
1926 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1927 	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
1928 
1929 	test_cmd_destroy_access_pages(
1930 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1931 	test_cmd_destroy_access(access_cmd.id);
1932 
1933 	test_ioctl_destroy(new_ioas_id);
1934 }
1935 
1936 TEST_F(iommufd_mock_domain, replace)
1937 {
1938 	uint32_t ioas_id;
1939 
1940 	test_ioctl_ioas_alloc(&ioas_id);
1941 
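	/* Move the mock device from its original IOAS to the newly allocated one */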
1942 	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1943 
1944 	/*
1945 	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
1946 	 * should get ENOENT when we try to use it.
1947 	 */
1948 	if (variant->mock_domains == 1)
1949 		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1950 					     self->hwpt_ids[0]);
1951 
1952 	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1953 	if (variant->mock_domains >= 2) {
1954 		test_cmd_mock_domain_replace(self->stdev_ids[0],
1955 					     self->hwpt_ids[1]);
1956 		test_cmd_mock_domain_replace(self->stdev_ids[0],
1957 					     self->hwpt_ids[1]);
1958 		test_cmd_mock_domain_replace(self->stdev_ids[0],
1959 					     self->hwpt_ids[0]);
1960 	}
1961 
1962 	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
1963 	test_ioctl_destroy(ioas_id);
1964 }
1965 
1966 TEST_F(iommufd_mock_domain, alloc_hwpt)
1967 {
1968 	int i;
1969 
1970 	for (i = 0; i != variant->mock_domains; i++) {
1971 		uint32_t hwpt_id[2];
1972 		uint32_t stddev_id;
1973 
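		/* Unsupported allocation flags must be rejected */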
1974 		test_err_hwpt_alloc(EOPNOTSUPP,
1975 				    self->idev_ids[i], self->ioas_id,
1976 				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
1977 		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1978 				    0, &hwpt_id[0]);
1979 		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1980 				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
1981 
1982 		/* Do a hw_pagetable rotation test */
1983 		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
1984 		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
1985 		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
1986 		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
1987 		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
1988 		test_ioctl_destroy(hwpt_id[1]);
1989 
1990 		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
1991 		test_ioctl_destroy(stddev_id);
1992 		test_ioctl_destroy(hwpt_id[0]);
1993 	}
1994 }
1995 
1996 FIXTURE(iommufd_dirty_tracking)
1997 {
1998 	int fd;
1999 	uint32_t ioas_id;
2000 	uint32_t hwpt_id;
2001 	uint32_t stdev_id;
2002 	uint32_t idev_id;
2003 	unsigned long page_size;
2004 	unsigned long bitmap_size;
2005 	void *bitmap;
2006 	void *buffer;
2007 };
2008 
2009 FIXTURE_VARIANT(iommufd_dirty_tracking)
2010 {
2011 	unsigned long buffer_size;
2012 	bool hugepages;
2013 };
2014 
2015 FIXTURE_SETUP(iommufd_dirty_tracking)
2016 {
2017 	size_t mmap_buffer_size;
2018 	unsigned long size;
2019 	int mmap_flags;
2020 	void *vrc;
2021 	int rc;
2022 
2023 	if (variant->buffer_size < MOCK_PAGE_SIZE) {
2024 		SKIP(return,
2025 		     "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
2026 		     variant->buffer_size, MOCK_PAGE_SIZE);
2027 	}
2028 
2029 	self->fd = open("/dev/iommu", O_RDWR);
2030 	ASSERT_NE(-1, self->fd);
2031 
2032 	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
2033 	mmap_buffer_size = variant->buffer_size;
2034 	if (variant->hugepages) {
2035 		/*
2036 		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
2037 		 * not available.
2038 		 */
2039 		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
2040 
2041 		/*
2042 		 * Allocation must be aligned to the HUGEPAGE_SIZE, because the
2043 		 * following mmap() will automatically align the length to be a
2044 		 * multiple of the underlying huge page size. Failing to do the
2045 		 * same at this allocation will result in a memory overwrite by
2046 		 * the mmap().
2047 		 */
2048 		if (mmap_buffer_size < HUGEPAGE_SIZE)
2049 			mmap_buffer_size = HUGEPAGE_SIZE;
2050 	}
2051 
2052 	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
2053 	if (rc || !self->buffer) {
2054 		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
2055 			   mmap_buffer_size, rc);
2056 	}
2057 	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
2058 	vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
2059 		   mmap_flags, -1, 0);
2060 	assert(vrc == self->buffer);
2061 
2062 	self->page_size = MOCK_PAGE_SIZE;
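	/* bitmap_size is in bits: one dirty bit per mock page of the buffer */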
2063 	self->bitmap_size = variant->buffer_size / self->page_size;
2064 
2065 	/* Provision with an extra (PAGE_SIZE) for the unaligned case */
2066 	size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
2067 	rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
2068 	assert(!rc);
2069 	assert(self->bitmap);
2070 	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
2071 
2072 	test_ioctl_ioas_alloc(&self->ioas_id);
2073 	/* Enable 1M mock IOMMU hugepages */
2074 	if (variant->hugepages) {
2075 		test_cmd_mock_domain_flags(self->ioas_id,
2076 					   MOCK_FLAGS_DEVICE_HUGE_IOVA,
2077 					   &self->stdev_id, &self->hwpt_id,
2078 					   &self->idev_id);
2079 	} else {
2080 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
2081 				     &self->hwpt_id, &self->idev_id);
2082 	}
2083 }
2084 
2085 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
2086 {
2087 	free(self->buffer);
2088 	free(self->bitmap);
2089 	teardown_iommufd(self->fd, _metadata);
2090 }
2091 
2092 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
2093 {
2094 	/* half of a u8 index bitmap */
2095 	.buffer_size = 8UL * 1024UL,
2096 };
2097 
2098 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
2099 {
2100 	/* one u8 index bitmap */
2101 	.buffer_size = 16UL * 1024UL,
2102 };
2103 
2104 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
2105 {
2106 	/* one u32 index bitmap */
2107 	.buffer_size = 64UL * 1024UL,
2108 };
2109 
2110 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
2111 {
2112 	/* one u64 index bitmap */
2113 	.buffer_size = 128UL * 1024UL,
2114 };
2115 
2116 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
2117 {
2118 	/* two u64 indexes and a trailing end bitmap */
2119 	.buffer_size = 320UL * 1024UL,
2120 };
2121 
2122 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
2123 {
2124 	/* 4K bitmap (64M IOVA range) */
2125 	.buffer_size = 64UL * 1024UL * 1024UL,
2126 };
2127 
2128 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
2129 {
2130 	/* 4K bitmap (64M IOVA range) */
2131 	.buffer_size = 64UL * 1024UL * 1024UL,
2132 	.hugepages = true,
2133 };
2134 
2135 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
2136 {
2137 	/* 8K bitmap (128M IOVA range) */
2138 	.buffer_size = 128UL * 1024UL * 1024UL,
2139 };
2140 
2141 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
2142 {
2143 	/* 8K bitmap (128M IOVA range) */
2144 	.buffer_size = 128UL * 1024UL * 1024UL,
2145 	.hugepages = true,
2146 };
2147 
2148 TEST_F(iommufd_dirty_tracking, enforce_dirty)
2149 {
2150 	uint32_t ioas_id, stddev_id, idev_id;
2151 	uint32_t hwpt_id, _hwpt_id;
2152 	uint32_t dev_flags;
2153 
2154 	/* Regular case */
2155 	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
2156 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2157 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2158 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2159 	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
2160 				   NULL);
2161 	test_ioctl_destroy(stddev_id);
2162 	test_ioctl_destroy(hwpt_id);
2163 
2164 	/* IOMMU device does not support dirty tracking */
2165 	test_ioctl_ioas_alloc(&ioas_id);
2166 	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
2167 				   &idev_id);
2168 	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
2169 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2170 	test_ioctl_destroy(stddev_id);
2171 }
2172 
2173 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
2174 {
2175 	uint32_t stddev_id;
2176 	uint32_t hwpt_id;
2177 
2178 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
2179 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2180 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2181 	test_cmd_set_dirty_tracking(hwpt_id, true);
2182 	test_cmd_set_dirty_tracking(hwpt_id, false);
2183 
2184 	test_ioctl_destroy(stddev_id);
2185 	test_ioctl_destroy(hwpt_id);
2186 }
2187 
2188 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
2189 {
2190 	uint32_t caps = 0;
2191 	uint32_t stddev_id;
2192 	uint32_t hwpt_id;
2193 
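	/* The mock device is expected to report dirty tracking in its HW capabilities */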
2194 	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
2195 	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
2196 	test_cmd_get_hw_capabilities(self->idev_id, caps,
2197 				     IOMMU_HW_CAP_DIRTY_TRACKING);
2198 	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
2199 		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);
2200 
2201 	test_ioctl_destroy(stddev_id);
2202 	test_ioctl_destroy(hwpt_id);
2203 }
2204 
2205 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
2206 {
2207 	uint32_t page_size = MOCK_PAGE_SIZE;
2208 	uint32_t hwpt_id;
2209 	uint32_t ioas_id;
2210 
2211 	if (variant->hugepages)
2212 		page_size = MOCK_HUGE_PAGE_SIZE;
2213 
2214 	test_ioctl_ioas_alloc(&ioas_id);
2215 	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2216 				     variant->buffer_size, MOCK_APERTURE_START);
2217 
2218 	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
2219 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2220 
2221 	test_cmd_set_dirty_tracking(hwpt_id, true);
2222 
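	/* Aligned bitmap */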
2223 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2224 				MOCK_APERTURE_START, self->page_size, page_size,
2225 				self->bitmap, self->bitmap_size, 0, _metadata);
2226 
2227 	/* PAGE_SIZE unaligned bitmap */
2228 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2229 				MOCK_APERTURE_START, self->page_size, page_size,
2230 				self->bitmap + MOCK_PAGE_SIZE,
2231 				self->bitmap_size, 0, _metadata);
2232 
2233 	/* u64 unaligned bitmap */
2234 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2235 				MOCK_APERTURE_START, self->page_size, page_size,
2236 				self->bitmap + 0xff1, self->bitmap_size, 0,
2237 				_metadata);
2238 
2239 	test_ioctl_destroy(hwpt_id);
2240 }
2241 
2242 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
2243 {
2244 	uint32_t page_size = MOCK_PAGE_SIZE;
2245 	uint32_t hwpt_id;
2246 	uint32_t ioas_id;
2247 
2248 	if (variant->hugepages)
2249 		page_size = MOCK_HUGE_PAGE_SIZE;
2250 
2251 	test_ioctl_ioas_alloc(&ioas_id);
2252 	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
2253 				     variant->buffer_size, MOCK_APERTURE_START);
2254 
2255 	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
2256 			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
2257 
2258 	test_cmd_set_dirty_tracking(hwpt_id, true);
2259 
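	/* Aligned bitmap */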
2260 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2261 				MOCK_APERTURE_START, self->page_size, page_size,
2262 				self->bitmap, self->bitmap_size,
2263 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2264 				_metadata);
2265 
2266 	/* Unaligned bitmap */
2267 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2268 				MOCK_APERTURE_START, self->page_size, page_size,
2269 				self->bitmap + MOCK_PAGE_SIZE,
2270 				self->bitmap_size,
2271 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2272 				_metadata);
2273 
2274 	/* u64 unaligned bitmap */
2275 	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
2276 				MOCK_APERTURE_START, self->page_size, page_size,
2277 				self->bitmap + 0xff1, self->bitmap_size,
2278 				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
2279 				_metadata);
2280 
2281 	test_ioctl_destroy(hwpt_id);
2282 }
2283 
2284 /* VFIO compatibility IOCTLs */
2285 
2286 TEST_F(iommufd, simple_ioctls)
2287 {
2288 	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
2289 	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
2290 }
2291 
2292 TEST_F(iommufd, unmap_cmd)
2293 {
2294 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2295 		.iova = MOCK_APERTURE_START,
2296 		.size = PAGE_SIZE,
2297 	};
2298 
2299 	unmap_cmd.argsz = 1;
2300 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2301 
2302 	unmap_cmd.argsz = sizeof(unmap_cmd);
2303 	unmap_cmd.flags = 1 << 31;
2304 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2305 
2306 	unmap_cmd.flags = 0;
2307 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2308 }
2309 
2310 TEST_F(iommufd, map_cmd)
2311 {
2312 	struct vfio_iommu_type1_dma_map map_cmd = {
2313 		.iova = MOCK_APERTURE_START,
2314 		.size = PAGE_SIZE,
2315 		.vaddr = (__u64)buffer,
2316 	};
2317 
2318 	map_cmd.argsz = 1;
2319 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2320 
2321 	map_cmd.argsz = sizeof(map_cmd);
2322 	map_cmd.flags = 1 << 31;
2323 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2324 
2325 	/* Requires a domain to be attached */
2326 	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2327 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2328 }
2329 
2330 TEST_F(iommufd, info_cmd)
2331 {
2332 	struct vfio_iommu_type1_info info_cmd = {};
2333 
2334 	/* Invalid argsz */
2335 	info_cmd.argsz = 1;
2336 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2337 
2338 	info_cmd.argsz = sizeof(info_cmd);
2339 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
2340 }
2341 
2342 TEST_F(iommufd, set_iommu_cmd)
2343 {
2344 	/* Requires a domain to be attached */
2345 	EXPECT_ERRNO(ENODEV,
2346 		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
2347 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
2348 }
2349 
2350 TEST_F(iommufd, vfio_ioas)
2351 {
2352 	struct iommu_vfio_ioas vfio_ioas_cmd = {
2353 		.size = sizeof(vfio_ioas_cmd),
2354 		.op = IOMMU_VFIO_IOAS_GET,
2355 	};
2356 	__u32 ioas_id;
2357 
2358 	/* ENODEV if there is no compat ioas */
2359 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2360 
2361 	/* Invalid id for set */
2362 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
2363 	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2364 
2365 	/* Valid id for set */
2366 	test_ioctl_ioas_alloc(&ioas_id);
2367 	vfio_ioas_cmd.ioas_id = ioas_id;
2368 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2369 
2370 	/* Same id comes back from get */
2371 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2372 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2373 	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2374 
2375 	/* Clear works */
2376 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2377 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2378 	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2379 	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2380 }
2381 
2382 FIXTURE(vfio_compat_mock_domain)
2383 {
2384 	int fd;
2385 	uint32_t ioas_id;
2386 };
2387 
2388 FIXTURE_VARIANT(vfio_compat_mock_domain)
2389 {
2390 	unsigned int version;
2391 };
2392 
2393 FIXTURE_SETUP(vfio_compat_mock_domain)
2394 {
2395 	struct iommu_vfio_ioas vfio_ioas_cmd = {
2396 		.size = sizeof(vfio_ioas_cmd),
2397 		.op = IOMMU_VFIO_IOAS_SET,
2398 	};
2399 
2400 	self->fd = open("/dev/iommu", O_RDWR);
2401 	ASSERT_NE(-1, self->fd);
2402 
2403 	/* Create what VFIO would consider a group */
2404 	test_ioctl_ioas_alloc(&self->ioas_id);
2405 	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2406 
2407 	/* Attach it to the vfio compat */
2408 	vfio_ioas_cmd.ioas_id = self->ioas_id;
2409 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2410 	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2411 }
2412 
2413 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2414 {
2415 	teardown_iommufd(self->fd, _metadata);
2416 }
2417 
2418 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2419 {
2420 	.version = VFIO_TYPE1v2_IOMMU,
2421 };
2422 
2423 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2424 {
2425 	.version = VFIO_TYPE1_IOMMU,
2426 };
2427 
2428 TEST_F(vfio_compat_mock_domain, simple_close)
2429 {
2430 }
2431 
2432 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2433 {
2434 	struct iommu_option cmd = {
2435 		.size = sizeof(cmd),
2436 		.option_id = IOMMU_OPTION_HUGE_PAGES,
2437 		.op = IOMMU_OPTION_OP_GET,
2438 		.val64 = 3,
2439 		.object_id = self->ioas_id,
2440 	};
2441 
2442 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2443 	if (variant->version == VFIO_TYPE1_IOMMU) {
2444 		ASSERT_EQ(0, cmd.val64);
2445 	} else {
2446 		ASSERT_EQ(1, cmd.val64);
2447 	}
2448 }
2449 
2450 /*
2451  * Execute an ioctl command stored in buffer and check that the result does not
2452  * overflow memory.
2453  */
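/* Return true when all len bytes at buf hold the value c */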
2454 static bool is_filled(const void *buf, uint8_t c, size_t len)
2455 {
2456 	const uint8_t *cbuf = buf;
2457 
2458 	for (; len; cbuf++, len--)
2459 		if (*cbuf != c)
2460 			return false;
2461 	return true;
2462 }
2463 
2464 #define ioctl_check_buf(fd, cmd)                                         \
2465 	({                                                               \
2466 		size_t _cmd_len = *(__u32 *)buffer;                      \
2467 									 \
2468 		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2469 		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
2470 		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
2471 					  BUFFER_SIZE - _cmd_len));      \
2472 	})
2473 
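/* Walk the vfio_info_cap_header chain in buffer and validate each capability */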
2474 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2475 				      struct vfio_iommu_type1_info *info_cmd)
2476 {
2477 	const struct vfio_info_cap_header *cap;
2478 
2479 	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2480 	cap = buffer + info_cmd->cap_offset;
2481 	while (true) {
2482 		size_t cap_size;
2483 
2484 		if (cap->next)
2485 			cap_size = (buffer + cap->next) - (void *)cap;
2486 		else
2487 			cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2488 
2489 		switch (cap->id) {
2490 		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2491 			struct vfio_iommu_type1_info_cap_iova_range *data =
2492 				(void *)cap;
2493 
2494 			ASSERT_EQ(1, data->header.version);
2495 			ASSERT_EQ(1, data->nr_iovas);
2496 			EXPECT_EQ(MOCK_APERTURE_START,
2497 				  data->iova_ranges[0].start);
2498 			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2499 			break;
2500 		}
2501 		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2502 			struct vfio_iommu_type1_info_dma_avail *data =
2503 				(void *)cap;
2504 
2505 			ASSERT_EQ(1, data->header.version);
2506 			ASSERT_EQ(sizeof(*data), cap_size);
2507 			break;
2508 		}
2509 		default:
2510 			ASSERT_EQ(false, true);
2511 			break;
2512 		}
2513 		if (!cap->next)
2514 			break;
2515 
2516 		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2517 		ASSERT_GE(buffer + cap->next, (void *)cap);
2518 		cap = buffer + cap->next;
2519 	}
2520 }
2521 
2522 TEST_F(vfio_compat_mock_domain, get_info)
2523 {
2524 	struct vfio_iommu_type1_info *info_cmd = buffer;
2525 	unsigned int i;
2526 	size_t caplen;
2527 
2528 	/* Pre-cap ABI */
2529 	*info_cmd = (struct vfio_iommu_type1_info){
2530 		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2531 	};
2532 	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2533 	ASSERT_NE(0, info_cmd->iova_pgsizes);
2534 	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2535 		  info_cmd->flags);
2536 
2537 	/* Read the cap chain size */
2538 	*info_cmd = (struct vfio_iommu_type1_info){
2539 		.argsz = sizeof(*info_cmd),
2540 	};
2541 	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2542 	ASSERT_NE(0, info_cmd->iova_pgsizes);
2543 	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2544 		  info_cmd->flags);
2545 	ASSERT_EQ(0, info_cmd->cap_offset);
2546 	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2547 
2548 	/* Read the caps; the kernel should never create corrupted caps */
2549 	caplen = info_cmd->argsz;
2550 	for (i = sizeof(*info_cmd); i < caplen; i++) {
2551 		*info_cmd = (struct vfio_iommu_type1_info){
2552 			.argsz = i,
2553 		};
2554 		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2555 		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2556 			  info_cmd->flags);
2557 		if (!info_cmd->cap_offset)
2558 			continue;
2559 		check_vfio_info_cap_chain(_metadata, info_cmd);
2560 	}
2561 }
2562 
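/* Randomly reorder the array so that unmaps are not issued in map order */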
2563 static void shuffle_array(unsigned long *array, size_t nelms)
2564 {
2565 	unsigned int i;
2566 
2567 	/* Shuffle */
2568 	for (i = 0; i != nelms; i++) {
2569 		unsigned long tmp = array[i];
2570 		unsigned int other = rand() % (nelms - i);
2571 
2572 		array[i] = array[other];
2573 		array[other] = tmp;
2574 	}
2575 }
2576 
2577 TEST_F(vfio_compat_mock_domain, map)
2578 {
2579 	struct vfio_iommu_type1_dma_map map_cmd = {
2580 		.argsz = sizeof(map_cmd),
2581 		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2582 		.vaddr = (uintptr_t)buffer,
2583 		.size = BUFFER_SIZE,
2584 		.iova = MOCK_APERTURE_START,
2585 	};
2586 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2587 		.argsz = sizeof(unmap_cmd),
2588 		.size = BUFFER_SIZE,
2589 		.iova = MOCK_APERTURE_START,
2590 	};
2591 	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2592 	unsigned int i;
2593 
2594 	/* Simple map/unmap */
2595 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2596 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2597 	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2598 
2599 	/* UNMAP_FLAG_ALL requires 0 iova/size */
2600 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2601 	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2602 	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2603 
2604 	unmap_cmd.iova = 0;
2605 	unmap_cmd.size = 0;
2606 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2607 	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2608 
2609 	/* Small pages */
2610 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2611 		map_cmd.iova = pages_iova[i] =
2612 			MOCK_APERTURE_START + i * PAGE_SIZE;
2613 		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2614 		map_cmd.size = PAGE_SIZE;
2615 		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2616 	}
2617 	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2618 
2619 	unmap_cmd.flags = 0;
2620 	unmap_cmd.size = PAGE_SIZE;
2621 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2622 		unmap_cmd.iova = pages_iova[i];
2623 		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2624 	}
2625 }
2626 
2627 TEST_F(vfio_compat_mock_domain, huge_map)
2628 {
2629 	size_t buf_size = HUGEPAGE_SIZE * 2;
2630 	struct vfio_iommu_type1_dma_map map_cmd = {
2631 		.argsz = sizeof(map_cmd),
2632 		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2633 		.size = buf_size,
2634 		.iova = MOCK_APERTURE_START,
2635 	};
2636 	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2637 		.argsz = sizeof(unmap_cmd),
2638 	};
2639 	unsigned long pages_iova[16];
2640 	unsigned int i;
2641 	void *buf;
2642 
2643 	/* Test huge pages and splitting */
2644 	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2645 		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2646 		   0);
2647 	ASSERT_NE(MAP_FAILED, buf);
2648 	map_cmd.vaddr = (uintptr_t)buf;
2649 	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2650 
2651 	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2652 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2653 		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2654 	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2655 
2656 	/* type1 mode can cut up larger mappings, type1v2 always fails */
2657 	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2658 		unmap_cmd.iova = pages_iova[i];
2659 		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2660 		if (variant->version == VFIO_TYPE1_IOMMU) {
2661 			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2662 					   &unmap_cmd));
2663 		} else {
2664 			EXPECT_ERRNO(ENOENT,
2665 				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2666 					   &unmap_cmd));
2667 		}
2668 	}
2669 }
2670 
2671 FIXTURE(iommufd_viommu)
2672 {
2673 	int fd;
2674 	uint32_t ioas_id;
2675 	uint32_t stdev_id;
2676 	uint32_t hwpt_id;
2677 	uint32_t nested_hwpt_id;
2678 	uint32_t device_id;
2679 	uint32_t viommu_id;
2680 };
2681 
2682 FIXTURE_VARIANT(iommufd_viommu)
2683 {
2684 	unsigned int viommu;
2685 };
2686 
2687 FIXTURE_SETUP(iommufd_viommu)
2688 {
2689 	self->fd = open("/dev/iommu", O_RDWR);
2690 	ASSERT_NE(-1, self->fd);
2691 	test_ioctl_ioas_alloc(&self->ioas_id);
2692 	test_ioctl_set_default_memory_limit();
2693 
2694 	if (variant->viommu) {
2695 		struct iommu_hwpt_selftest data = {
2696 			.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2697 		};
2698 
2699 		test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
2700 				     &self->device_id);
2701 
2702 		/* Allocate a nesting parent hwpt */
2703 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
2704 				    IOMMU_HWPT_ALLOC_NEST_PARENT,
2705 				    &self->hwpt_id);
2706 
2707 		/* Allocate a vIOMMU taking refcount of the parent hwpt */
2708 		test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
2709 				      IOMMU_VIOMMU_TYPE_SELFTEST,
2710 				      &self->viommu_id);
2711 
2712 		/* Allocate a regular nested hwpt */
2713 		test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
2714 					   &self->nested_hwpt_id,
2715 					   IOMMU_HWPT_DATA_SELFTEST, &data,
2716 					   sizeof(data));
2717 	}
2718 }
2719 
2720 FIXTURE_TEARDOWN(iommufd_viommu)
2721 {
2722 	teardown_iommufd(self->fd, _metadata);
2723 }
2724 
2725 FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
2726 {
2727 	.viommu = 0,
2728 };
2729 
2730 FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
2731 {
2732 	.viommu = 1,
2733 };
2734 
2735 TEST_F(iommufd_viommu, viommu_auto_destroy)
2736 {
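	/* Intentionally empty: only exercises FIXTURE_SETUP and the automatic teardown of the vIOMMU objects */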
2737 }
2738 
2739 TEST_F(iommufd_viommu, viommu_negative_tests)
2740 {
2741 	uint32_t device_id = self->device_id;
2742 	uint32_t ioas_id = self->ioas_id;
2743 	uint32_t hwpt_id;
2744 
2745 	if (self->device_id) {
2746 		/* Negative test -- invalid hwpt (hwpt_id=0) */
2747 		test_err_viommu_alloc(ENOENT, device_id, 0,
2748 				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
2749 
2750 		/* Negative test -- not a nesting parent hwpt */
2751 		test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
2752 		test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
2753 				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
2754 		test_ioctl_destroy(hwpt_id);
2755 
2756 		/* Negative test -- unsupported viommu type */
2757 		test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
2758 				      0xdead, NULL);
2759 		EXPECT_ERRNO(EBUSY,
2760 			     _test_ioctl_destroy(self->fd, self->hwpt_id));
2761 		EXPECT_ERRNO(EBUSY,
2762 			     _test_ioctl_destroy(self->fd, self->viommu_id));
2763 	} else {
2764 		test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
2765 				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
2766 	}
2767 }
2768 
2769 TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
2770 {
2771 	struct iommu_hwpt_selftest data = {
2772 		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
2773 	};
2774 	uint32_t viommu_id = self->viommu_id;
2775 	uint32_t dev_id = self->device_id;
2776 	uint32_t iopf_hwpt_id;
2777 	uint32_t fault_id;
2778 	uint32_t fault_fd;
2779 	uint32_t vdev_id;
2780 
2781 	if (self->device_id) {
2782 		test_ioctl_fault_alloc(&fault_id, &fault_fd);
2783 		test_err_hwpt_alloc_iopf(
2784 			ENOENT, dev_id, viommu_id, UINT32_MAX,
2785 			IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
2786 			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2787 		test_err_hwpt_alloc_iopf(
2788 			EOPNOTSUPP, dev_id, viommu_id, fault_id,
2789 			IOMMU_HWPT_FAULT_ID_VALID | (1 << 31), &iopf_hwpt_id,
2790 			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
2791 		test_cmd_hwpt_alloc_iopf(
2792 			dev_id, viommu_id, fault_id, IOMMU_HWPT_FAULT_ID_VALID,
2793 			&iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
2794 			sizeof(data));
2795 
2796 		/* Must allocate vdevice before attaching to a nested hwpt */
2797 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
2798 					     iopf_hwpt_id);
2799 		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2800 		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
2801 		EXPECT_ERRNO(EBUSY,
2802 			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
2803 		test_cmd_trigger_iopf(dev_id, fault_fd);
2804 
2805 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2806 		test_ioctl_destroy(iopf_hwpt_id);
2807 		close(fault_fd);
2808 		test_ioctl_destroy(fault_id);
2809 	}
2810 }
2811 
2812 TEST_F(iommufd_viommu, vdevice_alloc)
2813 {
2814 	uint32_t viommu_id = self->viommu_id;
2815 	uint32_t dev_id = self->device_id;
2816 	uint32_t vdev_id = 0;
2817 	uint32_t veventq_id;
2818 	uint32_t veventq_fd;
2819 	int prev_seq = -1;
2820 
2821 	if (dev_id) {
2822 		/* Must allocate vdevice before attaching to a nested hwpt */
2823 		test_err_mock_domain_replace(ENOENT, self->stdev_id,
2824 					     self->nested_hwpt_id);
2825 
2826 		/* Allocate a vEVENTQ with veventq_depth=2 */
2827 		test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
2828 				       &veventq_id, &veventq_fd);
2829 		test_err_veventq_alloc(EEXIST, viommu_id,
2830 				       IOMMU_VEVENTQ_TYPE_SELFTEST, NULL, NULL);
2831 		/* Set vdev_id to 0x99, unset it, and set to 0x88 */
2832 		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2833 		test_cmd_mock_domain_replace(self->stdev_id,
2834 					     self->nested_hwpt_id);
2835 		test_cmd_trigger_vevents(dev_id, 1);
2836 		test_cmd_read_vevents(veventq_fd, 1, 0x99, &prev_seq);
2837 		test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
2838 				       &vdev_id);
2839 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2840 		test_ioctl_destroy(vdev_id);
2841 
2842 		/* Try again with 0x88 */
2843 		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
2844 		test_cmd_mock_domain_replace(self->stdev_id,
2845 					     self->nested_hwpt_id);
2846 		/* Trigger an overflow with three events */
2847 		test_cmd_trigger_vevents(dev_id, 3);
2848 		test_err_read_vevents(EOVERFLOW, veventq_fd, 3, 0x88,
2849 				      &prev_seq);
2850 		/* Overflow must be gone after the previous reads */
2851 		test_cmd_trigger_vevents(dev_id, 1);
2852 		test_cmd_read_vevents(veventq_fd, 1, 0x88, &prev_seq);
2853 		close(veventq_fd);
2854 		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
2855 		test_ioctl_destroy(vdev_id);
2856 		test_ioctl_destroy(veventq_id);
2857 	} else {
2858 		test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
2859 	}
2860 }
2861 
2862 TEST_F(iommufd_viommu, vdevice_cache)
2863 {
2864 	struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
2865 	uint32_t viommu_id = self->viommu_id;
2866 	uint32_t dev_id = self->device_id;
2867 	uint32_t vdev_id = 0;
2868 	uint32_t num_inv;
2869 
2870 	if (dev_id) {
2871 		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
2872 
2873 		test_cmd_dev_check_cache_all(dev_id,
2874 					     IOMMU_TEST_DEV_CACHE_DEFAULT);
2875 
2876 		/* Check data_type by passing zero-length array */
2877 		num_inv = 0;
2878 		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
2879 					   sizeof(*inv_reqs), &num_inv);
2880 		assert(!num_inv);
2881 
2882 		/* Negative test: Invalid data_type */
2883 		num_inv = 1;
2884 		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2885 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
2886 					   sizeof(*inv_reqs), &num_inv);
2887 		assert(!num_inv);
2888 
2889 		/* Negative test: structure size sanity */
2890 		num_inv = 1;
2891 		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2892 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2893 					   sizeof(*inv_reqs) + 1, &num_inv);
2894 		assert(!num_inv);
2895 
2896 		num_inv = 1;
2897 		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2898 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2899 					   1, &num_inv);
2900 		assert(!num_inv);
2901 
2902 		/* Negative test: invalid flag is passed */
2903 		num_inv = 1;
2904 		inv_reqs[0].flags = 0xffffffff;
2905 		inv_reqs[0].vdev_id = 0x99;
2906 		test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
2907 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2908 					   sizeof(*inv_reqs), &num_inv);
2909 		assert(!num_inv);
2910 
2911 		/* Negative test: invalid data_uptr when array is not empty */
2912 		num_inv = 1;
2913 		inv_reqs[0].flags = 0;
2914 		inv_reqs[0].vdev_id = 0x99;
2915 		test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
2916 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2917 					   sizeof(*inv_reqs), &num_inv);
2918 		assert(!num_inv);
2919 
2920 		/* Negative test: invalid entry_len when array is not empty */
2921 		num_inv = 1;
2922 		inv_reqs[0].flags = 0;
2923 		inv_reqs[0].vdev_id = 0x99;
2924 		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2925 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2926 					   0, &num_inv);
2927 		assert(!num_inv);
2928 
2929 		/* Negative test: invalid cache_id */
2930 		num_inv = 1;
2931 		inv_reqs[0].flags = 0;
2932 		inv_reqs[0].vdev_id = 0x99;
2933 		inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
2934 		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2935 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2936 					   sizeof(*inv_reqs), &num_inv);
2937 		assert(!num_inv);
2938 
2939 		/* Negative test: invalid vdev_id */
2940 		num_inv = 1;
2941 		inv_reqs[0].flags = 0;
2942 		inv_reqs[0].vdev_id = 0x9;
2943 		inv_reqs[0].cache_id = 0;
2944 		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2945 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2946 					   sizeof(*inv_reqs), &num_inv);
2947 		assert(!num_inv);
2948 
2949 		/*
2950 		 * Invalidate the 1st cache entry but fail the 2nd request
2951 		 * due to invalid flags configuration in the 2nd request.
2952 		 */
2953 		num_inv = 2;
2954 		inv_reqs[0].flags = 0;
2955 		inv_reqs[0].vdev_id = 0x99;
2956 		inv_reqs[0].cache_id = 0;
2957 		inv_reqs[1].flags = 0xffffffff;
2958 		inv_reqs[1].vdev_id = 0x99;
2959 		inv_reqs[1].cache_id = 1;
2960 		test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
2961 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2962 					   sizeof(*inv_reqs), &num_inv);
2963 		assert(num_inv == 1);
2964 		test_cmd_dev_check_cache(dev_id, 0, 0);
2965 		test_cmd_dev_check_cache(dev_id, 1,
2966 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
2967 		test_cmd_dev_check_cache(dev_id, 2,
2968 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
2969 		test_cmd_dev_check_cache(dev_id, 3,
2970 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
2971 
2972 		/*
2973 		 * Invalidate the 1st cache entry but fail the 2nd request
2974 		 * due to invalid cache_id configuration in the 2nd request.
2975 		 */
2976 		num_inv = 2;
2977 		inv_reqs[0].flags = 0;
2978 		inv_reqs[0].vdev_id = 0x99;
2979 		inv_reqs[0].cache_id = 0;
2980 		inv_reqs[1].flags = 0;
2981 		inv_reqs[1].vdev_id = 0x99;
2982 		inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
2983 		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
2984 					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
2985 					   sizeof(*inv_reqs), &num_inv);
2986 		assert(num_inv == 1);
2987 		test_cmd_dev_check_cache(dev_id, 0, 0);
2988 		test_cmd_dev_check_cache(dev_id, 1,
2989 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
2990 		test_cmd_dev_check_cache(dev_id, 2,
2991 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
2992 		test_cmd_dev_check_cache(dev_id, 3,
2993 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
2994 
2995 		/* Invalidate the 2nd cache entry and verify */
2996 		num_inv = 1;
2997 		inv_reqs[0].flags = 0;
2998 		inv_reqs[0].vdev_id = 0x99;
2999 		inv_reqs[0].cache_id = 1;
3000 		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
3001 					   sizeof(*inv_reqs), &num_inv);
3002 		assert(num_inv == 1);
3003 		test_cmd_dev_check_cache(dev_id, 0, 0);
3004 		test_cmd_dev_check_cache(dev_id, 1, 0);
3005 		test_cmd_dev_check_cache(dev_id, 2,
3006 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
3007 		test_cmd_dev_check_cache(dev_id, 3,
3008 					 IOMMU_TEST_DEV_CACHE_DEFAULT);
3009 
3010 		/* Invalidate the 3rd and 4th cache entries and verify */
3011 		num_inv = 2;
3012 		inv_reqs[0].flags = 0;
3013 		inv_reqs[0].vdev_id = 0x99;
3014 		inv_reqs[0].cache_id = 2;
3015 		inv_reqs[1].flags = 0;
3016 		inv_reqs[1].vdev_id = 0x99;
3017 		inv_reqs[1].cache_id = 3;
3018 		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
3019 					   sizeof(*inv_reqs), &num_inv);
3020 		assert(num_inv == 2);
3021 		test_cmd_dev_check_cache_all(dev_id, 0);
3022 
3023 		/* Invalidate all cache entries for the vdevice using the ALL flag and verify */
3024 		num_inv = 1;
3025 		inv_reqs[0].vdev_id = 0x99;
3026 		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
3027 		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
3028 					   sizeof(*inv_reqs), &num_inv);
3029 		assert(num_inv == 1);
3030 		test_cmd_dev_check_cache_all(dev_id, 0);
3031 		test_ioctl_destroy(vdev_id);
3032 	}
3033 }
3034 
3035 FIXTURE(iommufd_device_pasid)
3036 {
3037 	int fd;
3038 	uint32_t ioas_id;
3039 	uint32_t hwpt_id;
3040 	uint32_t stdev_id;
3041 	uint32_t device_id;
3042 	uint32_t no_pasid_stdev_id;
3043 	uint32_t no_pasid_device_id;
3044 };
3045 
3046 FIXTURE_VARIANT(iommufd_device_pasid)
3047 {
3048 	bool pasid_capable;
3049 };
3050 
3051 FIXTURE_SETUP(iommufd_device_pasid)
3052 {
3053 	self->fd = open("/dev/iommu", O_RDWR);
3054 	ASSERT_NE(-1, self->fd);
3055 	test_ioctl_ioas_alloc(&self->ioas_id);
3056 
3057 	test_cmd_mock_domain_flags(self->ioas_id,
3058 				   MOCK_FLAGS_DEVICE_PASID,
3059 				   &self->stdev_id, &self->hwpt_id,
3060 				   &self->device_id);
3061 	if (!variant->pasid_capable)
3062 		test_cmd_mock_domain_flags(self->ioas_id, 0,
3063 					   &self->no_pasid_stdev_id, NULL,
3064 					   &self->no_pasid_device_id);
3065 }
3066 
3067 FIXTURE_TEARDOWN(iommufd_device_pasid)
3068 {
3069 	teardown_iommufd(self->fd, _metadata);
3070 }
3071 
3072 FIXTURE_VARIANT_ADD(iommufd_device_pasid, no_pasid)
3073 {
3074 	.pasid_capable = false,
3075 };
3076 
3077 FIXTURE_VARIANT_ADD(iommufd_device_pasid, has_pasid)
3078 {
3079 	.pasid_capable = true,
3080 };
3081 
3082 TEST_F(iommufd_device_pasid, pasid_attach)
3083 {
3084 	struct iommu_hwpt_selftest data = {
3085 		.iotlb =  IOMMU_TEST_IOTLB_DEFAULT,
3086 	};
3087 	uint32_t nested_hwpt_id[3] = {};
3088 	uint32_t parent_hwpt_id = 0;
3089 	uint32_t fault_id, fault_fd;
3090 	uint32_t s2_hwpt_id = 0;
3091 	uint32_t iopf_hwpt_id;
3092 	uint32_t pasid = 100;
3093 	uint32_t viommu_id;
3094 
3095 	/*
3096 	 * Negative test: detach a pasid that was never attached. This is not
3097 	 * expected usage, but it should not result in failure.
3098 	 */
3099 	test_cmd_pasid_detach(pasid);
3100 
3101 	/* Allocate two nested hwpts sharing one common parent hwpt */
3102 	test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3103 			    IOMMU_HWPT_ALLOC_NEST_PARENT,
3104 			    &parent_hwpt_id);
3105 	test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3106 				   IOMMU_HWPT_ALLOC_PASID,
3107 				   &nested_hwpt_id[0],
3108 				   IOMMU_HWPT_DATA_SELFTEST,
3109 				   &data, sizeof(data));
3110 	test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
3111 				   IOMMU_HWPT_ALLOC_PASID,
3112 				   &nested_hwpt_id[1],
3113 				   IOMMU_HWPT_DATA_SELFTEST,
3114 				   &data, sizeof(data));
3115 
3116 	/* Fault related preparation */
3117 	test_ioctl_fault_alloc(&fault_id, &fault_fd);
3118 	test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
3119 				 IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID,
3120 				 &iopf_hwpt_id,
3121 				 IOMMU_HWPT_DATA_SELFTEST, &data,
3122 				 sizeof(data));
3123 
3124 	/* Allocate a regular nested hwpt based on viommu */
3125 	test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
3126 			      IOMMU_VIOMMU_TYPE_SELFTEST,
3127 			      &viommu_id);
3128 	test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
3129 				   IOMMU_HWPT_ALLOC_PASID,
3130 				   &nested_hwpt_id[2],
3131 				   IOMMU_HWPT_DATA_SELFTEST, &data,
3132 				   sizeof(data));
3133 
3134 	test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
3135 			    IOMMU_HWPT_ALLOC_PASID,
3136 			    &s2_hwpt_id);
3137 
3138 	/* Attach RID to non-pasid compat domain, */
3139 	test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
3140 	/* then attach to pasid should fail */
3141 	test_err_pasid_attach(EINVAL, pasid, s2_hwpt_id);
3142 
3143 	/* Attach RID to pasid compat domain, */
3144 	test_cmd_mock_domain_replace(self->stdev_id, s2_hwpt_id);
3145 	/* then attach to pasid should succeed, */
3146 	test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3147 	/* but attach RID to non-pasid compat domain should fail now. */
3148 	test_err_mock_domain_replace(EINVAL, self->stdev_id, parent_hwpt_id);
3149 	/*
3150 	 * Detach hwpt from pasid 100, and check if the pasid 100
3151 	 * has null domain.
3152 	 */
3153 	test_cmd_pasid_detach(pasid);
3154 	ASSERT_EQ(0,
3155 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3156 					    pasid, 0));
3157 	/* RID is attached to a pasid-compat domain, the pasid path is not used */
3158 
3159 	if (!variant->pasid_capable) {
3160 		/*
3161 		 * PASID-compatible domain can be used by non-PASID-capable
3162 		 * device.
3163 		 */
3164 		test_cmd_mock_domain_replace(self->no_pasid_stdev_id, nested_hwpt_id[0]);
3165 		test_cmd_mock_domain_replace(self->no_pasid_stdev_id, self->ioas_id);
3166 		/*
3167 		 * Attaching a hwpt to pasid 100 of a non-PASID-capable device
3168 		 * should fail, no matter whether the domain is pasid-compat or not.
3169 		 */
3170 		EXPECT_ERRNO(EINVAL,
3171 			     _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3172 						    pasid, parent_hwpt_id));
3173 		EXPECT_ERRNO(EINVAL,
3174 			     _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
3175 						    pasid, s2_hwpt_id));
3176 	}
3177 
3178 	/*
3179 	 * Attaching a non-pasid-compat hwpt to a pasid-capable device should
3180 	 * fail and leave a null domain.
3181 	 */
3182 	test_err_pasid_attach(EINVAL, pasid, parent_hwpt_id);
3183 	ASSERT_EQ(0,
3184 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3185 					    pasid, 0));
3186 
3187 	/*
3188 	 * Attach ioas to pasid 100, should fail, domain should
3189 	 * be null.
3190 	 */
3191 	test_err_pasid_attach(EINVAL, pasid, self->ioas_id);
3192 	ASSERT_EQ(0,
3193 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3194 					    pasid, 0));
3195 
3196 	/*
3197 	 * Attach the s2_hwpt to pasid 100, should succeed, domain should
3198 	 * be valid.
3199 	 */
3200 	test_cmd_pasid_attach(pasid, s2_hwpt_id);
3201 	ASSERT_EQ(0,
3202 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3203 					    pasid, s2_hwpt_id));
3204 
3205 	/*
3206 	 * Try attaching another hwpt to pasid 100; it should FAIL
3207 	 * as attach does not allow overwrite, use REPLACE instead.
3208 	 */
3209 	test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3210 
3211 	/*
3212 	 * Detach hwpt from pasid 100 for next test, should succeed,
3213 	 * and have null domain.
3214 	 */
3215 	test_cmd_pasid_detach(pasid);
3216 	ASSERT_EQ(0,
3217 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3218 					    pasid, 0));
3219 
3220 	/*
3221 	 * Attach nested hwpt to pasid 100, should succeed, domain
3222 	 * should be valid.
3223 	 */
3224 	test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
3225 	ASSERT_EQ(0,
3226 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3227 					    pasid, nested_hwpt_id[0]));
3228 
3229 	/* Attaching to pasid 100, which is already attached, should fail. */
3230 	test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
3231 
3232 	/* cleanup pasid 100 */
3233 	test_cmd_pasid_detach(pasid);
3234 
3235 	/* Replace tests */
3236 
3237 	pasid = 200;
3238 	/*
3239 	 * Replace pasid 200 without attaching it, should fail
3240 	 * with -EINVAL.
3241 	 */
3242 	test_err_pasid_replace(EINVAL, pasid, s2_hwpt_id);
3243 
3244 	/*
3245 	 * Attach the s2 hwpt to pasid 200, should succeed, domain should
3246 	 * be valid.
3247 	 */
3248 	test_cmd_pasid_attach(pasid, s2_hwpt_id);
3249 	ASSERT_EQ(0,
3250 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3251 					    pasid, s2_hwpt_id));
3252 
3253 	/*
3254 	 * Replace pasid 200 with self->ioas_id, should fail
3255 	 * and domain should be the prior s2 hwpt.
3256 	 */
3257 	test_err_pasid_replace(EINVAL, pasid, self->ioas_id);
3258 	ASSERT_EQ(0,
3259 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3260 					    pasid, s2_hwpt_id));
3261 
3262 	/*
3263 	 * Replace a nested hwpt for pasid 200, should succeed,
3264 	 * and have valid domain.
3265 	 */
3266 	test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);
3267 	ASSERT_EQ(0,
3268 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3269 					    pasid, nested_hwpt_id[0]));
3270 
3271 	/*
3272 	 * Replace with another nested hwpt for pasid 200, should
3273 	 * succeed, and have valid domain.
3274 	 */
3275 	test_cmd_pasid_replace(pasid, nested_hwpt_id[1]);
3276 	ASSERT_EQ(0,
3277 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3278 					    pasid, nested_hwpt_id[1]));
3279 
3280 	/* cleanup pasid 200 */
3281 	test_cmd_pasid_detach(pasid);
3282 
3283 	/* Negative Tests for pasid replace, use pasid 1024 */
3284 
3285 	/*
3286 	 * Attach the s2 hwpt to pasid 1024, should succeed, domain should
3287 	 * be valid.
3288 	 */
3289 	pasid = 1024;
3290 	test_cmd_pasid_attach(pasid, s2_hwpt_id);
3291 	ASSERT_EQ(0,
3292 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3293 					    pasid, s2_hwpt_id));
3294 
3295 	/*
3296 	 * Replace pasid 1024 with nested_hwpt_id[0], should fail,
3297 	 * but have the old valid domain. This is a designed
3298 	 * negative case. Normally, this would succeed.
3299 	 */
3300 	test_err_pasid_replace(ENOMEM, pasid, nested_hwpt_id[0]);
3301 	ASSERT_EQ(0,
3302 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3303 					    pasid, s2_hwpt_id));
3304 
3305 	/* cleanup pasid 1024 */
3306 	test_cmd_pasid_detach(pasid);
3307 
3308 	/* Attach to iopf-capable hwpt */
3309 
3310 	/*
3311 	 * Attach an iopf hwpt to pasid 2048, should succeed, domain should
3312 	 * be valid.
3313 	 */
3314 	pasid = 2048;
3315 	test_cmd_pasid_attach(pasid, iopf_hwpt_id);
3316 	ASSERT_EQ(0,
3317 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3318 					    pasid, iopf_hwpt_id));
3319 
3320 	test_cmd_trigger_iopf_pasid(self->device_id, pasid, fault_fd);
3321 
3322 	/*
3323 	 * Replace with s2_hwpt_id for pasid 2048, should
3324 	 * succeed, and have valid domain.
3325 	 */
3326 	test_cmd_pasid_replace(pasid, s2_hwpt_id);
3327 	ASSERT_EQ(0,
3328 		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
3329 					    pasid, s2_hwpt_id));
3330 
3331 	/* cleanup pasid 2048 */
3332 	test_cmd_pasid_detach(pasid);
3333 
3334 	test_ioctl_destroy(iopf_hwpt_id);
3335 	close(fault_fd);
3336 	test_ioctl_destroy(fault_id);
3337 
3338 	/* Detach the s2_hwpt_id from RID */
3339 	test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
3340 }
3341 
3342 TEST_HARNESS_MAIN
3343