xref: /linux/lib/tests/kunit_iov_iter.c (revision 0913b7554726aac089cab89b6f0877dafc30b2a0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* I/O iterator tests.  This can only test kernel-backed iterator types.
3  *
4  * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/uio.h>
14 #include <linux/bvec.h>
15 #include <linux/folio_queue.h>
16 #include <linux/scatterlist.h>
17 #include <linux/minmax.h>
18 #include <linux/mman.h>
19 #include <kunit/test.h>
20 
21 MODULE_DESCRIPTION("iov_iter testing");
22 MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
23 MODULE_LICENSE("GPL");
24 
/* A half-open range [from, to) of byte offsets into the test buffer.
 * A negative 'from' terminates a table of these.
 */
struct kvec_test_range {
	int	from, to;
};

/* Ranges used to build kvec/folioq/xarray test iterators over a 1MiB
 * buffer.  The table deliberately mixes zero-length spans, page-aligned
 * spans and oddly-aligned spans to exercise edge cases.
 */
static const struct kvec_test_range kvec_test_ranges[] = {
	{ 0x00002, 0x00002 },
	{ 0x00027, 0x03000 },
	{ 0x05193, 0x18794 },
	{ 0x20000, 0x20000 },
	{ 0x20000, 0x24000 },
	{ 0x24000, 0x27001 },
	{ 0x29000, 0xffffb },
	{ 0xffffd, 0xffffe },
	{ -1 }
};
40 
41 static inline u8 pattern(unsigned long x)
42 {
43 	return (u8)x + (u8)(x >> 8) + (u8)(x >> 16);
44 }
45 
/* kunit deferred-action callback: tear down a buffer made by
 * iov_kunit_create_buffer().  The mapping was created with
 * VM_MAP_PUT_PAGES, so vfree() also puts the backing pages and frees
 * the page array.
 */
static void iov_kunit_unmap(void *data)
{
	vfree(data);
}
50 
51 static void *__init iov_kunit_create_buffer(struct kunit *test,
52 					    struct page ***ppages,
53 					    size_t npages)
54 {
55 	struct page **pages;
56 	unsigned long got;
57 	void *buffer;
58 	unsigned int i;
59 
60 	pages = kzalloc_objs(struct page *, npages, GFP_KERNEL);
61 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
62 	*ppages = pages;
63 
64 	got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
65 	if (got != npages) {
66 		release_pages(pages, got);
67 		kvfree(pages);
68 		KUNIT_ASSERT_EQ(test, got, npages);
69 	}
70 	/* Make sure that we don't get a physically contiguous buffer. */
71 	for (i = 0; i < npages / 4; ++i)
72 		swap(pages[i], pages[i + npages / 2]);
73 
74 	buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
75 	if (buffer == NULL) {
76 		release_pages(pages, got);
77 		kvfree(pages);
78 	}
79         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
80 
81 	kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
82 	return buffer;
83 }
84 
/* Build an ITER_KVEC iterator of @dir over @buffer from the range table
 * @pr, using at most @kvmax kvec slots.  Each validated range becomes one
 * kvec segment; the iterator's total count is the sum of the range sizes.
 */
static void __init iov_kunit_load_kvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct kvec *kvec, unsigned int kvmax,
				       void *buffer, size_t bufsize,
				       const struct kvec_test_range *pr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < kvmax; i++, pr++) {
		if (pr->from < 0)
			break;		/* End-of-table sentinel. */
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, bufsize);
		kvec[i].iov_base = buffer + pr->from;
		kvec[i].iov_len = pr->to - pr->from;
		size += pr->to - pr->from;
	}
	KUNIT_ASSERT_LE(test, size, bufsize);

	/* i is the number of segments actually populated. */
	iov_iter_kvec(iter, dir, kvec, i, size);
}
107 
/*
 * Test copying to a ITER_KVEC-type iterator.
 *
 * Copy a patterned scratch buffer into a zeroed buffer through a kvec
 * iterator, then verify that exactly the table-described ranges were
 * written, in order, and nothing else.
 */
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	/* Source: filled with the position-dependent pattern. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	/* Destination: zeroed, exposed through the kvec iterator. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	/* The iterator should be fully consumed. */
	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;		/* Stop after the first mismatch. */
	}

	KUNIT_SUCCEED(test);
}
157 
/*
 * Test copying from a ITER_KVEC-type iterator.
 *
 * Copy a patterned buffer out through a kvec iterator into a zeroed
 * scratch buffer, then verify the scratch buffer holds the range
 * contents packed contiguously in table order.
 */
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	/* Source: filled with the position-dependent pattern. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	/* Destination scratch buffer, zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	/* Cap at the scratch buffer size in case the table oversubscribes. */
	size = min(iter.count, bufsize);

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;		/* Stop after the first mismatch. */
	}

	KUNIT_SUCCEED(test);
}
212 
/* A half-open range [from, to) of byte offsets within page number 'page'.
 * A negative 'page' terminates a table of these.
 */
struct bvec_test_range {
	int	page, from, to;
};

/* Per-page ranges used to build bvec test iterators.  Whole pages, partial
 * pages, a zero-length span and two spans in the same page (6) are all
 * represented; adjacent whole-page entries exercise bvec merging.
 */
static const struct bvec_test_range bvec_test_ranges[] = {
	{ 0, 0x0002, 0x0002 },
	{ 1, 0x0027, 0x0893 },
	{ 2, 0x0193, 0x0794 },
	{ 3, 0x0000, 0x1000 },
	{ 4, 0x0000, 0x1000 },
	{ 5, 0x0000, 0x1000 },
	{ 6, 0x0000, 0x0ffb },
	{ 6, 0x0ffd, 0x0ffe },
	{ -1, -1, -1 }
};
228 
/* Build an ITER_BVEC iterator of @dir over @pages from the range table
 * @pr, using at most @bvmax bio_vec slots.  A range that begins at offset
 * 0 of the page immediately following the previous range's end is merged
 * into the previous bio_vec rather than taking a new slot.
 */
static void __init iov_kunit_load_bvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct bio_vec *bvec, unsigned int bvmax,
				       struct page **pages, size_t npages,
				       size_t bufsize,
				       const struct bvec_test_range *pr)
{
	struct page *can_merge = NULL, *page;
	size_t size = 0;
	int i;

	for (i = 0; i < bvmax; i++, pr++) {
		if (pr->from < 0)
			break;		/* End-of-table sentinel. */
		KUNIT_ASSERT_LT(test, pr->page, npages);
		KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
		KUNIT_ASSERT_GE(test, pr->from, 0);
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);

		page = pages[pr->page];
		if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
			/* Extend the previous segment; pr->from is 0, so
			 * pr->to is the whole added length.
			 */
			i--;
			bvec[i].bv_len += pr->to;
		} else {
			bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
		}

		size += pr->to - pr->from;
		/* Only a range ending exactly on a page boundary can be
		 * merged into by the next range.
		 */
		if ((pr->to & ~PAGE_MASK) == 0)
			can_merge = page + pr->to / PAGE_SIZE;
		else
			can_merge = NULL;
	}

	iov_iter_bvec(iter, dir, bvec, i, size);
}
266 
267 /*
268  * Test copying to a ITER_BVEC-type iterator.
269  */
270 static void __init iov_kunit_copy_to_bvec(struct kunit *test)
271 {
272 	const struct bvec_test_range *pr;
273 	struct iov_iter iter;
274 	struct bio_vec bvec[8];
275 	struct page **spages, **bpages;
276 	u8 *scratch, *buffer;
277 	size_t bufsize, npages, size, copied;
278 	int i, b, patt;
279 
280 	bufsize = 0x100000;
281 	npages = bufsize / PAGE_SIZE;
282 
283 	scratch = iov_kunit_create_buffer(test, &spages, npages);
284 	for (i = 0; i < bufsize; i++)
285 		scratch[i] = pattern(i);
286 
287 	buffer = iov_kunit_create_buffer(test, &bpages, npages);
288 	memset(buffer, 0, bufsize);
289 
290 	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
291 			    bpages, npages, bufsize, bvec_test_ranges);
292 	size = iter.count;
293 
294 	copied = copy_to_iter(scratch, size, &iter);
295 
296 	KUNIT_EXPECT_EQ(test, copied, size);
297 	KUNIT_EXPECT_EQ(test, iter.count, 0);
298 	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
299 
300 	/* Build the expected image in the scratch buffer. */
301 	b = 0;
302 	patt = 0;
303 	memset(scratch, 0, bufsize);
304 	for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
305 		u8 *p = scratch + pr->page * PAGE_SIZE;
306 
307 		for (i = pr->from; i < pr->to; i++)
308 			p[i] = pattern(patt++);
309 	}
310 
311 	/* Compare the images */
312 	for (i = 0; i < bufsize; i++) {
313 		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
314 		if (buffer[i] != scratch[i])
315 			return;
316 	}
317 
318 	KUNIT_SUCCEED(test);
319 }
320 
/*
 * Test copying from a ITER_BVEC-type iterator.
 *
 * Copy a patterned buffer out through a bvec iterator into a zeroed
 * scratch buffer, then verify the scratch buffer holds the per-page
 * range contents packed contiguously in table order.
 */
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	/* Source: filled with the position-dependent pattern. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	/* Destination scratch buffer, zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
		/* Pattern values are indexed by the source byte's position
		 * within the whole buffer, i.e. page base plus offset.
		 */
		size_t patt = pr->page * PAGE_SIZE;

		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(patt + j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;		/* Stop after the first mismatch. */
	}

	KUNIT_SUCCEED(test);
}
377 
378 static void iov_kunit_destroy_folioq(void *data)
379 {
380 	struct folio_queue *folioq, *next;
381 
382 	for (folioq = data; folioq; folioq = next) {
383 		next = folioq->next;
384 		kfree(folioq);
385 	}
386 }
387 
/* Build an ITER_FOLIOQ iterator of @dir covering @npages pages, appending
 * each page's folio to @folioq and growing the queue with extra segments
 * as needed.  Extra segments are freed by the iov_kunit_destroy_folioq()
 * action registered against the head segment.
 */
static void __init iov_kunit_load_folioq(struct kunit *test,
					struct iov_iter *iter, int dir,
					struct folio_queue *folioq,
					struct page **pages, size_t npages)
{
	struct folio_queue *p = folioq;
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		if (folioq_full(p)) {
			/* Chain on a new segment when the current one fills. */
			p->next = kzalloc_obj(struct folio_queue);
			KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
			folioq_init(p->next, 0);
			p->next->prev = p;
			p = p->next;
		}
		folioq_append(p, page_folio(pages[i]));
		size += PAGE_SIZE;
	}
	iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
}
410 
/* Allocate and initialise an empty folio_queue head segment, with a
 * deferred action to free the whole chain at test end.
 */
static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
{
	struct folio_queue *folioq;

	folioq = kzalloc_obj(struct folio_queue);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
	kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
	folioq_init(folioq, 0);
	return folioq;
}
421 
/*
 * Test copying to a ITER_FOLIOQ-type iterator.
 *
 * For each test range, re-initialise the folioq iterator, advance to the
 * range start and copy that range's worth of pattern in; then verify the
 * buffer as a whole matches the expected image.
 */
static void __init iov_kunit_copy_to_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	/* Source: filled with the position-dependent pattern. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	/* Destination: zeroed, its pages loaded into the folio queue. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		/* Limit the iterator to pr->to bytes then skip to pr->from. */
		iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
		if (test->status == KUNIT_FAILURE)
			goto stop;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;		/* Stop after the first mismatch. */
	}

stop:
	KUNIT_SUCCEED(test);
}
483 
/*
 * Test copying from a ITER_FOLIOQ-type iterator.
 *
 * For each test range, re-initialise the folioq iterator, advance to the
 * range start and copy that range out into consecutive scratch bytes;
 * then verify the packed result.
 */
static void __init iov_kunit_copy_from_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	/* Source: filled with the position-dependent pattern. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	/* Destination scratch buffer, zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		/* Limit the iterator to pr->to bytes then skip to pr->from. */
		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;		/* Stop after the first mismatch. */
	}

	KUNIT_SUCCEED(test);
}
547 
/* kunit deferred-action callback: dispose of an xarray created by
 * iov_kunit_create_xarray().  The stored pages are owned by their vmap
 * buffer, so only the xarray itself is torn down here.
 */
static void iov_kunit_destroy_xarray(void *data)
{
	struct xarray *xarray = data;

	xa_destroy(xarray);
	kfree(xarray);
}
555 
/* Build an ITER_XARRAY iterator of @dir covering @npages pages, storing
 * page i of @pages at xarray index i.
 */
static void __init iov_kunit_load_xarray(struct kunit *test,
					 struct iov_iter *iter, int dir,
					 struct xarray *xarray,
					 struct page **pages, size_t npages)
{
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);

		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
		size += PAGE_SIZE;
	}
	iov_iter_xarray(iter, dir, xarray, 0, size);
}
572 
573 static struct xarray *iov_kunit_create_xarray(struct kunit *test)
574 {
575 	struct xarray *xarray;
576 
577 	xarray = kzalloc_obj(struct xarray);
578 	xa_init(xarray);
579 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
580 	kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
581 	return xarray;
582 }
583 
/*
 * Test copying to a ITER_XARRAY-type iterator.
 *
 * For each test range, re-initialise the xarray iterator at the range's
 * start offset and copy that range's worth of pattern in; then verify the
 * buffer as a whole matches the expected image.
 */
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	/* Source: filled with the position-dependent pattern. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	/* Destination: zeroed, its pages stored in the xarray. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, READ, xarray, pr->from, size);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;		/* Stop after the first mismatch. */
	}

	KUNIT_SUCCEED(test);
}
641 
/*
 * Test copying from a ITER_XARRAY-type iterator.
 *
 * For each test range, re-initialise the xarray iterator at the range's
 * start offset and copy that range out into consecutive scratch bytes;
 * then verify the packed result.
 */
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	/* Source: filled with the position-dependent pattern. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	/* Destination scratch buffer, zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;		/* Stop after the first mismatch. */
	}

	KUNIT_SUCCEED(test);
}
704 
/*
 * Test the extraction of ITER_KVEC-type iterators.
 *
 * Repeatedly extract up to 100KiB / 8 pages at a time and check that the
 * returned pages and initial offset correspond to the positions described
 * by kvec_test_ranges, until the iterator is exhausted.
 */
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct kvec kvec[8];
	u8 *buffer;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	pr = kvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		/* Poison the page list so stale entries are detectable. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		/* Walk the extracted pages against the range table. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				/* Current range exhausted; move to the next. */
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;	/* Subsequent pages start at 0. */
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}
784 
/*
 * Test the extraction of ITER_BVEC-type iterators.
 *
 * Same shape as the kvec variant, but page indices are computed from the
 * per-range page number plus the in-range offset.
 */
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct bio_vec bvec[8];
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	pr = bvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		/* Poison the page list so stale entries are detectable. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		/* Walk the extracted pages against the range table. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				/* Current range exhausted; move to the next. */
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = pr->page + from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;	/* Subsequent pages start at 0. */
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}
862 
/*
 * Test the extraction of ITER_FOLIOQ-type iterators.
 *
 * For each test range, re-initialise the folioq iterator over the range
 * and repeatedly extract pages, checking each against the expected page
 * and offset within the backing buffer.
 */
static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct folio_queue *folioq;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		/* Limit the iterator to pr->to bytes then skip to 'from'. */
		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, from);

		do {
			size_t offset0 = LONG_MAX;

			/* Poison the page list so stale entries show up. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			/* Check each extracted page and its offset. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;	/* Subsequent pages start at 0. */
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
	}

stop:
	KUNIT_SUCCEED(test);
}
941 
/*
 * Test the extraction of ITER_XARRAY-type iterators.
 *
 * For each test range, re-initialise the xarray iterator over the range
 * and repeatedly extract pages, checking each against the expected page
 * and offset within the backing buffer.
 */
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, from, size);

		do {
			size_t offset0 = LONG_MAX;

			/* Poison the page list so stale entries show up. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			/* Check each extracted page and its offset. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;	/* Subsequent pages start at 0. */
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
	}

stop:
	KUNIT_SUCCEED(test);
}
1020 
/* Shared state for the extract_iter_to_sg() tests. */
struct iov_kunit_iter_to_sg_data {
	struct sg_table *sgt;		/* Scatterlist built from the iterator */
	u8 *buffer, *scratch;		/* Kernel source / comparison buffers */
	u8 __user *ubuf;		/* Userspace source buffer (user mode only) */
	struct page **pages;		/* Pages backing 'buffer' */
	size_t npages;			/* Number of pages in the buffers */
};
1028 
/* kunit deferred-action callback: drop the pins taken on the pages that
 * were extracted into the scatterlist (registered only when
 * iov_iter_extract_will_pin() says the iterator type pins).
 */
static void __init
iov_kunit_iter_unpin_sgt(void *data)
{
	struct sg_table *sgt = data;

	for (unsigned int i = 0; i < sgt->nents; ++i)
		unpin_user_page(sg_page(&sgt->sgl[i]));
}
1037 
/* Set up buffers and an empty sg_table for an extract_iter_to_sg() test.
 *
 * If @user is true, the patterned source is an anonymous userspace mapping
 * (data->ubuf); otherwise it is a kernel vmap buffer (data->buffer).  In
 * both cases a zeroed kernel scratch buffer is allocated for comparison.
 */
static void __init
iov_kunit_iter_to_sg_init(struct kunit *test, size_t bufsize, bool user,
			  struct iov_kunit_iter_to_sg_data *data)
{
	struct page **spages;
	struct scatterlist *sg;
	unsigned long uaddr;
	size_t i;

	data->npages = bufsize / PAGE_SIZE;
	sg = kunit_kmalloc_array(test, data->npages, sizeof(*sg), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sg);
	sg_init_table(sg, data->npages);
	data->sgt = kunit_kzalloc(test, sizeof(*data->sgt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, data->sgt);
	data->sgt->orig_nents = 0;
	data->sgt->sgl = sg;

	data->buffer = NULL;
	data->ubuf = NULL;
	if (user) {
		uaddr = kunit_vm_mmap(test, NULL, 0, bufsize,
				      PROT_READ | PROT_WRITE,
				      MAP_ANONYMOUS | MAP_PRIVATE, 0);
		KUNIT_ASSERT_NE(test, uaddr, 0);
		data->ubuf = (u8 __user *)uaddr;
		for (i = 0; i < bufsize; ++i)
			put_user(pattern(i), data->ubuf + i);
	} else {
		data->buffer = iov_kunit_create_buffer(test, &data->pages,
						       data->npages);
		for (i = 0; i < bufsize; ++i)
			data->buffer[i] = pattern(i);
	}
	data->scratch = iov_kunit_create_buffer(test, &spages, data->npages);
	memset(data->scratch, 0, bufsize);
}
1075 
/* Drive extract_iter_to_sg() over @iter in several pieces (zero slots,
 * one slot, the bulk, then the tail) and verify that copying the
 * resulting scatterlist back out reproduces the pattern.
 */
static void __init
iov_kunit_iter_to_sg_check(struct kunit *test, struct iov_iter *iter,
			   size_t bufsize,
			   struct iov_kunit_iter_to_sg_data *data)
{
	static const size_t tail = 16 * PAGE_SIZE;
	size_t i;

	KUNIT_ASSERT_LT(test, tail, bufsize);

	/* If extraction pins the pages, make sure they get unpinned. */
	if (iov_iter_extract_will_pin(iter))
		kunit_add_action_or_reset(test, iov_kunit_iter_unpin_sgt,
					  data->sgt);

	/* Zero scatterlist slots: nothing must be extracted. */
	i = extract_iter_to_sg(iter, bufsize, data->sgt, 0, 0);
	KUNIT_ASSERT_EQ(test, i, 0);
	KUNIT_ASSERT_EQ(test, data->sgt->nents, 0);

	/* A single slot: exactly one entry must appear. */
	i = extract_iter_to_sg(iter, bufsize - tail, data->sgt, 1, 0);
	KUNIT_ASSERT_LE(test, i, bufsize - tail);
	KUNIT_ASSERT_EQ(test, data->sgt->nents, 1);

	/* Extract the rest of the head portion. */
	i += extract_iter_to_sg(iter, bufsize - tail - i, data->sgt,
				data->npages - data->sgt->nents, 0);
	KUNIT_ASSERT_EQ(test, i, bufsize - tail);
	KUNIT_ASSERT_LE(test, data->sgt->nents, data->npages);

	/* And finally the tail. */
	i += extract_iter_to_sg(iter, tail, data->sgt,
				data->npages - data->sgt->nents, 0);
	KUNIT_ASSERT_EQ(test, i, bufsize);
	KUNIT_ASSERT_LE(test, data->sgt->nents, data->npages);

	sg_mark_end(&data->sgt->sgl[data->sgt->nents - 1]);

	/* Copy the scatterlist out and compare with the pattern. */
	i = sg_copy_to_buffer(data->sgt->sgl, data->sgt->nents,
			      data->scratch, bufsize);
	KUNIT_ASSERT_EQ(test, i, bufsize);

	for (i = 0; i < bufsize; ++i) {
		KUNIT_EXPECT_EQ_MSG(test, data->scratch[i], pattern(i),
				    "at i=%zx", i);
		if (data->scratch[i] != pattern(i))
			break;		/* Stop after the first mismatch. */
	}

	KUNIT_EXPECT_EQ(test, i, bufsize);
}
1123 
1124 static void __init iov_kunit_iter_to_sg_kvec(struct kunit *test)
1125 {
1126 	struct iov_kunit_iter_to_sg_data data;
1127 	struct iov_iter iter;
1128 	struct kvec kvec;
1129 	size_t bufsize;
1130 
1131 	bufsize = 0x100000;
1132 	iov_kunit_iter_to_sg_init(test, bufsize, false, &data);
1133 
1134 	kvec.iov_base = data.buffer;
1135 	kvec.iov_len = bufsize;
1136 	iov_iter_kvec(&iter, READ, &kvec, 1, bufsize);
1137 
1138 	iov_kunit_iter_to_sg_check(test, &iter, bufsize, &data);
1139 }
1140 
1141 static void __init iov_kunit_iter_to_sg_bvec(struct kunit *test)
1142 {
1143 	struct iov_kunit_iter_to_sg_data data;
1144 	struct page *p, *can_merge = NULL;
1145 	size_t i, k, bufsize;
1146 	struct bio_vec *bvec;
1147 	struct iov_iter iter;
1148 
1149 	bufsize = 0x100000;
1150 	iov_kunit_iter_to_sg_init(test, bufsize, false, &data);
1151 
1152 	bvec = kunit_kmalloc_array(test, data.npages, sizeof(*bvec),
1153 				   GFP_KERNEL);
1154 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bvec);
1155 	k = 0;
1156 	for (i = 0; i < data.npages; ++i) {
1157 		p = data.pages[i];
1158 		if (p == can_merge)
1159 			bvec[k-1].bv_len += PAGE_SIZE;
1160 		else
1161 			bvec_set_page(&bvec[k++], p, PAGE_SIZE, 0);
1162 		can_merge = p + 1;
1163 	}
1164 	iov_iter_bvec(&iter, READ, bvec, k, bufsize);
1165 
1166 	iov_kunit_iter_to_sg_check(test, &iter, bufsize, &data);
1167 }
1168 
1169 static void __init iov_kunit_iter_to_sg_folioq(struct kunit *test)
1170 {
1171 	struct iov_kunit_iter_to_sg_data data;
1172 	struct folio_queue *folioq;
1173 	struct iov_iter iter;
1174 	size_t bufsize;
1175 
1176 	bufsize = 0x100000;
1177 	iov_kunit_iter_to_sg_init(test, bufsize, false, &data);
1178 
1179 	folioq = iov_kunit_create_folioq(test);
1180 	iov_kunit_load_folioq(test, &iter, READ, folioq, data.pages,
1181 			      data.npages);
1182 
1183 	iov_kunit_iter_to_sg_check(test, &iter, bufsize, &data);
1184 }
1185 
1186 static void __init iov_kunit_iter_to_sg_xarray(struct kunit *test)
1187 {
1188 	struct iov_kunit_iter_to_sg_data data;
1189 	struct xarray *xarray;
1190 	struct iov_iter iter;
1191 	size_t bufsize;
1192 
1193 	bufsize = 0x100000;
1194 	iov_kunit_iter_to_sg_init(test, bufsize, false, &data);
1195 
1196 	xarray = iov_kunit_create_xarray(test);
1197 	iov_kunit_load_xarray(test, &iter, READ, xarray, data.pages,
1198 			      data.npages);
1199 
1200 	iov_kunit_iter_to_sg_check(test, &iter, bufsize, &data);
1201 }
1202 
1203 static void __init iov_kunit_iter_to_sg_ubuf(struct kunit *test)
1204 {
1205 	struct iov_kunit_iter_to_sg_data data;
1206 	struct iov_iter iter;
1207 	size_t bufsize;
1208 
1209 	bufsize = 0x100000;
1210 	iov_kunit_iter_to_sg_init(test, bufsize, true, &data);
1211 
1212 	iov_iter_ubuf(&iter, READ, data.ubuf, bufsize);
1213 
1214 	iov_kunit_iter_to_sg_check(test, &iter, bufsize, &data);
1215 }
1216 
/*
 * The test cases KUnit will run, in order: buffer copy tests, then page
 * extraction tests, then iterator-to-scatterlist extraction tests.
 */
static struct kunit_case __refdata iov_kunit_cases[] = {
	KUNIT_CASE(iov_kunit_copy_to_kvec),
	KUNIT_CASE(iov_kunit_copy_from_kvec),
	KUNIT_CASE(iov_kunit_copy_to_bvec),
	KUNIT_CASE(iov_kunit_copy_from_bvec),
	KUNIT_CASE(iov_kunit_copy_to_folioq),
	KUNIT_CASE(iov_kunit_copy_from_folioq),
	KUNIT_CASE(iov_kunit_copy_to_xarray),
	KUNIT_CASE(iov_kunit_copy_from_xarray),
	KUNIT_CASE(iov_kunit_extract_pages_kvec),
	KUNIT_CASE(iov_kunit_extract_pages_bvec),
	KUNIT_CASE(iov_kunit_extract_pages_folioq),
	KUNIT_CASE(iov_kunit_extract_pages_xarray),
	KUNIT_CASE(iov_kunit_iter_to_sg_kvec),
	KUNIT_CASE(iov_kunit_iter_to_sg_bvec),
	KUNIT_CASE(iov_kunit_iter_to_sg_folioq),
	KUNIT_CASE(iov_kunit_iter_to_sg_xarray),
	KUNIT_CASE(iov_kunit_iter_to_sg_ubuf),
	{}
};
1237 
/* Register the "iov_iter" suite so KUnit runs the cases listed above. */
static struct kunit_suite iov_kunit_suite = {
	.name = "iov_iter",
	.test_cases = iov_kunit_cases,
};

kunit_test_suites(&iov_kunit_suite);
1244