xref: /linux/lib/kunit_iov_iter.c (revision 9c5968db9e625019a0ee5226c7eebef5519d366a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* I/O iterator tests.  This can only test kernel-backed iterator types.
3  *
4  * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/uio.h>
14 #include <linux/bvec.h>
15 #include <linux/folio_queue.h>
16 #include <kunit/test.h>
17 
18 MODULE_DESCRIPTION("iov_iter testing");
19 MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
20 MODULE_LICENSE("GPL");
21 
struct kvec_test_range {
	int	from, to;	/* Byte range [from, to) within the test buffer */
};

/*
 * Ranges to lay over the 1MiB test buffer.  They include zero-length and
 * page-crossing cases; the list is terminated by a negative ->from.
 */
static const struct kvec_test_range kvec_test_ranges[] = {
	{ 0x00002, 0x00002 },
	{ 0x00027, 0x03000 },
	{ 0x05193, 0x18794 },
	{ 0x20000, 0x20000 },
	{ 0x20000, 0x24000 },
	{ 0x24000, 0x27001 },
	{ 0x29000, 0xffffb },
	{ 0xffffd, 0xffffe },
	{ -1 }
};
37 
38 static inline u8 pattern(unsigned long x)
39 {
40 	return x & 0xff;
41 }
42 
/* Kunit cleanup action: tear down a buffer mapping made with vmap(). */
static void iov_kunit_unmap(void *data)
{
	vunmap(data);
}
47 
48 static void *__init iov_kunit_create_buffer(struct kunit *test,
49 					    struct page ***ppages,
50 					    size_t npages)
51 {
52 	struct page **pages;
53 	unsigned long got;
54 	void *buffer;
55 
56 	pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
57         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
58 	*ppages = pages;
59 
60 	got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
61 	if (got != npages) {
62 		release_pages(pages, got);
63 		KUNIT_ASSERT_EQ(test, got, npages);
64 	}
65 
66 	buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
67         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
68 
69 	kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
70 	return buffer;
71 }
72 
73 static void __init iov_kunit_load_kvec(struct kunit *test,
74 				       struct iov_iter *iter, int dir,
75 				       struct kvec *kvec, unsigned int kvmax,
76 				       void *buffer, size_t bufsize,
77 				       const struct kvec_test_range *pr)
78 {
79 	size_t size = 0;
80 	int i;
81 
82 	for (i = 0; i < kvmax; i++, pr++) {
83 		if (pr->from < 0)
84 			break;
85 		KUNIT_ASSERT_GE(test, pr->to, pr->from);
86 		KUNIT_ASSERT_LE(test, pr->to, bufsize);
87 		kvec[i].iov_base = buffer + pr->from;
88 		kvec[i].iov_len = pr->to - pr->from;
89 		size += pr->to - pr->from;
90 	}
91 	KUNIT_ASSERT_LE(test, size, bufsize);
92 
93 	iov_iter_kvec(iter, dir, kvec, i, size);
94 }
95 
/*
 * Test copying to a ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	/* Source buffer holds the pattern; the destination starts zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	/* The copy should have consumed the entire iterator. */
	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}
145 
/*
 * Test copying from a ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	/* Source buffer holds the pattern; the destination starts zeroed. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = min(iter.count, bufsize);

	copied = copy_from_iter(scratch, size, &iter);

	/* The copy should have consumed the entire iterator. */
	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer: the extracted data is
	 * compacted to the front of the scratch buffer.
	 */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}
200 
struct bvec_test_range {
	int	page, from, to;	/* Byte range [from, to) within page index */
};

/*
 * Per-page ranges to turn into bio_vecs, including zero-length, partial and
 * whole-page cases; terminated by negative values.
 */
static const struct bvec_test_range bvec_test_ranges[] = {
	{ 0, 0x0002, 0x0002 },
	{ 1, 0x0027, 0x0893 },
	{ 2, 0x0193, 0x0794 },
	{ 3, 0x0000, 0x1000 },
	{ 4, 0x0000, 0x1000 },
	{ 5, 0x0000, 0x1000 },
	{ 6, 0x0000, 0x0ffb },
	{ 6, 0x0ffd, 0x0ffe },
	{ -1, -1, -1 }
};
216 
/*
 * Build a bio_vec[] from the given table of test ranges over the page list
 * and initialise an ITER_BVEC iterator over it.  A range that starts at
 * offset 0 of the page following a range that ended on a page boundary is
 * merged into the previous bio_vec rather than getting its own element.
 */
static void __init iov_kunit_load_bvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct bio_vec *bvec, unsigned int bvmax,
				       struct page **pages, size_t npages,
				       size_t bufsize,
				       const struct bvec_test_range *pr)
{
	struct page *can_merge = NULL, *page;
	size_t size = 0;
	int i;

	for (i = 0; i < bvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_LT(test, pr->page, npages);
		KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
		KUNIT_ASSERT_GE(test, pr->from, 0);
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);

		page = pages[pr->page];
		if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
			/* Extend the previous bio_vec instead of adding one. */
			i--;
			bvec[i].bv_len += pr->to;
		} else {
			bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
		}

		size += pr->to - pr->from;
		/* Only a range ending on a page boundary can be merged into. */
		if ((pr->to & ~PAGE_MASK) == 0)
			can_merge = page + pr->to / PAGE_SIZE;
		else
			can_merge = NULL;
	}

	iov_iter_bvec(iter, dir, bvec, i, size);
}
254 
/*
 * Test copying to a ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_to_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, b, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	/* Source buffer holds the pattern; the destination starts zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	/* The copy should have consumed the entire iterator. */
	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	b = 0;
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
		u8 *p = scratch + pr->page * PAGE_SIZE;

		for (i = pr->from; i < pr->to; i++)
			p[i] = pattern(patt++);
	}

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}
308 
/*
 * Test copying from a ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	/* Source buffer holds the pattern; the destination starts zeroed. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_from_iter(scratch, size, &iter);

	/* The copy should have consumed the entire iterator. */
	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer: the extracted data is
	 * compacted to the front of the scratch buffer.
	 */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
		size_t patt = pr->page * PAGE_SIZE;

		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(patt + j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}
365 
366 static void iov_kunit_destroy_folioq(void *data)
367 {
368 	struct folio_queue *folioq, *next;
369 
370 	for (folioq = data; folioq; folioq = next) {
371 		next = folioq->next;
372 		for (int i = 0; i < folioq_nr_slots(folioq); i++)
373 			if (folioq_folio(folioq, i))
374 				folio_put(folioq_folio(folioq, i));
375 		kfree(folioq);
376 	}
377 }
378 
/*
 * Attach npages pages to a folio_queue chain, extending the chain with new
 * segments as needed, and initialise an ITER_FOLIOQ iterator over the whole
 * queue.  Extra segments are freed by iov_kunit_destroy_folioq().
 */
static void __init iov_kunit_load_folioq(struct kunit *test,
					struct iov_iter *iter, int dir,
					struct folio_queue *folioq,
					struct page **pages, size_t npages)
{
	struct folio_queue *p = folioq;
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		if (folioq_full(p)) {
			/* Chain on another segment when the current one fills. */
			p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
			KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
			folioq_init(p->next, 0);
			p->next->prev = p;
			p = p->next;
		}
		folioq_append(p, page_folio(pages[i]));
		size += PAGE_SIZE;
	}
	iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
}
401 
/*
 * Allocate the head of a folio_queue chain, arranging for the chain (and the
 * folios attached to it) to be cleaned up at the end of the test.
 */
static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
{
	struct folio_queue *folioq;

	folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
	kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
	folioq_init(folioq, 0);
	return folioq;
}
412 
/*
 * Test copying to a ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_to_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	/* Source buffer holds the pattern; the destination starts zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	/* Copy a slice of the pattern into each test range in turn. */
	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		/* Reset the iterator, then skip to the start of the range. */
		iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
		if (test->status == KUNIT_FAILURE)
			goto stop;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

stop:
	KUNIT_SUCCEED(test);
}
474 
/*
 * Test copying from a ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_from_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	/* Source buffer holds the pattern; the destination starts zeroed. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	/* Copy out of each test range in turn, compacting the data. */
	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		/* Reset the iterator, then skip to the start of the range. */
		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}
538 
/* Kunit cleanup action: dispose of the xarray used to back an iterator. */
static void iov_kunit_destroy_xarray(void *data)
{
	struct xarray *xarray = data;

	xa_destroy(xarray);
	kfree(xarray);
}
546 
547 static void __init iov_kunit_load_xarray(struct kunit *test,
548 					 struct iov_iter *iter, int dir,
549 					 struct xarray *xarray,
550 					 struct page **pages, size_t npages)
551 {
552 	size_t size = 0;
553 	int i;
554 
555 	for (i = 0; i < npages; i++) {
556 		void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
557 
558 		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
559 		size += PAGE_SIZE;
560 	}
561 	iov_iter_xarray(iter, dir, xarray, 0, size);
562 }
563 
564 static struct xarray *iov_kunit_create_xarray(struct kunit *test)
565 {
566 	struct xarray *xarray;
567 
568 	xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
569 	xa_init(xarray);
570 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
571 	kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
572 	return xarray;
573 }
574 
/*
 * Test copying to a ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	/* Source buffer holds the pattern; the destination starts zeroed. */
	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	/* Copy a slice of the pattern into each test range in turn. */
	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, READ, xarray, pr->from, size);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}
632 
/*
 * Test copying from a ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	/* Source buffer holds the pattern; the destination starts zeroed. */
	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	/* Copy out of each test range in turn, compacting the data. */
	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images; stop at the first mismatch to limit the log. */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}
695 
/*
 * Test the extraction of ITER_KVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct kvec kvec[8];
	u8 *buffer;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	pr = kvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		/* Poison the page list so stale entries stand out. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		/* Check each extracted page against the expected buffer page. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			/* Move on to the next range when this one is used up. */
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}
775 
/*
 * Test the extraction of ITER_BVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct bio_vec bvec[8];
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	pr = bvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		/* Poison the page list so stale entries stand out. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		/* Check each extracted page against the expected buffer page. */
		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			/* Move on to the next range when this one is used up. */
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = pr->page + from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}
853 
/*
 * Test the extraction of ITER_FOLIOQ-type iterators.
 */
static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct folio_queue *folioq;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	/* Extract from an iterator set over each test range in turn. */
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, from);

		do {
			size_t offset0 = LONG_MAX;

			/* Poison the page list so stale entries stand out. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			/* Check each extracted page against the expected one. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
	}

stop:
	KUNIT_SUCCEED(test);
}
932 
/*
 * Test the extraction of ITER_XARRAY-type iterators.
 */
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	/* Extract from an iterator set over each test range in turn. */
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, from, size);

		do {
			size_t offset0 = LONG_MAX;

			/* Poison the page list so stale entries stand out. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			/* Check each extracted page against the expected one. */
			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
	}

stop:
	KUNIT_SUCCEED(test);
}
1011 
/* The list of test cases making up the iov_iter suite. */
static struct kunit_case __refdata iov_kunit_cases[] = {
	KUNIT_CASE(iov_kunit_copy_to_kvec),
	KUNIT_CASE(iov_kunit_copy_from_kvec),
	KUNIT_CASE(iov_kunit_copy_to_bvec),
	KUNIT_CASE(iov_kunit_copy_from_bvec),
	KUNIT_CASE(iov_kunit_copy_to_folioq),
	KUNIT_CASE(iov_kunit_copy_from_folioq),
	KUNIT_CASE(iov_kunit_copy_to_xarray),
	KUNIT_CASE(iov_kunit_copy_from_xarray),
	KUNIT_CASE(iov_kunit_extract_pages_kvec),
	KUNIT_CASE(iov_kunit_extract_pages_bvec),
	KUNIT_CASE(iov_kunit_extract_pages_folioq),
	KUNIT_CASE(iov_kunit_extract_pages_xarray),
	{}
};
1027 
/* Register the suite with the KUnit framework. */
static struct kunit_suite iov_kunit_suite = {
	.name = "iov_iter",
	.test_cases = iov_kunit_cases,
};

kunit_test_suites(&iov_kunit_suite);
1034