xref: /linux/lib/raid/xor/tests/xor_kunit.c (revision 440d6635b20037bc9ad46b20817d7b61cef0fc1b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Unit test the XOR library functions.
4  *
5  * Copyright 2024 Google LLC
6  * Copyright 2026 Christoph Hellwig
7  *
8  * Based on the CRC tests by Eric Biggers <ebiggers@google.com>.
9  */
10 #include <kunit/test.h>
11 #include <linux/prandom.h>
12 #include <linux/string_choices.h>
13 #include <linux/vmalloc.h>
14 #include <linux/raid/xor.h>
15 
/* Fixed PRNG seed so every run exercises the same pseudo-random cases. */
#define XOR_KUNIT_SEED			42
/* Maximum number of bytes XORed in a single test iteration. */
#define XOR_KUNIT_MAX_BYTES		16384
/* Maximum number of source buffers passed to xor_gen() in one call. */
#define XOR_KUNIT_MAX_BUFFERS		64
/* Number of randomized iterations run by xor_test(). */
#define XOR_KUNIT_NUM_TEST_ITERS	1000

/* Deterministic PRNG state, seeded once in xor_suite_init(). */
static struct rnd_state rng;
/* Source buffers; each is test_buflen bytes of vmalloc()ed memory. */
static void *test_buffers[XOR_KUNIT_MAX_BUFFERS];
/* Destination buffer handed to the implementation under test. */
static void *test_dest;
/* Shadow destination updated by the byte-wise reference implementation. */
static void *test_ref;
/* Size of each buffer: XOR_KUNIT_MAX_BYTES rounded up to a full page. */
static size_t test_buflen;
26 
/* Return the next 32-bit value from the shared deterministic PRNG. */
static u32 rand32(void)
{
	return prandom_u32_state(&rng);
}
31 
/*
 * Reference implementation: XOR every source buffer into @dest one byte at
 * a time.  Deliberately dumb and obviously correct, so it can be trusted
 * as the oracle for the optimized xor_gen() implementation.
 */
static void xor_ref(void *dest, void **srcs, unsigned int src_cnt,
		unsigned int bytes)
{
	unsigned char *out = dest;
	unsigned int pos, n;

	/* Byte-position outer loop keeps the exact read/write order. */
	for (pos = 0; pos < bytes; pos++) {
		for (n = 0; n < src_cnt; n++)
			out[pos] ^= ((const unsigned char *)srcs[n])[pos];
	}
}
47 
/*
 * Pick a pseudo-random length in [1, max_length] and round it up to the
 * next multiple of 512.  NOTE(review): the result can exceed max_length
 * unless max_length is itself a multiple of 512, which holds for the only
 * current caller (XOR_KUNIT_MAX_BYTES).
 */
static unsigned int random_length(unsigned int max_length)
{
	unsigned int len = (rand32() % max_length) + 1;

	return round_up(len, 512);
}
53 
/*
 * Pick a pseudo-random offset in [1, max_alignment] and round it down to
 * a multiple of 64.  May return 0 whenever max_alignment < 64 (or the
 * random pick lands below 64), which callers treat as "no extra offset".
 */
static unsigned int random_alignment(unsigned int max_alignment)
{
	unsigned int align = (rand32() % max_alignment) + 1;

	return align & ~63U;
}
59 
/*
 * Refill all test data from the PRNG: random bytes in the destination
 * (mirrored into the reference buffer so both start out identical) and
 * fresh random bytes in every source buffer.  The prandom call order is
 * part of the deterministic test stream, so it must not be reordered.
 */
static void xor_generate_random_data(void)
{
	int i;

	prandom_bytes_state(&rng, test_dest, test_buflen);
	memcpy(test_ref, test_dest, test_buflen);
	for (i = 0; i < XOR_KUNIT_MAX_BUFFERS; i++)
		prandom_bytes_state(&rng, test_buffers[i], test_buflen);
}
69 
/* Test that xor_gen gives the same result as a reference implementation. */
static void xor_test(struct kunit *test)
{
	void *aligned_buffers[XOR_KUNIT_MAX_BUFFERS];
	size_t i;

	for (i = 0; i < XOR_KUNIT_NUM_TEST_ITERS; i++) {
		/* Random source count in [1, XOR_KUNIT_MAX_BUFFERS]. */
		unsigned int nr_buffers =
			(rand32() % XOR_KUNIT_MAX_BUFFERS) + 1;
		/* Random multiple of 512 in [512, XOR_KUNIT_MAX_BYTES]. */
		unsigned int len = random_length(XOR_KUNIT_MAX_BYTES);
		unsigned int max_alignment, align = 0;
		void *buffers;

		if (rand32() % 8 == 0)
			/* Refresh the data occasionally. */
			xor_generate_random_data();

		/*
		 * If we're not using the entire buffer size, inject a random
		 * alignment into the buffer.
		 */
		max_alignment = XOR_KUNIT_MAX_BYTES - len;
		if (max_alignment == 0) {
			/* Full-size operation: use the buffers as-is. */
			buffers = test_buffers;
		} else if (rand32() % 2 == 0) {
			/* Use random alignments mod 64 */
			int j;

			/* Each source gets its own independent offset. */
			for (j = 0; j < nr_buffers; j++)
				aligned_buffers[j] = test_buffers[j] +
					random_alignment(max_alignment);
			buffers = aligned_buffers;
			/* The destination offset is randomized separately. */
			align = random_alignment(max_alignment);
		} else {
			/* Go up to the guard page, to catch buffer overreads */
			int j;

			/* Offset so [align, align + len) ends at test_buflen. */
			align = test_buflen - len;
			for (j = 0; j < nr_buffers; j++)
				aligned_buffers[j] = test_buffers[j] + align;
			buffers = aligned_buffers;
		}

		/*
		 * Compute the XOR, and verify that it equals the XOR computed
		 * by a simple byte-at-a-time reference implementation.
		 * xor_ref() XORs the sources into its destination in place;
		 * since test_ref starts as a copy of test_dest, the two stay
		 * in sync across iterations (assuming xor_gen() has the same
		 * in-place XOR-accumulate semantics -- see xor_ref()).
		 */
		xor_ref(test_ref + align, buffers, nr_buffers, len);
		xor_gen(test_dest + align, buffers, nr_buffers, len);
		KUNIT_EXPECT_MEMEQ_MSG(test, test_ref + align,
				test_dest + align, len,
				"Wrong result with buffers=%u, len=%u, unaligned=%s, at_end=%s",
				nr_buffers, len,
				str_yes_no(max_alignment),
				str_yes_no(align + len == test_buflen));
	}
}
127 
/* The suite's single case drives all randomized iterations. */
static struct kunit_case xor_test_cases[] = {
	KUNIT_CASE(xor_test),
	{},
};
132 
133 static int xor_suite_init(struct kunit_suite *suite)
134 {
135 	int i;
136 
137 	/*
138 	 * Allocate the test buffer using vmalloc() with a page-aligned length
139 	 * so that it is immediately followed by a guard page.  This allows
140 	 * buffer overreads to be detected, even in assembly code.
141 	 */
142 	test_buflen = round_up(XOR_KUNIT_MAX_BYTES, PAGE_SIZE);
143 	test_ref = vmalloc(test_buflen);
144 	if (!test_ref)
145 		return -ENOMEM;
146 	test_dest = vmalloc(test_buflen);
147 	if (!test_dest)
148 		goto out_free_ref;
149 	for (i = 0; i < XOR_KUNIT_MAX_BUFFERS; i++) {
150 		test_buffers[i] = vmalloc(test_buflen);
151 		if (!test_buffers[i])
152 			goto out_free_buffers;
153 	}
154 
155 	prandom_seed_state(&rng, XOR_KUNIT_SEED);
156 	xor_generate_random_data();
157 	return 0;
158 
159 out_free_buffers:
160 	while (--i >= 0)
161 		vfree(test_buffers[i]);
162 	vfree(test_dest);
163 out_free_ref:
164 	vfree(test_ref);
165 	return -ENOMEM;
166 }
167 
168 static void xor_suite_exit(struct kunit_suite *suite)
169 {
170 	int i;
171 
172 	vfree(test_ref);
173 	vfree(test_dest);
174 	for (i = 0; i < XOR_KUNIT_MAX_BUFFERS; i++)
175 		vfree(test_buffers[i]);
176 }
177 
/* The "xor" KUnit suite: buffer setup/teardown around the randomized test. */
static struct kunit_suite xor_test_suite = {
	.name		= "xor",
	.test_cases	= xor_test_cases,
	.suite_init	= xor_suite_init,
	.suite_exit	= xor_suite_exit,
};
kunit_test_suite(xor_test_suite);

MODULE_DESCRIPTION("Unit test for the XOR library functions");
MODULE_LICENSE("GPL");
188