xref: /linux/lib/fortify_kunit.c (revision 3f41368fbfe1b3d5922d317fe1a0a0cab6846802)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
4  * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
5  *
6  * For corner cases with UBSAN, try testing with:
7  *
8  * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
9  *	--kconfig_add CONFIG_FORTIFY_SOURCE=y \
10  *	--kconfig_add CONFIG_UBSAN=y \
11  *	--kconfig_add CONFIG_UBSAN_TRAP=y \
12  *	--kconfig_add CONFIG_UBSAN_BOUNDS=y \
13  *	--kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
14  *	--make_options LLVM=1 fortify
15  */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 /* We don't need to fill dmesg with the fortify WARNs during testing. */
19 #ifdef DEBUG
20 # define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
21 # define FORTIFY_WARN_KUNIT(x...)   WARN_ONCE(x)
22 #else
23 # define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
24 # define FORTIFY_WARN_KUNIT(x...)   do { } while (0)
25 #endif
26 
27 /* Redefine fortify_panic() to track failures. */
28 void fortify_add_kunit_error(int write);
29 #define fortify_panic(func, write, avail, size, retfail) do {		\
30 	FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size);	\
31 	fortify_add_kunit_error(write);					\
32 	return (retfail);						\
33 } while (0)
34 
35 /* Redefine fortify_warn_once() to track memcpy() failures. */
36 #define fortify_warn_once(chk_func, x...) do {				\
37 	bool __result = chk_func;					\
38 	FORTIFY_WARN_KUNIT(__result, x);				\
39 	if (__result)							\
40 		fortify_add_kunit_error(1);				\
41 } while (0)
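
/*
 * Illustrative sketch (not part of the tests themselves): with the
 * redefinitions above, an overflowing fortified call such as
 *
 *	char dst[8];
 *	strscpy(dst, src, len);		// with a non-constant len > sizeof(dst)
 *
 * does not panic (and, unless DEBUG is set, does not WARN). Instead
 * fortify_add_kunit_error(1) bumps the "fortify_write_overflows" KUnit
 * resource and the string helper returns its normal failure value, so the
 * test cases below can keep running and simply compare the counters after
 * each call.
 */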
42 
43 #include <kunit/device.h>
44 #include <kunit/test.h>
45 #include <kunit/test-bug.h>
46 #include <linux/device.h>
47 #include <linux/slab.h>
48 #include <linux/string.h>
49 #include <linux/vmalloc.h>
50 
51 /* Handle being built without CONFIG_FORTIFY_SOURCE */
52 #ifndef __compiletime_strlen
53 # define __compiletime_strlen __builtin_strlen
54 #endif
55 
56 static struct kunit_resource read_resource;
57 static struct kunit_resource write_resource;
58 static int fortify_read_overflows;
59 static int fortify_write_overflows;
60 
61 static const char array_of_10[] = "this is 10";
62 static const char *ptr_of_11 = "this is 11!";
63 static char array_unknown[] = "compiler thinks I might change";
64 
65 void fortify_add_kunit_error(int write)
66 {
67 	struct kunit_resource *resource;
68 	struct kunit *current_test;
69 
70 	current_test = kunit_get_current_test();
71 	if (!current_test)
72 		return;
73 
74 	resource = kunit_find_named_resource(current_test,
75 			write ? "fortify_write_overflows"
76 			      : "fortify_read_overflows");
77 	if (!resource)
78 		return;
79 
80 	(*(int *)resource->data)++;
81 	kunit_put_resource(resource);
82 }
83 
84 static void fortify_test_known_sizes(struct kunit *test)
85 {
86 	KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
87 	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
88 	KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
89 
90 	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
91 	/* Externally defined and dynamically sized string pointer: */
92 	KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
93 }
94 
95 /* This is volatile so the optimizer can't perform DCE below. */
96 static volatile int pick;
97 
98 /* Not inline to keep optimizer from figuring out which string we want. */
99 static noinline size_t want_minus_one(int pick)
100 {
101 	const char *str;
102 
103 	switch (pick) {
104 	case 1:
105 		str = "4444";
106 		break;
107 	case 2:
108 		str = "333";
109 		break;
110 	default:
111 		str = "1";
112 		break;
113 	}
114 	return __compiletime_strlen(str);
115 }
116 
117 static void fortify_test_control_flow_split(struct kunit *test)
118 {
119 	KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
120 }
121 
122 #define KUNIT_EXPECT_BOS(test, p, expected, name)			\
123 	KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1),		\
124 		expected,						\
125 		"__alloc_size() not working with __bos on " name "\n")
126 
127 #if !__has_builtin(__builtin_dynamic_object_size)
128 #define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
129 	/* Silence "unused variable 'expected'" warning. */		\
130 	KUNIT_EXPECT_EQ(test, expected, expected)
131 #else
132 #define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
133 	KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1),	\
134 		expected,						\
135 		"__alloc_size() not working with __bdos on " name "\n")
136 #endif
137 
138 /* If the expected size is a constant value, __bos can see it. */
139 #define check_const(_expected, alloc, free)		do {		\
140 	size_t expected = (_expected);					\
141 	void *p = alloc;						\
142 	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
143 	KUNIT_EXPECT_BOS(test, p, expected, #alloc);			\
144 	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
145 	free;								\
146 } while (0)
147 
148 /* If the expected size is NOT a constant value, __bos CANNOT see it. */
149 #define check_dynamic(_expected, alloc, free)		do {		\
150 	size_t expected = (_expected);					\
151 	void *p = alloc;						\
152 	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
153 	KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc);			\
154 	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
155 	free;								\
156 } while (0)
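
/*
 * For example (illustrative only): given "void *p = kmalloc(n, gfp)" with a
 * non-constant n, __builtin_object_size(p, 1) can only fold to SIZE_MAX,
 * while __builtin_dynamic_object_size(p, 1) may still resolve to n at run
 * time via the __alloc_size() annotations. check_const() and check_dynamic()
 * encode exactly this difference in their expectations above.
 */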
157 
158 /* Assortment of constant-value kinda-edge cases. */
159 #define CONST_TEST_BODY(TEST_alloc)	do {				\
160 	/* Special-case vmalloc()-family to skip 0-sized allocs. */	\
161 	if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0)			\
162 		TEST_alloc(check_const, 0, 0);				\
163 	TEST_alloc(check_const, 1, 1);					\
164 	TEST_alloc(check_const, 128, 128);				\
165 	TEST_alloc(check_const, 1023, 1023);				\
166 	TEST_alloc(check_const, 1025, 1025);				\
167 	TEST_alloc(check_const, 4096, 4096);				\
168 	TEST_alloc(check_const, 4097, 4097);				\
169 } while (0)
170 
171 static volatile size_t zero_size;
172 static volatile size_t unknown_size = 50;
173 
174 #if !__has_builtin(__builtin_dynamic_object_size)
175 #define DYNAMIC_TEST_BODY(TEST_alloc)					\
176 	kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
177 #else
178 #define DYNAMIC_TEST_BODY(TEST_alloc)	do {				\
179 	size_t size = unknown_size;					\
180 									\
181 	/*								\
182 	 * Expected size is "size" in each test, before it is then	\
183 	 * internally incremented in each test.	Requires we disable	\
184 	 * -Wunsequenced.						\
185 	 */								\
186 	TEST_alloc(check_dynamic, size, size++);			\
187 	/* Make sure incrementing actually happened. */			\
188 	KUNIT_EXPECT_NE(test, size, unknown_size);			\
189 } while (0)
190 #endif
191 
192 #define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator)				\
193 static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
194 {									\
195 	CONST_TEST_BODY(TEST_##allocator);				\
196 }									\
197 static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
198 {									\
199 	DYNAMIC_TEST_BODY(TEST_##allocator);				\
200 }
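
/*
 * Roughly how this expands (illustrative, not literal): for example,
 * DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc) below emits
 * fortify_test_alloc_size_kmalloc_const(), which runs
 * TEST_kmalloc(check_const, 0, 0) up through
 * TEST_kmalloc(check_const, 4097, 4097), and
 * fortify_test_alloc_size_kmalloc_dynamic(), which runs
 * TEST_kmalloc(check_dynamic, size, size++) with a non-constant size.
 */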
201 
202 #define TEST_kmalloc(checker, expected_size, alloc_size)	do {	\
203 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
204 	void *orig;							\
205 	size_t len;							\
206 									\
207 	checker(expected_size, kmalloc(alloc_size, gfp),		\
208 		kfree(p));						\
209 	checker(expected_size,						\
210 		kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
211 		kfree(p));						\
212 	checker(expected_size, kzalloc(alloc_size, gfp),		\
213 		kfree(p));						\
214 	checker(expected_size,						\
215 		kzalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
216 		kfree(p));						\
217 	checker(expected_size, kcalloc(1, alloc_size, gfp),		\
218 		kfree(p));						\
219 	checker(expected_size, kcalloc(alloc_size, 1, gfp),		\
220 		kfree(p));						\
221 	checker(expected_size,						\
222 		kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE),		\
223 		kfree(p));						\
224 	checker(expected_size,						\
225 		kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE),		\
226 		kfree(p));						\
227 	checker(expected_size, kmalloc_array(1, alloc_size, gfp),	\
228 		kfree(p));						\
229 	checker(expected_size, kmalloc_array(alloc_size, 1, gfp),	\
230 		kfree(p));						\
231 	checker(expected_size,						\
232 		kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE),	\
233 		kfree(p));						\
234 	checker(expected_size,						\
235 		kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE),	\
236 		kfree(p));						\
237 	checker(expected_size, __kmalloc(alloc_size, gfp),		\
238 		kfree(p));						\
239 	checker(expected_size,						\
240 		__kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
241 		kfree(p));						\
242 									\
243 	orig = kmalloc(alloc_size, gfp);				\
244 	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
245 	checker((expected_size) * 2,					\
246 		krealloc(orig, (alloc_size) * 2, gfp),			\
247 		kfree(p));						\
248 	orig = kmalloc(alloc_size, gfp);				\
249 	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
250 	checker((expected_size) * 2,					\
251 		krealloc_array(orig, 1, (alloc_size) * 2, gfp),		\
252 		kfree(p));						\
253 	orig = kmalloc(alloc_size, gfp);				\
254 	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
255 	checker((expected_size) * 2,					\
256 		krealloc_array(orig, (alloc_size) * 2, 1, gfp),		\
257 		kfree(p));						\
258 									\
259 	len = 11;							\
260 	/* Using memdup() with fixed size, so force unknown length. */	\
261 	if (!__builtin_constant_p(expected_size))			\
262 		len += zero_size;					\
263 	checker(len, kmemdup("hello there", len, gfp), kfree(p));	\
264 } while (0)
265 DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
266 
267 /* Sizes are in pages, not bytes. */
268 #define TEST_vmalloc(checker, expected_pages, alloc_pages)	do {	\
269 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
270 	checker((expected_pages) * PAGE_SIZE,				\
271 		vmalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
272 	checker((expected_pages) * PAGE_SIZE,				\
273 		vzalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
274 	checker((expected_pages) * PAGE_SIZE,				\
275 		__vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p));	\
276 } while (0)
277 DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
278 
279 /* Sizes are in pages (and open-coded for side-effects), not bytes. */
280 #define TEST_kvmalloc(checker, expected_pages, alloc_pages)	do {	\
281 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
282 	size_t prev_size;						\
283 	void *orig;							\
284 									\
285 	checker((expected_pages) * PAGE_SIZE,				\
286 		kvmalloc((alloc_pages) * PAGE_SIZE, gfp),		\
287 		kvfree(p));						\
288 	checker((expected_pages) * PAGE_SIZE,				\
289 		kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
290 		kvfree(p));						\
291 	checker((expected_pages) * PAGE_SIZE,				\
292 		kvzalloc((alloc_pages) * PAGE_SIZE, gfp),		\
293 		kvfree(p));						\
294 	checker((expected_pages) * PAGE_SIZE,				\
295 		kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
296 		kvfree(p));						\
297 	checker((expected_pages) * PAGE_SIZE,				\
298 		kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp),		\
299 		kvfree(p));						\
300 	checker((expected_pages) * PAGE_SIZE,				\
301 		kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp),		\
302 		kvfree(p));						\
303 	checker((expected_pages) * PAGE_SIZE,				\
304 		kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp),	\
305 		kvfree(p));						\
306 	checker((expected_pages) * PAGE_SIZE,				\
307 		kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp),	\
308 		kvfree(p));						\
309 									\
310 	prev_size = (expected_pages) * PAGE_SIZE;			\
311 	orig = kvmalloc(prev_size, gfp);				\
312 	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
313 	checker(((expected_pages) * PAGE_SIZE) * 2,			\
314 		kvrealloc(orig, prev_size,				\
315 			  ((alloc_pages) * PAGE_SIZE) * 2, gfp),	\
316 		kvfree(p));						\
317 } while (0)
318 DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
319 
320 #define TEST_devm_kmalloc(checker, expected_size, alloc_size)	do {	\
321 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
322 	const char dev_name[] = "fortify-test";				\
323 	struct device *dev;						\
324 	void *orig;							\
325 	size_t len;							\
326 									\
327 	/* Create dummy device for devm_kmalloc()-family tests. */	\
328 	dev = kunit_device_register(test, dev_name);			\
329 	KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),			\
330 			       "Cannot register test device\n");	\
331 									\
332 	checker(expected_size, devm_kmalloc(dev, alloc_size, gfp),	\
333 		devm_kfree(dev, p));					\
334 	checker(expected_size, devm_kzalloc(dev, alloc_size, gfp),	\
335 		devm_kfree(dev, p));					\
336 	checker(expected_size,						\
337 		devm_kmalloc_array(dev, 1, alloc_size, gfp),		\
338 		devm_kfree(dev, p));					\
339 	checker(expected_size,						\
340 		devm_kmalloc_array(dev, alloc_size, 1, gfp),		\
341 		devm_kfree(dev, p));					\
342 	checker(expected_size,						\
343 		devm_kcalloc(dev, 1, alloc_size, gfp),			\
344 		devm_kfree(dev, p));					\
345 	checker(expected_size,						\
346 		devm_kcalloc(dev, alloc_size, 1, gfp),			\
347 		devm_kfree(dev, p));					\
348 									\
349 	orig = devm_kmalloc(dev, alloc_size, gfp);			\
350 	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
351 	checker((expected_size) * 2,					\
352 		devm_krealloc(dev, orig, (alloc_size) * 2, gfp),	\
353 		devm_kfree(dev, p));					\
354 									\
355 	len = 4;							\
356 	/* Using memdup() with fixed size, so force unknown length. */	\
357 	if (!__builtin_constant_p(expected_size))			\
358 		len += zero_size;					\
359 	checker(len, devm_kmemdup(dev, "Ohai", len, gfp),		\
360 		devm_kfree(dev, p));					\
361 									\
362 	kunit_device_unregister(test, dev);				\
363 } while (0)
364 DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
365 
366 static const char * const test_strs[] = {
367 	"",
368 	"Hello there",
369 	"A longer string, just for variety",
370 };
371 
372 #define TEST_realloc(checker)	do {					\
373 	gfp_t gfp = GFP_KERNEL;						\
374 	size_t len;							\
375 	int i;								\
376 									\
377 	for (i = 0; i < ARRAY_SIZE(test_strs); i++) {			\
378 		len = strlen(test_strs[i]);				\
379 		KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0);	\
380 		checker(len, kmemdup_array(test_strs[i], len, 1, gfp),	\
381 			kfree(p));					\
382 		checker(len, kmemdup(test_strs[i], len, gfp),		\
383 			kfree(p));					\
384 	}								\
385 } while (0)
386 static void fortify_test_realloc_size(struct kunit *test)
387 {
388 	TEST_realloc(check_dynamic);
389 }
390 
391 /*
392  * We can't have an array at the end of a structure or else
393  * builds without -fstrict-flex-arrays=3 will report it as
394  * having an unknown length. Additionally, add bytes before
395  * and after the string to catch over/underflows if tests
396  * fail.
397  */
398 struct fortify_padding {
399 	unsigned long bytes_before;
400 	char buf[32];
401 	unsigned long bytes_after;
402 };
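
/*
 * Illustrative note: should a fortified copy ever run past pad.buf, the
 * stray bytes would land in pad.bytes_after (and an underflow would land in
 * pad.bytes_before), which is why the tests below repeatedly check that
 * both members remain zero.
 */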
403 /* Added to sizes so the compiler cannot resolve them at compile time. */
404 static volatile int unconst;
405 
406 static void fortify_test_strlen(struct kunit *test)
407 {
408 	struct fortify_padding pad = { };
409 	int i, end = sizeof(pad.buf) - 1;
410 
411 	/* Fill 31 bytes with valid characters. */
412 	for (i = 0; i < sizeof(pad.buf) - 1; i++)
413 		pad.buf[i] = i + '0';
414 	/* Trailing bytes are still %NUL. */
415 	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
416 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
417 
418 	/* String is terminated, so strlen() is valid. */
419 	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
420 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
421 
422 	/* Make string unterminated, and recount. */
423 	pad.buf[end] = 'A';
424 	end = sizeof(pad.buf);
425 	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
426 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
427 }
428 
429 static void fortify_test_strnlen(struct kunit *test)
430 {
431 	struct fortify_padding pad = { };
432 	int i, end = sizeof(pad.buf) - 1;
433 
434 	/* Fill 31 bytes with valid characters. */
435 	for (i = 0; i < sizeof(pad.buf) - 1; i++)
436 		pad.buf[i] = i + '0';
437 	/* Trailing bytes are still %NUL. */
438 	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
439 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
440 
441 	/* String is terminated, so strnlen() is valid. */
442 	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
443 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
444 	/* A truncated strnlen() will be safe, too. */
445 	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
446 					sizeof(pad.buf) / 2);
447 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
448 
449 	/* Make string unterminated, and recount. */
450 	pad.buf[end] = 'A';
451 	end = sizeof(pad.buf);
452 	/* Reading beyond with strnlen() will fail. */
453 	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
454 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
455 	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
456 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
457 
458 	/* Early-truncated is safe still, though. */
459 	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
460 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
461 
462 	end = sizeof(pad.buf) / 2;
463 	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
464 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
465 }
466 
467 static void fortify_test_strcpy(struct kunit *test)
468 {
469 	struct fortify_padding pad = { };
470 	char src[sizeof(pad.buf) + 1] = { };
471 	int i;
472 
473 	/* Fill 31 bytes with valid characters. */
474 	for (i = 0; i < sizeof(src) - 2; i++)
475 		src[i] = i + '0';
476 
477 	/* Destination is %NUL-filled to start with. */
478 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
479 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
480 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
481 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
482 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
483 
484 	/* Legitimate strcpy() 1 byte less than max size. */
485 	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
486 				== pad.buf);
487 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
488 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
489 	/* Only last byte should be %NUL */
490 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
491 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
492 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
493 
494 	src[sizeof(src) - 2] = 'A';
495 	/* But now we trip the overflow checking. */
496 	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
497 				== pad.buf);
498 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
499 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
500 	/* Trailing %NUL -- thanks to FORTIFY. */
501 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
502 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
503 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
504 	/* And we will not have gone beyond. */
505 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
506 
507 	src[sizeof(src) - 1] = 'A';
508 	/* And for sure now, two bytes past. */
509 	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
510 				== pad.buf);
511 	/*
512 	 * Which trips both the strlen() on the unterminated src,
513 	 * and the resulting copy attempt.
514 	 */
515 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
516 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
517 	/* Trailing %NUL -- thanks to FORTIFY. */
518 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
519 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
520 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
521 	/* And we will not have gone beyond. */
522 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
523 }
524 
525 static void fortify_test_strncpy(struct kunit *test)
526 {
527 	struct fortify_padding pad = { };
528 	char src[] = "Copy me fully into a small buffer and I will overflow!";
529 
530 	/* Destination is %NUL-filled to start with. */
531 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
532 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
533 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
534 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
535 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
536 
537 	/* Legitimate strncpy() 1 byte less than max size. */
538 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
539 					sizeof(pad.buf) + unconst - 1)
540 				== pad.buf);
541 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
542 	/* Only last byte should be %NUL */
543 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
544 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
545 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
546 
547 	/* Legitimate (though unterminated) max-size strncpy. */
548 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
549 					sizeof(pad.buf) + unconst)
550 				== pad.buf);
551 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
552 	/* No trailing %NUL -- thanks strncpy API. */
553 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
554 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
555 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
556 	/* But we will not have gone beyond. */
557 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
558 
559 	/* Now verify that FORTIFY is working... */
560 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
561 					sizeof(pad.buf) + unconst + 1)
562 				== pad.buf);
563 	/* Should catch the overflow. */
564 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
565 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
566 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
567 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
568 	/* And we will not have gone beyond. */
569 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
570 
571 	/* And further... */
572 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
573 					sizeof(pad.buf) + unconst + 2)
574 				== pad.buf);
575 	/* Should catch the overflow. */
576 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
577 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
578 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
579 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
580 	/* And we will not have gone beyond. */
581 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
582 }
583 
584 static void fortify_test_strscpy(struct kunit *test)
585 {
586 	struct fortify_padding pad = { };
587 	char src[] = "Copy me fully into a small buffer and I will overflow!";
588 
589 	/* Destination is %NUL-filled to start with. */
590 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
591 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
592 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
593 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
594 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
595 
596 	/* Legitimate strscpy() 1 byte less than max size. */
597 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
598 				      sizeof(pad.buf) + unconst - 1),
599 			-E2BIG);
600 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
601 	/* Keeping space for %NUL, last two bytes should be %NUL */
602 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
603 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
604 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
605 
606 	/* Legitimate max-size strscpy. */
607 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
608 				      sizeof(pad.buf) + unconst),
609 			-E2BIG);
610 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
611 	/* A trailing %NUL will exist. */
612 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
613 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
614 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
615 
616 	/* Now verify that FORTIFY is working... */
617 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
618 				      sizeof(pad.buf) + unconst + 1),
619 			-E2BIG);
620 	/* Should catch the overflow. */
621 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
622 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
623 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
624 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
625 	/* And we will not have gone beyond. */
626 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
627 
628 	/* And much further... */
629 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
630 				      sizeof(src) * 2 + unconst),
631 			-E2BIG);
632 	/* Should catch the overflow. */
633 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
634 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
635 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
636 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
637 	/* And we will not have gone beyond. */
638 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
639 }
640 
641 static void fortify_test_strcat(struct kunit *test)
642 {
643 	struct fortify_padding pad = { };
644 	char src[sizeof(pad.buf) / 2] = { };
645 	char one[] = "A";
646 	char two[] = "BC";
647 	int i;
648 
649 	/* Fill 15 bytes with valid characters. */
650 	for (i = 0; i < sizeof(src) - 1; i++)
651 		src[i] = i + 'A';
652 
653 	/* Destination is %NUL-filled to start with. */
654 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
655 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
656 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
657 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
658 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
659 
660 	/* Legitimate strcat() using less than half max size. */
661 	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
662 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
663 	/* Legitimate strcat() now 2 bytes shy of end. */
664 	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
665 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
666 	/* Last two bytes should be %NUL */
667 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
668 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
669 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
670 
671 	/* Add one more character to the end. */
672 	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
673 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
674 	/* Last byte should be %NUL */
675 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
676 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
677 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
678 
679 	/* And this one char will overflow. */
680 	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
681 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
682 	/* Last byte should be %NUL thanks to FORTIFY. */
683 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
684 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
685 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
686 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
687 
688 	/* And adding two will overflow more. */
689 	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
690 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
691 	/* Last byte should be %NUL thanks to FORTIFY. */
692 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
693 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
694 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
695 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
696 }
697 
698 static void fortify_test_strncat(struct kunit *test)
699 {
700 	struct fortify_padding pad = { };
701 	char src[sizeof(pad.buf)] = { };
702 	int i, partial;
703 
704 	/* Fill 15 bytes with valid characters. */
705 	partial = sizeof(src) / 2 - 1;
706 	for (i = 0; i < partial; i++)
707 		src[i] = i + 'A';
708 
709 	/* Destination is %NUL-filled to start with. */
710 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
711 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
712 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
713 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
714 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
715 
716 	/* Legitimate strncat() using less than half max size. */
717 	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
718 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
719 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
720 	/* Legitimate strncat() now 2 bytes shy of end. */
721 	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
722 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
723 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
724 	/* Last two bytes should be %NUL */
725 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
726 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
727 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
728 
729 	/* Add one more character to the end. */
730 	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
731 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
732 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
733 	/* Last byte should be %NUL */
734 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
735 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
736 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
737 
738 	/* And this one char will overflow. */
739 	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
740 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
741 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
742 	/* Last byte should be %NUL thanks to FORTIFY. */
743 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
744 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
745 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
746 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
747 
748 	/* And adding two will overflow more. */
749 	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
750 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
751 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
752 	/* Last byte should be %NUL thanks to FORTIFY. */
753 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
754 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
755 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
756 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
757 
758 	/* Force an unterminated destination, and overflow. */
759 	pad.buf[sizeof(pad.buf) - 1] = 'A';
760 	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
761 	/* This will have tripped both strlen() and strcat(). */
762 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
763 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
764 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
765 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
766 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
767 	/* But we should not go beyond the end. */
768 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
769 }
770 
771 static void fortify_test_strlcat(struct kunit *test)
772 {
773 	struct fortify_padding pad = { };
774 	char src[sizeof(pad.buf)] = { };
775 	int i, partial;
776 	int len = sizeof(pad.buf) + unconst;
777 
778 	/* Fill 15 bytes with valid characters. */
779 	partial = sizeof(src) / 2 - 1;
780 	for (i = 0; i < partial; i++)
781 		src[i] = i + 'A';
782 
783 	/* Destination is %NUL-filled to start with. */
784 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
785 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
786 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
787 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
788 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
789 
790 	/* Legitimate strlcat() using less than half max size. */
791 	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
792 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
793 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
794 	/* Legitimate strlcat() now 2 bytes shy of end. */
795 	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
796 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
797 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
798 	/* Last two bytes should be %NUL */
799 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
800 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
801 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
802 
803 	/* Add one more character to the end. */
804 	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
805 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
806 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
807 	/* Last byte should be %NUL */
808 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
809 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
810 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
811 
812 	/* And this one char will overflow. */
813 	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
814 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
815 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
816 	/* Last byte should be %NUL thanks to FORTIFY. */
817 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
818 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
819 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
820 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
821 
822 	/* And adding two will overflow more. */
823 	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
824 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
825 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
826 	/* Last byte should be %NUL thanks to FORTIFY. */
827 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
828 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
829 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
830 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
831 
832 	/* Force an unterminated destination, and overflow. */
833 	pad.buf[sizeof(pad.buf) - 1] = 'A';
834 	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
835 	/* This will have tripped both strlen() and strlcat(). */
836 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
837 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
838 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
839 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
840 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
841 	/* But we should not go beyond the end. */
842 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
843 
844 	/* Force an unterminated source, and overflow. */
845 	memset(src, 'B', sizeof(src));
846 	pad.buf[sizeof(pad.buf) - 1] = '\0';
847 	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
848 	/* This will have tripped both strlen() and strlcat(). */
849 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
850 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
851 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
852 	/* But we should not go beyond the end. */
853 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
854 }
855 
856 /* Check for 0-sized arrays... */
857 struct fortify_zero_sized {
858 	unsigned long bytes_before;
859 	char buf[0];
860 	unsigned long bytes_after;
861 };
862 
863 #define __fortify_test(memfunc)					\
864 static void fortify_test_##memfunc(struct kunit *test)		\
865 {								\
866 	struct fortify_zero_sized zero = { };			\
867 	struct fortify_padding pad = { };			\
868 	char srcA[sizeof(pad.buf) + 2];				\
869 	char srcB[sizeof(pad.buf) + 2];				\
870 	size_t len = sizeof(pad.buf) + unconst;			\
871 								\
872 	memset(srcA, 'A', sizeof(srcA));			\
873 	KUNIT_ASSERT_EQ(test, srcA[0], 'A');			\
874 	memset(srcB, 'B', sizeof(srcB));			\
875 	KUNIT_ASSERT_EQ(test, srcB[0], 'B');			\
876 								\
877 	memfunc(pad.buf, srcA, 0 + unconst);			\
878 	KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');		\
879 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
880 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
881 	memfunc(pad.buf + 1, srcB, 1 + unconst);		\
882 	KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');		\
883 	KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');			\
884 	KUNIT_EXPECT_EQ(test, pad.buf[2], '\0');		\
885 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
886 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
887 	memfunc(pad.buf, srcA, 1 + unconst);			\
888 	KUNIT_EXPECT_EQ(test, pad.buf[0], 'A');			\
889 	KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');			\
890 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
891 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
892 	memfunc(pad.buf, srcA, len - 1);			\
893 	KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');			\
894 	KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0');		\
895 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
896 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
897 	memfunc(pad.buf, srcA, len);				\
898 	KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');			\
899 	KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A');		\
900 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);		\
901 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
902 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
903 	memfunc(pad.buf, srcA, len + 1);			\
904 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
905 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);	\
906 	memfunc(pad.buf + 1, srcB, len);			\
907 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
908 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);	\
909 								\
910 	/* Reset error counter. */				\
911 	fortify_write_overflows = 0;				\
912 	/* Copy nothing into nothing: no errors. */		\
913 	memfunc(zero.buf, srcB, 0 + unconst);			\
914 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
915 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
916 	/* We currently explicitly ignore zero-sized dests. */	\
917 	memfunc(zero.buf, srcB, 1 + unconst);			\
918 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
919 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
920 }
921 __fortify_test(memcpy)
922 __fortify_test(memmove)
923 
924 static void fortify_test_memscan(struct kunit *test)
925 {
926 	char haystack[] = "Where oh where is my memory range?";
927 	char *mem = haystack + strlen("Where oh where is ");
928 	char needle = 'm';
929 	size_t len = sizeof(haystack) + unconst;
930 
931 	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
932 				  mem);
933 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
934 	/* Catch too-large range. */
935 	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
936 				  NULL);
937 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
938 	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
939 				  NULL);
940 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
941 }
942 
943 static void fortify_test_memchr(struct kunit *test)
944 {
945 	char haystack[] = "Where oh where is my memory range?";
946 	char *mem = haystack + strlen("Where oh where is ");
947 	char needle = 'm';
948 	size_t len = sizeof(haystack) + unconst;
949 
950 	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
951 				  mem);
952 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
953 	/* Catch too-large range. */
954 	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
955 				  NULL);
956 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
957 	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
958 				  NULL);
959 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
960 }
961 
962 static void fortify_test_memchr_inv(struct kunit *test)
963 {
964 	char haystack[] = "Where oh where is my memory range?";
965 	char *mem = haystack + 1;
966 	char needle = 'W';
967 	size_t len = sizeof(haystack) + unconst;
968 
969 	/* Normal search is okay. */
970 	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
971 				  mem);
972 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
973 	/* Catch too-large range. */
974 	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
975 				  NULL);
976 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
977 	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
978 				  NULL);
979 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
980 }
981 
982 static void fortify_test_memcmp(struct kunit *test)
983 {
984 	char one[] = "My mind is going ...";
985 	char two[] = "My mind is going ... I can feel it.";
986 	size_t one_len = sizeof(one) + unconst - 1;
987 	size_t two_len = sizeof(two) + unconst - 1;
988 
989 	/* We match the first string (ignoring the %NUL). */
990 	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
991 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
992 	/* Still in bounds, but no longer matching. */
993 	KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
994 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
995 
996 	/* Catch too-large ranges. */
997 	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
998 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
999 
1000 	KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
1001 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1002 }
1003 
1004 static void fortify_test_kmemdup(struct kunit *test)
1005 {
1006 	char src[] = "I got Doom running on it!";
1007 	char *copy;
1008 	size_t len = sizeof(src) + unconst;
1009 
1010 	/* Copy is within bounds. */
1011 	copy = kmemdup(src, len, GFP_KERNEL);
1012 	KUNIT_EXPECT_NOT_NULL(test, copy);
1013 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1014 	kfree(copy);
1015 
1016 	/* Without %NUL. */
1017 	copy = kmemdup(src, len - 1, GFP_KERNEL);
1018 	KUNIT_EXPECT_NOT_NULL(test, copy);
1019 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1020 	kfree(copy);
1021 
1022 	/* Tiny bounds. */
1023 	copy = kmemdup(src, 1, GFP_KERNEL);
1024 	KUNIT_EXPECT_NOT_NULL(test, copy);
1025 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1026 	kfree(copy);
1027 
1028 	/* Out of bounds by 1 byte. */
1029 	copy = kmemdup(src, len + 1, GFP_KERNEL);
1030 	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1031 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
1032 	kfree(copy);
1033 
1034 	/* Way out of bounds. */
1035 	copy = kmemdup(src, len * 2, GFP_KERNEL);
1036 	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1037 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1038 	kfree(copy);
1039 
1040 	/* Starting offset causing out of bounds. */
1041 	copy = kmemdup(src + 1, len, GFP_KERNEL);
1042 	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1043 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
1044 	kfree(copy);
1045 }
1046 
1047 static int fortify_test_init(struct kunit *test)
1048 {
1049 	if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
1050 		kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
1051 
1052 	fortify_read_overflows = 0;
1053 	kunit_add_named_resource(test, NULL, NULL, &read_resource,
1054 				 "fortify_read_overflows",
1055 				 &fortify_read_overflows);
1056 	fortify_write_overflows = 0;
1057 	kunit_add_named_resource(test, NULL, NULL, &write_resource,
1058 				 "fortify_write_overflows",
1059 				 &fortify_write_overflows);
1060 	return 0;
1061 }
1062 
1063 static struct kunit_case fortify_test_cases[] = {
1064 	KUNIT_CASE(fortify_test_known_sizes),
1065 	KUNIT_CASE(fortify_test_control_flow_split),
1066 	KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
1067 	KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
1068 	KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
1069 	KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
1070 	KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
1071 	KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
1072 	KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
1073 	KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
1074 	KUNIT_CASE(fortify_test_realloc_size),
1075 	KUNIT_CASE(fortify_test_strlen),
1076 	KUNIT_CASE(fortify_test_strnlen),
1077 	KUNIT_CASE(fortify_test_strcpy),
1078 	KUNIT_CASE(fortify_test_strncpy),
1079 	KUNIT_CASE(fortify_test_strscpy),
1080 	KUNIT_CASE(fortify_test_strcat),
1081 	KUNIT_CASE(fortify_test_strncat),
1082 	KUNIT_CASE(fortify_test_strlcat),
1083 	/* skip memset: performs bounds checking on whole structs */
1084 	KUNIT_CASE(fortify_test_memcpy),
1085 	KUNIT_CASE(fortify_test_memmove),
1086 	KUNIT_CASE(fortify_test_memscan),
1087 	KUNIT_CASE(fortify_test_memchr),
1088 	KUNIT_CASE(fortify_test_memchr_inv),
1089 	KUNIT_CASE(fortify_test_memcmp),
1090 	KUNIT_CASE(fortify_test_kmemdup),
1091 	{}
1092 };
1093 
1094 static struct kunit_suite fortify_test_suite = {
1095 	.name = "fortify",
1096 	.init = fortify_test_init,
1097 	.test_cases = fortify_test_cases,
1098 };
1099 
1100 kunit_test_suite(fortify_test_suite);
1101 
1102 MODULE_LICENSE("GPL");
1103