// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for binder allocator code.
 *
 * Copyright 2025 Google LLC.
 * Author: Tiffany Yang <ynaffit@google.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <kunit/test.h>
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sizes.h>

#include "../binder_alloc.h"
#include "../binder_internal.h"

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

#define BINDER_MMAP_SIZE SZ_128K

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)

#define ALIGN_TYPE_STRLEN (12)

#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)

#define PRINT_ALL_CASES (0)

/* 5^5 alignment combinations * 2 places to share pages * 5! free sequences */
#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)
/**
 * enum buf_end_align_type - Page alignment of a buffer end with regard
 * to the end of the previous buffer.
 *
 * In the diagrams below, buf2 refers to the buffer being aligned and
 * buf1 refers to the previous buffer by address. The symbol [ marks the
 * start of a buffer, ] marks the end of a buffer, and | marks a page
 * boundary.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	/**
	 * @LOOP_END: The number of enum values in &buf_end_align_type.
	 * It is used for controlling loop termination.
	 */
	LOOP_END,
};

static const char *const buf_end_align_type_strs[LOOP_END] = {
	[SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
	[SAME_PAGE_ALIGNED]   = " SP_ALIGNED ",
	[NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
	[NEXT_PAGE_ALIGNED]   = " NP_ALIGNED ",
	[NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
};

struct binder_alloc_test_case_info {
	size_t *buffer_sizes;
	int *free_sequence;
	char alignments[ALIGNMENTS_BUFLEN];
	bool front_pages;
};

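/* Render a free sequence as "[a][b]..." into buf for test case reports. */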
static void stringify_free_seq(struct kunit *test, int *seq, char *buf,
			       size_t buf_len)
{
	size_t bytes = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		bytes += snprintf(buf + bytes, buf_len - bytes, "[%d]", seq[i]);
		if (bytes >= buf_len)
			break;
	}
	KUNIT_EXPECT_LT(test, bytes, buf_len);
}

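/* Render each buffer's end alignment type as "[ i:TYPE ]" into buf. */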
static void stringify_alignments(struct kunit *test, int *alignments,
				 char *buf, size_t buf_len)
{
	size_t bytes = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		bytes += snprintf(buf + bytes, buf_len - bytes, "[ %d:%s ]", i,
				  buf_end_align_type_strs[alignments[i]]);
		if (bytes >= buf_len)
			break;
	}

	KUNIT_EXPECT_LT(test, bytes, buf_len);
}

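/*
 * Return true iff every page backing the buffer is installed and not on
 * the freelist lru, i.e. all of its pages are in the allocated state.
 */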
static bool check_buffer_pages_allocated(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr;
	unsigned long end;
	int page_index;

	end = PAGE_ALIGN(buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		if (!alloc->pages[page_index] ||
		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
			kunit_err(test, "expect alloc but is %s at page index %d\n",
				  alloc->pages[page_index] ?
				  "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

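/*
 * Allocate BUFFER_NUM buffers with the requested sizes and verify that
 * their backing pages are present. Returns the number of failures.
 */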
static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
						 struct binder_alloc *alloc,
						 struct binder_buffer *buffers[],
						 size_t *sizes, int *seq)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
			failures++;
	}

	return failures;
}

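/*
 * Free all BUFFER_NUM buffers in the order given by seq, then verify
 * that every page up to end was moved onto the freelist lru. Returns
 * the number of pages not in the expected state.
 */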
static unsigned long binder_alloc_test_free_buf(struct kunit *test,
						struct binder_alloc *alloc,
						struct binder_buffer *buffers[],
						size_t *sizes, int *seq, size_t end)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
		if (list_empty(page_to_lru(alloc->pages[i]))) {
			kunit_err(test, "expect lru but is %s at page index %d\n",
				  alloc->pages[i] ? "alloc" : "free", i);
			failures++;
		}
	}

	return failures;
}

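/*
 * Reclaim every page on the freelist via binder_alloc_free_page(), then
 * verify that no page in the allocator remains installed. Returns the
 * number of pages still installed.
 */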
static unsigned long binder_alloc_test_free_page(struct kunit *test,
						 struct binder_alloc *alloc)
{
	unsigned long failures = 0;
	unsigned long count;
	int i;

	while ((count = list_lru_count(alloc->freelist))) {
		list_lru_walk(alloc->freelist, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i]) {
			kunit_err(test, "expect free but is %s at page index %d\n",
				  list_empty(page_to_lru(alloc->pages[i])) ?
				  "alloc" : "lru", i);
			failures++;
		}
	}

	return failures;
}

/* Executes one full test run for the given test case. */
static bool binder_alloc_test_alloc_free(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_alloc_test_case_info *tc,
					 size_t end)
{
	unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
	struct binder_buffer *buffers[BUFFER_NUM];
	unsigned long failures;
	bool failed = false;

	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
					       tc->buffer_sizes,
					       tc->free_sequence);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Initial allocation failed: %lu/%u buffers with errors",
			    failures, BUFFER_NUM);

	failures = binder_alloc_test_free_buf(test, alloc, buffers,
					      tc->buffer_sizes,
					      tc->free_sequence, end);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
			    failures, pages);

	/* Allocate from lru. */
	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
					       tc->buffer_sizes,
					       tc->free_sequence);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Reallocation failed: %lu/%u buffers with errors",
			    failures, BUFFER_NUM);

	failures = list_lru_count(alloc->freelist);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "lru list should be empty after reallocation but still has %lu pages",
			    failures);

	failures = binder_alloc_test_free_buf(test, alloc, buffers,
					      tc->buffer_sizes,
					      tc->free_sequence, end);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
			    failures, pages);

	failures = binder_alloc_test_free_page(test, alloc);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Failed to clean up allocated pages: %lu/%lu pages still installed",
			    failures, (alloc->buffer_size / PAGE_SIZE));

	return failed;
}

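/* Return true if val already appears in the first index entries of seq. */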
static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
			  struct binder_alloc_test_case_info *tc,
			  unsigned long *runs, unsigned long *failures,
			  int index, size_t end)
{
	bool case_failed;
	int i;

	if (index == BUFFER_NUM) {
		char freeseq_buf[FREESEQ_BUFLEN];

		case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
		*runs += 1;
		*failures += case_failed;

		if (case_failed || PRINT_ALL_CASES) {
			stringify_free_seq(test, tc->free_sequence, freeseq_buf,
					   FREESEQ_BUFLEN);
			kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
				  case_failed ? "FAILED" : "PASSED",
				  tc->front_pages ? "front" : "back ",
				  tc->alignments, freeseq_buf);
		}

		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(tc->free_sequence, index, i))
			continue;
		tc->free_sequence[index] = i;
		permute_frees(test, alloc, tc, runs, failures, index + 1, end);
	}
}

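/*
 * Derive per-buffer sizes from the end offsets for buffers packed at
 * the front of the mmap area, mirror them for a back-packed layout,
 * then run the free-order permutations for both layouts.
 */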
static void gen_buf_sizes(struct kunit *test,
			  struct binder_alloc *alloc,
			  struct binder_alloc_test_case_info *tc,
			  size_t *end_offset, unsigned long *runs,
			  unsigned long *failures)
{
	size_t last_offset, offset = 0;
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	int seq[BUFFER_NUM] = {0};
	int i;

	tc->free_sequence = seq;
	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];

	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	tc->front_pages = true;
	tc->buffer_sizes = front_sizes;
	permute_frees(test, alloc, tc, runs, failures, 0,
		      end_offset[BUFFER_NUM - 1]);

	tc->front_pages = false;
	tc->buffer_sizes = back_sizes;
	permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
}

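/*
 * Recursively pick an end alignment for each buffer, building up the
 * end_offset array; at full depth, derive the buffer sizes and run all
 * free-order cases for that combination of alignments.
 */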
static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
			    size_t *end_offset, int *alignments,
			    unsigned long *runs, unsigned long *failures,
			    int index)
{
	size_t end, prev;
	int align;

	if (index == BUFFER_NUM) {
		struct binder_alloc_test_case_info tc = {0};

		stringify_alignments(test, alignments, tc.alignments,
				     ALIGNMENTS_BUFLEN);

		gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		alignments[index] = align;
		gen_buf_offsets(test, alloc, end_offset, alignments, runs,
				failures, index + 1);
	}
}

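/*
 * Test fixture state: the allocator under test, its freelist, the anon
 * file backing the mmap, and the userspace address of the mapping.
 */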
struct binder_alloc_test {
	struct binder_alloc alloc;
	struct list_lru binder_test_freelist;
	struct file *filp;
	unsigned long mmap_uaddr;
};

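/* Check that the fixture wired the allocator to the test's freelist. */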
static void binder_alloc_test_init_freelist(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
			    &priv->binder_test_freelist);
}

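/*
 * Verify the initial post-mmap state: no allocated buffers and a single
 * free buffer spanning the entire mapped range.
 */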
static void binder_alloc_test_mmap(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	struct binder_alloc *alloc = &priv->alloc;
	struct binder_buffer *buf;
	struct rb_node *n;

	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

	n = rb_first(&alloc->allocated_buffers);
	KUNIT_EXPECT_PTR_EQ(test, n, NULL);

	n = rb_first(&alloc->free_buffers);
	buf = rb_entry(n, struct binder_buffer, rb_node);
	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
			BINDER_MMAP_SIZE);
	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}

/**
 * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
 * @test: The test context object.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page() is called.
 */
static void binder_alloc_exhaustive_test(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	size_t end_offset[BUFFER_NUM];
	int alignments[BUFFER_NUM];
	unsigned long failures = 0;
	unsigned long runs = 0;

	gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
			&failures, 0);

	KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
	KUNIT_EXPECT_EQ(test, failures, 0);
}

/* ===== End test cases ===== */

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = vma->vm_private_data;

	binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
	.close = binder_alloc_test_vma_close,
	.fault = binder_vm_fault,
};

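/*
 * Minimal mmap handler mirroring binder's: set up the VMA flags, ops,
 * and private data, then hand off to binder_alloc_mmap_handler().
 */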
static int binder_alloc_test_mmap_handler(struct file *filp,
					  struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = filp->private_data;

	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_alloc_test_vm_ops;
	vma->vm_private_data = alloc;

	return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
	.mmap = binder_alloc_test_mmap_handler,
};

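/*
 * Suite init: allocate the fixture, set up the freelist and allocator,
 * back the allocator with an anonymous file, and map the test's
 * transaction memory.
 */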
static int binder_alloc_test_init(struct kunit *test)
{
	struct binder_alloc_test *priv;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	ret = list_lru_init(&priv->binder_test_freelist);
	if (ret) {
		kunit_err(test, "Failed to initialize test freelist\n");
		return ret;
	}

	/* __binder_alloc_init requires mm to be attached */
	ret = kunit_attach_mm();
	if (ret) {
		kunit_err(test, "Failed to attach mm\n");
		return ret;
	}
	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

	priv->filp = anon_inode_getfile("binder_alloc_kunit",
					&binder_alloc_test_fops, &priv->alloc,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR_OR_NULL(priv->filp)) {
		kunit_err(test, "Failed to open binder alloc test driver file\n");
		return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
	}

	priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
					 PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
					 0);
	if (!priv->mmap_uaddr) {
		kunit_err(test, "Could not map the test's transaction memory\n");
		return -ENOMEM;
	}

	return 0;
}

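/*
 * Suite exit: drop the backing file, release the allocator, and verify
 * the freelist is empty before destroying it.
 */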
static void binder_alloc_test_exit(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* Close the backing file to make sure binder_alloc_vma_close runs */
	if (!IS_ERR_OR_NULL(priv->filp))
		fput(priv->filp);

	if (priv->alloc.mm)
		binder_alloc_deferred_release(&priv->alloc);

	/* Make sure freelist is empty */
	KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
	list_lru_destroy(&priv->binder_test_freelist);
}

static struct kunit_case binder_alloc_test_cases[] = {
	KUNIT_CASE(binder_alloc_test_init_freelist),
	KUNIT_CASE(binder_alloc_test_mmap),
	KUNIT_CASE(binder_alloc_exhaustive_test),
	{}
};

static struct kunit_suite binder_alloc_test_suite = {
	.name = "binder_alloc",
	.test_cases = binder_alloc_test_cases,
	.init = binder_alloc_test_init,
	.exit = binder_alloc_test_exit,
};

kunit_test_suite(binder_alloc_test_suite);

MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
MODULE_DESCRIPTION("Binder Alloc KUnit tests");
MODULE_LICENSE("GPL");