// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for binder allocator code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <kunit/test.h>
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sizes.h>

#include "../binder_alloc.h"
#include "../binder_internal.h"

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

#define BINDER_MMAP_SIZE SZ_128K

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

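/* Each "[%d]" element takes 3 chars for a single-digit index, plus a NUL. */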
#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)

#define ALIGN_TYPE_STRLEN (12)

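/*
 * Each alignment is rendered as "[ %d:%s ]": a 12-char type name plus
 * 6 chars of decoration ("[ ", one index digit, ':', and " ]"), with
 * one NUL for the whole buffer.
 */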
#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)

#define PRINT_ALL_CASES (0)

/*
 * 5^5 alignment combinations * 2 places to share pages * 5! free
 * sequences: 3125 * 2 * 120 = 750,000 cases in total.
 */
#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)

/**
 * enum buf_end_align_type - Page alignment of a buffer's end
 * relative to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we are
 * aligning. buf1 refers to the previous buffer by address.
 * The symbol [ marks the start of a buffer, ] marks the end
 * of a buffer, and | marks page boundaries.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	/**
	 * @LOOP_END: The number of enum values in &buf_end_align_type.
	 * It is used for controlling loop termination.
	 */
	LOOP_END,
};

static const char *const buf_end_align_type_strs[LOOP_END] = {
	[SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
	[SAME_PAGE_ALIGNED]   = " SP_ALIGNED ",
	[NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
	[NEXT_PAGE_ALIGNED]   = " NP_ALIGNED ",
	[NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
};

struct binder_alloc_test_case_info {
	size_t *buffer_sizes;
	int *free_sequence;
	char alignments[ALIGNMENTS_BUFLEN];
	bool front_pages;
};

static void stringify_free_seq(struct kunit *test, int *seq, char *buf,
			       size_t buf_len)
{
	size_t bytes = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		bytes += snprintf(buf + bytes, buf_len - bytes, "[%d]", seq[i]);
		if (bytes >= buf_len)
			break;
	}
	KUNIT_EXPECT_LT(test, bytes, buf_len);
}

static void stringify_alignments(struct kunit *test, int *alignments,
				 char *buf, size_t buf_len)
{
	size_t bytes = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		bytes += snprintf(buf + bytes, buf_len - bytes, "[ %d:%s ]", i,
				  buf_end_align_type_strs[alignments[i]]);
		if (bytes >= buf_len)
			break;
	}

	KUNIT_EXPECT_LT(test, bytes, buf_len);
}

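/*
 * A slot in alloc->pages[] is in one of three states: NULL (no page
 * installed), installed and in use by a buffer (not on the freelist
 * lru), or installed but reclaimable (on the freelist lru). The
 * helpers below check that every page is in the state each test
 * phase expects.
 */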
static bool check_buffer_pages_allocated(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr;
	unsigned long end;
	int page_index;

	end = PAGE_ALIGN(buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		if (!alloc->pages[page_index] ||
		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
			kunit_err(test, "expect alloc but is %s at page index %d\n",
				  alloc->pages[page_index] ?
				  "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
						 struct binder_alloc *alloc,
						 struct binder_buffer *buffers[],
						 size_t *sizes, int *seq)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
			failures++;
	}

	return failures;
}

static unsigned long binder_alloc_test_free_buf(struct kunit *test,
						struct binder_alloc *alloc,
						struct binder_buffer *buffers[],
						size_t *sizes, int *seq, size_t end)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

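	/*
	 * Every page up to the one holding the last freed byte (offset
	 * end - 1) should now be sitting on the freelist lru.
	 */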
	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
		if (list_empty(page_to_lru(alloc->pages[i]))) {
			kunit_err(test, "expect lru but is %s at page index %d\n",
				  alloc->pages[i] ? "alloc" : "free", i);
			failures++;
		}
	}

	return failures;
}

static unsigned long binder_alloc_test_free_page(struct kunit *test,
						 struct binder_alloc *alloc)
{
	unsigned long failures = 0;
	unsigned long count;
	int i;

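	/*
	 * A single walk is not guaranteed to free every entry, since
	 * binder_alloc_free_page() can leave a page on the list (e.g.
	 * when a lock cannot be taken), so keep walking until the
	 * freelist is drained.
	 */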
	while ((count = list_lru_count(alloc->freelist))) {
		list_lru_walk(alloc->freelist, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i]) {
			kunit_err(test, "expect free but is %s at page index %d\n",
				  list_empty(page_to_lru(alloc->pages[i])) ?
				  "alloc" : "lru", i);
			failures++;
		}
	}

	return failures;
}

/* Executes one full test run for the given test case. */
static bool binder_alloc_test_alloc_free(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_alloc_test_case_info *tc,
					 size_t end)
{
	unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
	struct binder_buffer *buffers[BUFFER_NUM];
	unsigned long failures;
	bool failed = false;

	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
					       tc->buffer_sizes,
					       tc->free_sequence);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Initial allocation failed: %lu/%u buffers with errors",
			    failures, BUFFER_NUM);

	failures = binder_alloc_test_free_buf(test, alloc, buffers,
					      tc->buffer_sizes,
					      tc->free_sequence, end);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
			    failures, pages);

	/* Allocate from lru. */
	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
					       tc->buffer_sizes,
					       tc->free_sequence);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Reallocation failed: %lu/%u buffers with errors",
			    failures, BUFFER_NUM);

	failures = list_lru_count(alloc->freelist);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "lru list should be empty after reallocation but still has %lu pages",
			    failures);

	failures = binder_alloc_test_free_buf(test, alloc, buffers,
					      tc->buffer_sizes,
					      tc->free_sequence, end);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
			    failures, pages);

	failures = binder_alloc_test_free_page(test, alloc);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Failed to clean up allocated pages: %lu/%lu pages still installed",
			    failures, (alloc->buffer_size / PAGE_SIZE));

	return failed;
}

static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/*
 * Recursively generate all BUFFER_NUM! orders in which the buffers
 * can be freed; each completed sequence drives one full alloc/free
 * run via binder_alloc_test_alloc_free().
 */
static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
			  struct binder_alloc_test_case_info *tc,
			  unsigned long *runs, unsigned long *failures,
			  int index, size_t end)
{
	bool case_failed;
	int i;

	if (index == BUFFER_NUM) {
		char freeseq_buf[FREESEQ_BUFLEN];

		case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
		*runs += 1;
		*failures += case_failed;

		if (case_failed || PRINT_ALL_CASES) {
			stringify_free_seq(test, tc->free_sequence, freeseq_buf,
					   FREESEQ_BUFLEN);
			kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
				  case_failed ? "FAILED" : "PASSED",
				  tc->front_pages ? "front" : "back ",
				  tc->alignments, freeseq_buf);
		}

		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(tc->free_sequence, index, i))
			continue;
		tc->free_sequence[index] = i;
		permute_frees(test, alloc, tc, runs, failures, index + 1, end);
	}
}

static void gen_buf_sizes(struct kunit *test,
			  struct binder_alloc *alloc,
			  struct binder_alloc_test_case_info *tc,
			  size_t *end_offset, unsigned long *runs,
			  unsigned long *failures)
{
	size_t last_offset, offset = 0;
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	int seq[BUFFER_NUM] = {0};
	int i;

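	/*
	 * Derive the buffer sizes from consecutive end offsets:
	 * front_sizes packs the buffers against the start of the mapping,
	 * back_sizes mirrors the same sizes in reverse against its end.
	 * E.g. end offsets {e0, e1, ...} give front_sizes {e0, e1 - e0, ...}.
	 */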
	tc->free_sequence = seq;
	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];

	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	tc->front_pages = true;
	tc->buffer_sizes = front_sizes;
	permute_frees(test, alloc, tc, runs, failures, 0,
		      end_offset[BUFFER_NUM - 1]);

	tc->front_pages = false;
	tc->buffer_sizes = back_sizes;
	permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
}

static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
			    size_t *end_offset, int *alignments,
			    unsigned long *runs, unsigned long *failures,
			    int index)
{
	size_t end, prev;
	int align;

	if (index == BUFFER_NUM) {
		struct binder_alloc_test_case_info tc = {0};

		stringify_alignments(test, alignments, tc.alignments,
				     ALIGNMENTS_BUFLEN);

		gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

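	/*
	 * BUFFER_NUM minimum-size nudges must never add up to a full
	 * page, or a run of UNALIGNED cases could cross a page boundary
	 * and invalidate its alignment label.
	 */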
	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

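	/*
	 * 'end' accumulates across iterations: odd (ALIGNED) cases snap
	 * it to the next page boundary, even (UNALIGNED) cases nudge it
	 * forward by BUFFER_MIN_SIZE, yielding the five alignment types
	 * in enum order relative to the previous buffer's end.
	 */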
	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		alignments[index] = align;
		gen_buf_offsets(test, alloc, end_offset, alignments, runs,
				failures, index + 1);
	}
}

struct binder_alloc_test {
	struct binder_alloc alloc;
	struct list_lru binder_test_freelist;
	struct file *filp;
	unsigned long mmap_uaddr;
};

static void binder_alloc_test_init_freelist(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
			    &priv->binder_test_freelist);
}

static void binder_alloc_test_mmap(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	struct binder_alloc *alloc = &priv->alloc;
	struct binder_buffer *buf;
	struct rb_node *n;

	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

	n = rb_first(&alloc->allocated_buffers);
	KUNIT_EXPECT_PTR_EQ(test, n, NULL);

	n = rb_first(&alloc->free_buffers);
	buf = rb_entry(n, struct binder_buffer, rb_node);
	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
			BINDER_MMAP_SIZE);
	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}

/**
 * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
 * @test: The test context object.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page() is called.
 */
static void binder_alloc_exhaustive_test(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	size_t end_offset[BUFFER_NUM];
	int alignments[BUFFER_NUM];
	unsigned long failures = 0;
	unsigned long runs = 0;

	gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
			&failures, 0);

	KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
	KUNIT_EXPECT_EQ(test, failures, 0);
}

/* ===== End test cases ===== */

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = vma->vm_private_data;

	binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
	.close = binder_alloc_test_vma_close,
	.fault = binder_vm_fault,
};

static int binder_alloc_test_mmap_handler(struct file *filp,
					  struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = filp->private_data;

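	/* Set up the vma the same way the real binder driver's mmap handler does. */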
	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_alloc_test_vm_ops;
	vma->vm_private_data = alloc;

	return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
	.mmap = binder_alloc_test_mmap_handler,
};

static int binder_alloc_test_init(struct kunit *test)
{
	struct binder_alloc_test *priv;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	ret = list_lru_init(&priv->binder_test_freelist);
	if (ret) {
		kunit_err(test, "Failed to initialize test freelist\n");
		return ret;
	}

	/* __binder_alloc_init requires mm to be attached */
	ret = kunit_attach_mm();
	if (ret) {
		kunit_err(test, "Failed to attach mm\n");
		return ret;
	}
	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

	priv->filp = anon_inode_getfile("binder_alloc_kunit",
					&binder_alloc_test_fops, &priv->alloc,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR_OR_NULL(priv->filp)) {
		kunit_err(test, "Failed to open binder alloc test driver file\n");
		return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
	}

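	/*
	 * Map the binder region into the test's mm the way userspace
	 * would, going through binder_alloc_test_mmap_handler() above.
	 */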
	priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
					 PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
					 0);
	if (!priv->mmap_uaddr) {
		kunit_err(test, "Could not map the test's transaction memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void binder_alloc_test_exit(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* Close the backing file to make sure binder_alloc_vma_close runs */
	if (!IS_ERR_OR_NULL(priv->filp))
		fput(priv->filp);

	if (priv->alloc.mm)
		binder_alloc_deferred_release(&priv->alloc);

	/* Make sure freelist is empty */
	KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
	list_lru_destroy(&priv->binder_test_freelist);
}

static struct kunit_case binder_alloc_test_cases[] = {
	KUNIT_CASE(binder_alloc_test_init_freelist),
	KUNIT_CASE(binder_alloc_test_mmap),
	KUNIT_CASE(binder_alloc_exhaustive_test),
	{}
};

static struct kunit_suite binder_alloc_test_suite = {
	.name = "binder_alloc",
	.test_cases = binder_alloc_test_cases,
	.init = binder_alloc_test_init,
	.exit = binder_alloc_test_exit,
};

kunit_test_suite(binder_alloc_test_suite);

MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
MODULE_DESCRIPTION("Binder Alloc KUnit tests");
MODULE_LICENSE("GPL");