// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for binder allocator code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <kunit/test.h>
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sizes.h>

#include "../binder_alloc.h"
#include "../binder_internal.h"

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

#define BINDER_MMAP_SIZE SZ_128K

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

static int binder_alloc_test_failures;

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to the previous buffer by
 * address. The symbol [ marks the start of a buffer, ]
 * marks the end of a buffer, and | marks a page boundary.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	/**
	 * @LOOP_END: The number of enum values in &buf_end_align_type.
	 * It is used for controlling loop termination.
	 */
	LOOP_END,
};

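/* Dump the attempted allocation sizes and free order of a failing case. */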
static void pr_err_size_seq(struct kunit *test, size_t *sizes, int *seq)
{
	int i;

	kunit_err(test, "alloc sizes: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%zu]", sizes[i]);
	pr_cont("\n");
	kunit_err(test, "free seq: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%d]", seq[i]);
	pr_cont("\n");
}

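/*
 * Check that every page spanned by @buffer is currently allocated:
 * present in alloc->pages[] and not sitting on the freelist LRU.
 */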
static bool check_buffer_pages_allocated(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr;
	unsigned long end;
	int page_index;

	end = PAGE_ALIGN(buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		if (!alloc->pages[page_index] ||
		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
			kunit_err(test, "expect alloc but is %s at page index %d\n",
				  alloc->pages[page_index] ?
				  "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

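/* Allocate BUFFER_NUM buffers of the given sizes and check their pages. */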
static void binder_alloc_test_alloc_buf(struct kunit *test,
					struct binder_alloc *alloc,
					struct binder_buffer *buffers[],
					size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i])) {
			pr_err_size_seq(test, sizes, seq);
			binder_alloc_test_failures++;
		}
	}
}

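/*
 * Free all buffers in the order given by @seq, then check that every
 * page up to @end is on the freelist LRU (kept for reuse rather than
 * released).
 */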
static void binder_alloc_test_free_buf(struct kunit *test,
				       struct binder_alloc *alloc,
				       struct binder_buffer *buffers[],
				       size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
		if (list_empty(page_to_lru(alloc->pages[i]))) {
			pr_err_size_seq(test, sizes, seq);
			kunit_err(test, "expect lru but is %s at page index %d\n",
				  alloc->pages[i] ? "alloc" : "free", i);
			binder_alloc_test_failures++;
		}
	}
}

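/*
 * Reclaim every page on the freelist LRU via binder_alloc_free_page()
 * and verify that no page in the mapped range remains allocated.
 */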
static void binder_alloc_test_free_page(struct kunit *test,
					struct binder_alloc *alloc)
{
	unsigned long count;
	int i;

	while ((count = list_lru_count(alloc->freelist))) {
		list_lru_walk(alloc->freelist, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i]) {
			kunit_err(test, "expect free but is %s at page index %d\n",
				  list_empty(page_to_lru(alloc->pages[i])) ?
				  "alloc" : "lru", i);
			binder_alloc_test_failures++;
		}
	}
}

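/*
 * Allocate and free the buffers twice: the second allocation pass
 * should reclaim pages from the freelist LRU, leaving it empty, and
 * the final free-page pass should release every page.
 */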
static void binder_alloc_test_alloc_free(struct kunit *test,
					 struct binder_alloc *alloc,
					 size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_alloc_test_alloc_buf(test, alloc, buffers, sizes, seq);
	binder_alloc_test_free_buf(test, alloc, buffers, sizes, seq, end);

	/* Allocate from lru. */
	binder_alloc_test_alloc_buf(test, alloc, buffers, sizes, seq);
	if (list_lru_count(alloc->freelist))
		kunit_err(test, "lru list should be empty but is not\n");

	binder_alloc_test_free_buf(test, alloc, buffers, sizes, seq, end);
	binder_alloc_test_free_page(test, alloc);
}

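/* Return true if @val already appears in the first @index entries of @seq. */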
static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
			  size_t *sizes, int *seq, int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		binder_alloc_test_alloc_free(test, alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		permute_frees(test, alloc, sizes, seq, index + 1, end);
	}
}

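/*
 * Derive two size sets from the end offsets: "front" sizes, which pack
 * the buffers against the start of the mmap area, and the mirrored
 * "back" sizes, which pack them against its end. Run each set through
 * every possible free order.
 */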
static void gen_buf_sizes(struct kunit *test, struct binder_alloc *alloc,
			  size_t *end_offset)
{
	size_t last_offset, offset = 0;
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	int seq[BUFFER_NUM] = {0};
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable; the
	 * leftover space is folded into back_sizes[0], so the back
	 * set has one giant buffer ahead of its last pages.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	permute_frees(test, alloc, front_sizes, seq, 0,
		      end_offset[BUFFER_NUM - 1]);
	permute_frees(test, alloc, back_sizes, seq, 0, alloc->buffer_size);
}

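/*
 * Recursively choose an end offset for each of the BUFFER_NUM buffers
 * so that every combination of the alignment cases in
 * enum buf_end_align_type is exercised, then generate sizes for each
 * combination.
 */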
static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
			    size_t *end_offset, int index)
{
	size_t end, prev;
	int align;

	if (index == BUFFER_NUM) {
		gen_buf_sizes(test, alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		gen_buf_offsets(test, alloc, end_offset, index + 1);
	}
}

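/* Per-test fixture: the allocator under test, its freelist, backing file and mapping. */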
struct binder_alloc_test {
	struct binder_alloc alloc;
	struct list_lru binder_test_freelist;
	struct file *filp;
	unsigned long mmap_uaddr;
};

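/* Check that init wired the allocator to the suite's own freelist. */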
static void binder_alloc_test_init_freelist(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
			    &priv->binder_test_freelist);
}

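/*
 * Check the allocator state right after mmap: mapped, sized to
 * BINDER_MMAP_SIZE, no allocated buffers, and a single free buffer
 * spanning the whole area.
 */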
static void binder_alloc_test_mmap(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	struct binder_alloc *alloc = &priv->alloc;
	struct binder_buffer *buf;
	struct rb_node *n;

	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

	n = rb_first(&alloc->allocated_buffers);
	KUNIT_EXPECT_PTR_EQ(test, n, NULL);

	n = rb_first(&alloc->free_buffers);
	buf = rb_entry(n, struct binder_buffer, rb_node);
	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
			BINDER_MMAP_SIZE);
	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}

/**
 * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
 * @test: The test context object.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page() is called.
 */
static void binder_alloc_exhaustive_test(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	size_t end_offset[BUFFER_NUM];

	gen_buf_offsets(test, &priv->alloc, end_offset, 0);

	KUNIT_EXPECT_EQ(test, binder_alloc_test_failures, 0);
}

/* ===== End test cases ===== */

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = vma->vm_private_data;

	binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
	.close = binder_alloc_test_vma_close,
	.fault = binder_vm_fault,
};

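/*
 * Set up the VMA the way the binder driver's own mmap path does before
 * delegating to binder_alloc_mmap_handler().
 */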
static int binder_alloc_test_mmap_handler(struct file *filp,
					  struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = filp->private_data;

	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_alloc_test_vm_ops;
	vma->vm_private_data = alloc;

	return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
	.mmap = binder_alloc_test_mmap_handler,
};

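/*
 * Suite setup: initialize the test freelist and allocator, back the
 * allocator with an anonymous file, and mmap the transaction area.
 */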
static int binder_alloc_test_init(struct kunit *test)
{
	struct binder_alloc_test *priv;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	ret = list_lru_init(&priv->binder_test_freelist);
	if (ret) {
		kunit_err(test, "Failed to initialize test freelist\n");
		return ret;
	}

	/* __binder_alloc_init requires mm to be attached */
	ret = kunit_attach_mm();
	if (ret) {
		kunit_err(test, "Failed to attach mm\n");
		return ret;
	}
	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

	priv->filp = anon_inode_getfile("binder_alloc_kunit",
					&binder_alloc_test_fops, &priv->alloc,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR_OR_NULL(priv->filp)) {
		kunit_err(test, "Failed to open binder alloc test driver file\n");
		return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
	}

	priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
					 PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
					 0);
	if (!priv->mmap_uaddr) {
		kunit_err(test, "Could not map the test's transaction memory\n");
		return -ENOMEM;
	}

	return 0;
}

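/* Suite teardown: release the mapping and allocator, then destroy the freelist. */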
static void binder_alloc_test_exit(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* Close the backing file to make sure binder_alloc_vma_close runs */
	if (!IS_ERR_OR_NULL(priv->filp))
		fput(priv->filp);

	if (priv->alloc.mm)
		binder_alloc_deferred_release(&priv->alloc);

	/* Make sure freelist is empty */
	KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
	list_lru_destroy(&priv->binder_test_freelist);
}

static struct kunit_case binder_alloc_test_cases[] = {
	KUNIT_CASE(binder_alloc_test_init_freelist),
	KUNIT_CASE(binder_alloc_test_mmap),
	KUNIT_CASE(binder_alloc_exhaustive_test),
	{}
};

static struct kunit_suite binder_alloc_test_suite = {
	.name = "binder_alloc",
	.test_cases = binder_alloc_test_cases,
	.init = binder_alloc_test_init,
	.exit = binder_alloc_test_exit,
};

kunit_test_suite(binder_alloc_test_suite);

MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
MODULE_DESCRIPTION("Binder Alloc KUnit tests");
MODULE_LICENSE("GPL");