// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

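/*
 * The selftest runs at most once per boot: binder_selftest_run is
 * cleared after the first complete pass, binder_selftest_failures
 * counts individual failed checks, and binder_selftest_lock
 * serializes callers of binder_selftest_alloc().
 */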
static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);

/**
 * enum buf_end_align_type - Page alignment of a buffer's end
 * with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we are
 * aligning and buf1 refers to the previous buffer by address.
 * The symbol [ marks the start of a buffer, ] marks the end
 * of a buffer, and | marks a page boundary.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	LOOP_END,
};

static void pr_err_size_seq(size_t *sizes, int *seq)
{
	int i;

	pr_err("alloc sizes: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%zu]", sizes[i]);
	pr_cont("\n");
	pr_err("free seq: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%d]", seq[i]);
	pr_cont("\n");
}

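/*
 * Check that every page spanned by @buffer, from buffer->user_data up
 * to the page-aligned end of buffer->user_data + @size, has a backing
 * page installed and is not sitting on the LRU free list. Logs an
 * error and returns false otherwise.
 */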
static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	void __user *page_addr;
	void __user *end;
	int page_index;

	end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (!alloc->pages[page_index].page_ptr ||
		    !list_empty(&alloc->pages[page_index].lru)) {
			pr_err("expect alloc but is %s at page index %d\n",
			       alloc->pages[page_index].page_ptr ?
			       "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

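/*
 * Allocate BUFFER_NUM buffers with the sizes in @sizes and verify that
 * the backing pages of each buffer were actually populated. Failures
 * are logged and counted; the test keeps going so later cases still run.
 */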
static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
				      struct binder_buffer *buffers[],
				      size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(alloc, buffers[i],
						  sizes[i])) {
			pr_err_size_seq(sizes, seq);
			binder_selftest_failures++;
		}
	}
}

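/*
 * Free all BUFFER_NUM buffers in the order given by @seq, then verify
 * that every page below @end was handed to the binder LRU rather than
 * freed outright.
 */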
static void binder_selftest_free_buf(struct binder_alloc *alloc,
				     struct binder_buffer *buffers[],
				     size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i < end / PAGE_SIZE; i++) {
		/*
		 * An error message on a free page can be a false
		 * positive if the binder shrinker ran during the
		 * binder_alloc_free_buf() calls above.
		 */
		if (list_empty(&alloc->pages[i].lru)) {
			pr_err_size_seq(sizes, seq);
			pr_err("expect lru but is %s at page index %d\n",
			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
			binder_selftest_failures++;
		}
	}
}

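/*
 * Drain the binder LRU by repeatedly walking it with
 * binder_alloc_free_page(), the same callback the binder shrinker
 * uses, then verify that no page in the allocator still has a
 * backing page installed.
 */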
static void binder_selftest_free_page(struct binder_alloc *alloc)
{
	int i;
	unsigned long count;

	while ((count = list_lru_count(&binder_alloc_lru))) {
		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i].page_ptr) {
			pr_err("expect free but is %s at page index %d\n",
			       list_empty(&alloc->pages[i].lru) ?
			       "alloc" : "lru", i);
			binder_selftest_failures++;
		}
	}
}

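/*
 * Run one full cycle for a given set of sizes and free order: allocate
 * fresh pages, free the buffers onto the LRU, allocate again (this time
 * reusing pages straight from the LRU), then free once more and reclaim
 * every page.
 */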
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
				       size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

	/* Allocate from lru. */
	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	if (list_lru_count(&binder_alloc_lru))
		pr_err("lru list should be empty but is not\n");

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	binder_selftest_free_page(alloc);
}

static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
				     size_t *sizes, int *seq,
				     int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_free(alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
	}
}

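/*
 * Turn the chosen end offsets into two size sets: front_sizes[] packs
 * the buffers against the start of the address space, while back_sizes[]
 * reverses that layout and grows the first allocation to cover the
 * unused middle, so the same end-alignment cases are exercised against
 * the last pages of the space. Each size set is then freed in every one
 * of the BUFFER_NUM! (5! == 120) possible orders.
 */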
static void binder_selftest_alloc_size(struct binder_alloc *alloc,
				       size_t *end_offset)
{
	int i;
	int seq[BUFFER_NUM] = {0};
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	size_t last_offset, offset = 0;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
				 end_offset[BUFFER_NUM - 1]);
	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}

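/*
 * Recursively pick an end offset for each of the BUFFER_NUM buffers,
 * stepping through the buf_end_align_type cases at every level, so all
 * LOOP_END^BUFFER_NUM (5^5 == 3125) end-offset layouts are generated
 * and handed to binder_selftest_alloc_size(). The BUILD_BUG_ON ensures
 * that BUFFER_NUM minimum-sized increments can never cross a page on
 * their own, which keeps the "same page" cases on the intended page.
 */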
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
					 size_t *end_offset, int index)
{
	int align;
	size_t end, prev;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_size(alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
	}
}

/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in every possible order. Check that pages are
 * correctly allocated, put onto the lru when buffers are freed,
 * and freed back when binder_alloc_free_page() is called.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
	size_t end_offset[BUFFER_NUM];

	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	if (!binder_selftest_run || !alloc->vma)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
	binder_selftest_run = false;
	if (binder_selftest_failures > 0)
		pr_info("%d tests FAILED\n", binder_selftest_failures);
	else
		pr_info("PASSED\n");

done:
	mutex_unlock(&binder_selftest_lock);
}