1 // SPDX-License-Identifier: GPL-2.0-only 2 /* binder_alloc_selftest.c 3 * 4 * Android IPC Subsystem 5 * 6 * Copyright (C) 2017 Google, Inc. 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/mm_types.h> 12 #include <linux/err.h> 13 #include "binder_alloc.h" 14 15 #define BUFFER_NUM 5 16 #define BUFFER_MIN_SIZE (PAGE_SIZE / 8) 17 18 static bool binder_selftest_run = true; 19 static int binder_selftest_failures; 20 static DEFINE_MUTEX(binder_selftest_lock); 21 22 /** 23 * enum buf_end_align_type - Page alignment of a buffer 24 * end with regard to the end of the previous buffer. 25 * 26 * In the pictures below, buf2 refers to the buffer we 27 * are aligning. buf1 refers to previous buffer by addr. 28 * Symbol [ means the start of a buffer, ] means the end 29 * of a buffer, and | means page boundaries. 30 */ 31 enum buf_end_align_type { 32 /** 33 * @SAME_PAGE_UNALIGNED: The end of this buffer is on 34 * the same page as the end of the previous buffer and 35 * is not page aligned. Examples: 36 * buf1 ][ buf2 ][ ... 37 * buf1 ]|[ buf2 ][ ... 38 */ 39 SAME_PAGE_UNALIGNED = 0, 40 /** 41 * @SAME_PAGE_ALIGNED: When the end of the previous buffer 42 * is not page aligned, the end of this buffer is on the 43 * same page as the end of the previous buffer and is page 44 * aligned. When the previous buffer is page aligned, the 45 * end of this buffer is aligned to the next page boundary. 46 * Examples: 47 * buf1 ][ buf2 ]| ... 48 * buf1 ]|[ buf2 ]| ... 49 */ 50 SAME_PAGE_ALIGNED, 51 /** 52 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on 53 * the page next to the end of the previous buffer and 54 * is not page aligned. Examples: 55 * buf1 ][ buf2 | buf2 ][ ... 56 * buf1 ]|[ buf2 | buf2 ][ ... 57 */ 58 NEXT_PAGE_UNALIGNED, 59 /** 60 * @NEXT_PAGE_ALIGNED: The end of this buffer is on 61 * the page next to the end of the previous buffer and 62 * is page aligned. Examples: 63 * buf1 ][ buf2 | buf2 ]| ... 64 * buf1 ]|[ buf2 | buf2 ]| ... 65 */ 66 NEXT_PAGE_ALIGNED, 67 /** 68 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on 69 * the page that follows the page after the end of the 70 * previous buffer and is not page aligned. Examples: 71 * buf1 ][ buf2 | buf2 | buf2 ][ ... 72 * buf1 ]|[ buf2 | buf2 | buf2 ][ ... 73 */ 74 NEXT_NEXT_UNALIGNED, 75 /** 76 * @LOOP_END: The number of enum values in &buf_end_align_type. 77 * It is used for controlling loop termination. 78 */ 79 LOOP_END, 80 }; 81 82 static void pr_err_size_seq(size_t *sizes, int *seq) 83 { 84 int i; 85 86 pr_err("alloc sizes: "); 87 for (i = 0; i < BUFFER_NUM; i++) 88 pr_cont("[%zu]", sizes[i]); 89 pr_cont("\n"); 90 pr_err("free seq: "); 91 for (i = 0; i < BUFFER_NUM; i++) 92 pr_cont("[%d]", seq[i]); 93 pr_cont("\n"); 94 } 95 96 static bool check_buffer_pages_allocated(struct binder_alloc *alloc, 97 struct binder_buffer *buffer, 98 size_t size) 99 { 100 unsigned long page_addr; 101 unsigned long end; 102 int page_index; 103 104 end = PAGE_ALIGN(buffer->user_data + size); 105 page_addr = buffer->user_data; 106 for (; page_addr < end; page_addr += PAGE_SIZE) { 107 page_index = (page_addr - alloc->buffer) / PAGE_SIZE; 108 if (!alloc->pages[page_index].page_ptr || 109 !list_empty(&alloc->pages[page_index].lru)) { 110 pr_err("expect alloc but is %s at page index %d\n", 111 alloc->pages[page_index].page_ptr ? 112 "lru" : "free", page_index); 113 return false; 114 } 115 } 116 return true; 117 } 118 119 static void binder_selftest_alloc_buf(struct binder_alloc *alloc, 120 struct binder_buffer *buffers[], 121 size_t *sizes, int *seq) 122 { 123 int i; 124 125 for (i = 0; i < BUFFER_NUM; i++) { 126 buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); 127 if (IS_ERR(buffers[i]) || 128 !check_buffer_pages_allocated(alloc, buffers[i], 129 sizes[i])) { 130 pr_err_size_seq(sizes, seq); 131 binder_selftest_failures++; 132 } 133 } 134 } 135 136 static void binder_selftest_free_buf(struct binder_alloc *alloc, 137 struct binder_buffer *buffers[], 138 size_t *sizes, int *seq, size_t end) 139 { 140 int i; 141 142 for (i = 0; i < BUFFER_NUM; i++) 143 binder_alloc_free_buf(alloc, buffers[seq[i]]); 144 145 for (i = 0; i < end / PAGE_SIZE; i++) { 146 /** 147 * Error message on a free page can be false positive 148 * if binder shrinker ran during binder_alloc_free_buf 149 * calls above. 150 */ 151 if (list_empty(&alloc->pages[i].lru)) { 152 pr_err_size_seq(sizes, seq); 153 pr_err("expect lru but is %s at page index %d\n", 154 alloc->pages[i].page_ptr ? "alloc" : "free", i); 155 binder_selftest_failures++; 156 } 157 } 158 } 159 160 static void binder_selftest_free_page(struct binder_alloc *alloc) 161 { 162 int i; 163 unsigned long count; 164 165 while ((count = list_lru_count(&binder_freelist))) { 166 list_lru_walk(&binder_freelist, binder_alloc_free_page, 167 NULL, count); 168 } 169 170 for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) { 171 if (alloc->pages[i].page_ptr) { 172 pr_err("expect free but is %s at page index %d\n", 173 list_empty(&alloc->pages[i].lru) ? 174 "alloc" : "lru", i); 175 binder_selftest_failures++; 176 } 177 } 178 } 179 180 static void binder_selftest_alloc_free(struct binder_alloc *alloc, 181 size_t *sizes, int *seq, size_t end) 182 { 183 struct binder_buffer *buffers[BUFFER_NUM]; 184 185 binder_selftest_alloc_buf(alloc, buffers, sizes, seq); 186 binder_selftest_free_buf(alloc, buffers, sizes, seq, end); 187 188 /* Allocate from lru. */ 189 binder_selftest_alloc_buf(alloc, buffers, sizes, seq); 190 if (list_lru_count(&binder_freelist)) 191 pr_err("lru list should be empty but is not\n"); 192 193 binder_selftest_free_buf(alloc, buffers, sizes, seq, end); 194 binder_selftest_free_page(alloc); 195 } 196 197 static bool is_dup(int *seq, int index, int val) 198 { 199 int i; 200 201 for (i = 0; i < index; i++) { 202 if (seq[i] == val) 203 return true; 204 } 205 return false; 206 } 207 208 /* Generate BUFFER_NUM factorial free orders. */ 209 static void binder_selftest_free_seq(struct binder_alloc *alloc, 210 size_t *sizes, int *seq, 211 int index, size_t end) 212 { 213 int i; 214 215 if (index == BUFFER_NUM) { 216 binder_selftest_alloc_free(alloc, sizes, seq, end); 217 return; 218 } 219 for (i = 0; i < BUFFER_NUM; i++) { 220 if (is_dup(seq, index, i)) 221 continue; 222 seq[index] = i; 223 binder_selftest_free_seq(alloc, sizes, seq, index + 1, end); 224 } 225 } 226 227 static void binder_selftest_alloc_size(struct binder_alloc *alloc, 228 size_t *end_offset) 229 { 230 int i; 231 int seq[BUFFER_NUM] = {0}; 232 size_t front_sizes[BUFFER_NUM]; 233 size_t back_sizes[BUFFER_NUM]; 234 size_t last_offset, offset = 0; 235 236 for (i = 0; i < BUFFER_NUM; i++) { 237 last_offset = offset; 238 offset = end_offset[i]; 239 front_sizes[i] = offset - last_offset; 240 back_sizes[BUFFER_NUM - i - 1] = front_sizes[i]; 241 } 242 /* 243 * Buffers share the first or last few pages. 244 * Only BUFFER_NUM - 1 buffer sizes are adjustable since 245 * we need one giant buffer before getting to the last page. 246 */ 247 back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]; 248 binder_selftest_free_seq(alloc, front_sizes, seq, 0, 249 end_offset[BUFFER_NUM - 1]); 250 binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size); 251 } 252 253 static void binder_selftest_alloc_offset(struct binder_alloc *alloc, 254 size_t *end_offset, int index) 255 { 256 int align; 257 size_t end, prev; 258 259 if (index == BUFFER_NUM) { 260 binder_selftest_alloc_size(alloc, end_offset); 261 return; 262 } 263 prev = index == 0 ? 0 : end_offset[index - 1]; 264 end = prev; 265 266 BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE); 267 268 for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) { 269 if (align % 2) 270 end = ALIGN(end, PAGE_SIZE); 271 else 272 end += BUFFER_MIN_SIZE; 273 end_offset[index] = end; 274 binder_selftest_alloc_offset(alloc, end_offset, index + 1); 275 } 276 } 277 278 /** 279 * binder_selftest_alloc() - Test alloc and free of buffer pages. 280 * @alloc: Pointer to alloc struct. 281 * 282 * Allocate BUFFER_NUM buffers to cover all page alignment cases, 283 * then free them in all orders possible. Check that pages are 284 * correctly allocated, put onto lru when buffers are freed, and 285 * are freed when binder_alloc_free_page is called. 286 */ 287 void binder_selftest_alloc(struct binder_alloc *alloc) 288 { 289 size_t end_offset[BUFFER_NUM]; 290 291 if (!binder_selftest_run) 292 return; 293 mutex_lock(&binder_selftest_lock); 294 if (!binder_selftest_run || !alloc->vma) 295 goto done; 296 pr_info("STARTED\n"); 297 binder_selftest_alloc_offset(alloc, end_offset, 0); 298 binder_selftest_run = false; 299 if (binder_selftest_failures > 0) 300 pr_info("%d tests FAILED\n", binder_selftest_failures); 301 else 302 pr_info("PASSED\n"); 303 304 done: 305 mutex_unlock(&binder_selftest_lock); 306 } 307