Lines matching references to alloc in a KUnit test for the binder allocator. Each entry gives the source line number, the matching code fragment, the enclosing function, and (where the tool noted it) whether the match is an argument, a local, or a struct member.
141 struct binder_alloc *alloc, in check_buffer_pages_allocated() argument
152 page_index = (page_addr - alloc->vm_start) / PAGE_SIZE; in check_buffer_pages_allocated()
153 if (!alloc->pages[page_index] || in check_buffer_pages_allocated()
154 !list_empty(page_to_lru(alloc->pages[page_index]))) { in check_buffer_pages_allocated()
156 alloc->pages[page_index] ? in check_buffer_pages_allocated()
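
Read together, the check_buffer_pages_allocated() fragments above walk every page backing a newly allocated buffer and fail if a page is either missing or still parked on the LRU freelist. A minimal sketch of that walk, assuming the usual kunit/test.h and binder_alloc.h context, that buffer->user_data holds the buffer's userspace address as an unsigned long, and the kunit_err() wording (none of which the fragments show in full):

static bool check_buffer_pages_allocated(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr, end;
	int page_index;

	/* Visit every page covered by [user_data, user_data + size). */
	end = PAGE_ALIGN(buffer->user_data + size);
	for (page_addr = buffer->user_data; page_addr < end;
	     page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		/* The page must exist and must not sit on the freelist. */
		if (!alloc->pages[page_index] ||
		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
			kunit_err(test, "expect alloc but is %s at page index %d\n",
				  alloc->pages[page_index] ? "lru" : "free",
				  page_index);
			return false;
		}
	}
	return true;
}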
165 struct binder_alloc *alloc, in binder_alloc_test_alloc_buf() argument
173 buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); in binder_alloc_test_alloc_buf()
175 !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i])) in binder_alloc_test_alloc_buf()
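
The binder_alloc_test_alloc_buf() fragments allocate BUFFER_NUM buffers of the requested sizes and count how many either fail outright or come back without all of their pages resident. A sketch under that reading; the return type and any parameters beyond those visible at the call sites are assumptions:

static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
						 struct binder_alloc *alloc,
						 struct binder_buffer *buffers[],
						 size_t *sizes)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		/* data_size only: no offsets, no extra buffers, not async. */
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR_OR_NULL(buffers[i]) ||
		    !check_buffer_pages_allocated(test, alloc, buffers[i],
						  sizes[i]))
			failures++;
	}

	return failures;
}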
183 struct binder_alloc *alloc, in binder_alloc_test_free_buf() argument
191 binder_alloc_free_buf(alloc, buffers[seq[i]]); in binder_alloc_test_free_buf()
194 if (list_empty(page_to_lru(alloc->pages[i]))) { in binder_alloc_test_free_buf()
196 alloc->pages[i] ? "alloc" : "free", i); in binder_alloc_test_free_buf()
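
binder_alloc_test_free_buf() is the mirror image: it frees the buffers in the order given by a permutation array and then expects every page up to the end of the used region to be sitting on the LRU freelist. A sketch, with the seq and end parameters inferred from the fragments rather than shown:

static unsigned long binder_alloc_test_free_buf(struct kunit *test,
						struct binder_alloc *alloc,
						struct binder_buffer *buffers[],
						int *seq, size_t end)
{
	unsigned long failures = 0;
	int i;

	/* Free all buffers in the order dictated by this permutation. */
	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	/* Every page up to 'end' should now be on the LRU freelist. */
	for (i = 0; i < end / PAGE_SIZE; i++) {
		if (list_empty(page_to_lru(alloc->pages[i]))) {
			kunit_err(test, "expect lru but is %s at page index %d\n",
				  alloc->pages[i] ? "alloc" : "free", i);
			failures++;
		}
	}

	return failures;
}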
205 struct binder_alloc *alloc) in binder_alloc_test_free_page() argument
211 while ((count = list_lru_count(alloc->freelist))) { in binder_alloc_test_free_page()
212 list_lru_walk(alloc->freelist, binder_alloc_free_page, in binder_alloc_test_free_page()
216 for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) { in binder_alloc_test_free_page()
217 if (alloc->pages[i]) { in binder_alloc_test_free_page()
219 list_empty(page_to_lru(alloc->pages[i])) ? in binder_alloc_test_free_page()
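
binder_alloc_test_free_page() then reclaims everything the way the binder shrinker would: it walks the freelist with binder_alloc_free_page() until the freelist is empty and verifies that no page remains installed. A sketch; the trailing arguments to list_lru_walk() are assumptions since the fragment truncates them:

static unsigned long binder_alloc_test_free_page(struct kunit *test,
						 struct binder_alloc *alloc)
{
	unsigned long failures = 0;
	unsigned long count;
	int i;

	/* Drain the freelist exactly as shrinker reclaim would. */
	while ((count = list_lru_count(alloc->freelist))) {
		list_lru_walk(alloc->freelist, binder_alloc_free_page,
			      NULL, count);
	}

	/* With the freelist empty, no page should still be installed. */
	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i]) {
			kunit_err(test, "expect free but is %s at page index %d\n",
				  list_empty(page_to_lru(alloc->pages[i])) ?
				  "alloc" : "lru", i);
			failures++;
		}
	}

	return failures;
}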
230 struct binder_alloc *alloc, in binder_alloc_test_alloc_free() argument
239 failures = binder_alloc_test_alloc_buf(test, alloc, buffers, in binder_alloc_test_alloc_free()
247 failures = binder_alloc_test_free_buf(test, alloc, buffers, in binder_alloc_test_alloc_free()
256 failures = binder_alloc_test_alloc_buf(test, alloc, buffers, in binder_alloc_test_alloc_free()
264 failures = list_lru_count(alloc->freelist); in binder_alloc_test_alloc_free()
270 failures = binder_alloc_test_free_buf(test, alloc, buffers, in binder_alloc_test_alloc_free()
278 failures = binder_alloc_test_free_page(test, alloc); in binder_alloc_test_alloc_free()
282 failures, (alloc->buffer_size / PAGE_SIZE)); in binder_alloc_test_alloc_free()
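
binder_alloc_test_alloc_free() strings those helpers into one cycle: allocate, free in a permuted order, allocate again (which must pull the same pages straight back off the freelist, leaving it empty), free again, and finally reclaim every page. A sketch of that flow using the signatures from the sketches above; the real function threads a per-case bookkeeping struct and run/failure counters through these calls, which are elided here:

static bool binder_alloc_test_alloc_free(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_buffer *buffers[],
					 size_t *sizes, int *seq, size_t end)
{
	unsigned long failures;
	bool failed = false;

	/* 1) Allocate: every backing page must be resident. */
	failures = binder_alloc_test_alloc_buf(test, alloc, buffers, sizes);
	failed = failed || failures;

	/* 2) Free in permuted order: pages move onto the LRU freelist. */
	failures = binder_alloc_test_free_buf(test, alloc, buffers, seq, end);
	failed = failed || failures;

	/* 3) Allocate again: the pages are reused, so the freelist drains. */
	failures = binder_alloc_test_alloc_buf(test, alloc, buffers, sizes);
	failed = failed || failures;

	failures = list_lru_count(alloc->freelist);
	if (failures) {
		kunit_err(test, "expected an empty freelist, found %lu entries\n",
			  failures);
		failed = true;
	}

	/* 4) Free once more, then reclaim everything via the shrinker path. */
	failures = binder_alloc_test_free_buf(test, alloc, buffers, seq, end);
	failed = failed || failures;

	failures = binder_alloc_test_free_page(test, alloc);
	if (failures) {
		kunit_err(test, "reclaim failed for %lu of %lu pages\n",
			  failures, alloc->buffer_size / PAGE_SIZE);
		failed = true;
	}

	return failed;
}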
299 static void permute_frees(struct kunit *test, struct binder_alloc *alloc, in permute_frees() argument
310 case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end); in permute_frees()
330 permute_frees(test, alloc, tc, runs, failures, index + 1, end); in permute_frees()
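
permute_frees() generates every ordering of the BUFFER_NUM frees by recursive swapping and runs the alloc/free cycle once per ordering. The skeleton below shows just that recursion, reusing the sketch signatures from above; the real function also carries the test-case struct and the runs/failures counters visible in the fragments:

static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
			  struct binder_buffer *buffers[], size_t *sizes,
			  int *seq, int index, size_t end)
{
	bool case_failed;
	int i;

	if (index == BUFFER_NUM) {
		/* A complete free order is fixed: run one alloc/free cycle. */
		case_failed = binder_alloc_test_alloc_free(test, alloc,
							   buffers, sizes,
							   seq, end);
		if (case_failed)
			kunit_err(test, "free-order case failed\n");
		return;
	}

	/* Classic swap-based permutation generation. */
	for (i = index; i < BUFFER_NUM; i++) {
		swap(seq[index], seq[i]);
		permute_frees(test, alloc, buffers, sizes, seq, index + 1, end);
		swap(seq[index], seq[i]);
	}
}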
335 struct binder_alloc *alloc, in gen_buf_sizes() argument
353 back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]; in gen_buf_sizes()
362 permute_frees(test, alloc, tc, runs, failures, 0, in gen_buf_sizes()
367 permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size); in gen_buf_sizes()
370 static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc, in gen_buf_offsets() argument
385 gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures); in gen_buf_offsets()
400 gen_buf_offsets(test, alloc, end_offset, alignments, runs, in gen_buf_offsets()
406 struct binder_alloc alloc; member
416 KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist, in binder_alloc_test_init_freelist()
423 struct binder_alloc *alloc = &priv->alloc; in binder_alloc_test_mmap() local
427 KUNIT_EXPECT_EQ(test, alloc->mapped, true); in binder_alloc_test_mmap()
428 KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE); in binder_alloc_test_mmap()
430 n = rb_first(&alloc->allocated_buffers); in binder_alloc_test_mmap()
433 n = rb_first(&alloc->free_buffers); in binder_alloc_test_mmap()
435 KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf), in binder_alloc_test_mmap()
437 KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers)); in binder_alloc_test_mmap()
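
The fixture keeps its own struct binder_alloc plus a private freelist, and binder_alloc_test_mmap() checks the post-mmap invariants visible above: the allocator is marked mapped, buffer_size equals BINDER_MMAP_SIZE, nothing sits on the allocated-buffers tree, and a single free buffer spanning the whole mapping is the last entry on the buffers list. A sketch of those assertions; the fixture struct name (binder_alloc_test) and how the BINDER_MMAP_SIZE mapping gets established are assumptions not shown in the fragments:

static void binder_alloc_test_mmap(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	struct binder_alloc *alloc = &priv->alloc;
	struct binder_buffer *buf;
	struct rb_node *n;

	/* Assumed: the BINDER_MMAP_SIZE mapping was set up earlier. */
	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

	/* Nothing allocated yet ... */
	n = rb_first(&alloc->allocated_buffers);
	KUNIT_EXPECT_NULL(test, n);

	/* ... and one free buffer covering the entire mapping. */
	n = rb_first(&alloc->free_buffers);
	KUNIT_EXPECT_NOT_NULL(test, n);
	buf = rb_entry(n, struct binder_buffer, rb_node);
	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
			BINDER_MMAP_SIZE);
	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}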
457 gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs, in binder_alloc_exhaustive_test()
468 struct binder_alloc *alloc = vma->vm_private_data; in binder_alloc_test_vma_close() local
470 binder_alloc_vma_close(alloc); in binder_alloc_test_vma_close()
481 struct binder_alloc *alloc = filp->private_data; in binder_alloc_test_mmap_handler() local
486 vma->vm_private_data = alloc; in binder_alloc_test_mmap_handler()
488 return binder_alloc_mmap_handler(alloc, vma); in binder_alloc_test_mmap_handler()
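
These two fragments are the glue that lets the test mmap against its private allocator: a ->mmap handler that stashes the binder_alloc in the VMA and forwards to binder_alloc_mmap_handler(), and a VMA close hook that forwards to binder_alloc_vma_close(), mirroring what binder's own fops/vm_ops do. A sketch; the vm_operations_struct name and the exact fops contents are assumptions (binder_alloc_test_fops itself appears in the init fragments below):

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = vma->vm_private_data;

	binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
	.close = binder_alloc_test_vma_close,
};

static int binder_alloc_test_mmap_handler(struct file *filp,
					  struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = filp->private_data;

	/* Hand the allocator to the close hook, then let the real
	 * binder_alloc mmap path do the work. */
	vma->vm_private_data = alloc;
	vma->vm_ops = &binder_alloc_test_vm_ops;

	return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
	.mmap = binder_alloc_test_mmap_handler,
};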
517 __binder_alloc_init(&priv->alloc, &priv->binder_test_freelist); in binder_alloc_test_init()
520 &binder_alloc_test_fops, &priv->alloc, in binder_alloc_test_init()
546 if (priv->alloc.mm) in binder_alloc_test_exit()
547 binder_alloc_deferred_release(&priv->alloc); in binder_alloc_test_exit()
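
Finally, the init/exit fragments show the fixture wiring: __binder_alloc_init() is given the test's own list_lru instead of the driver-global freelist, binder_alloc_test_fops and &priv->alloc are handed to some file-creation helper so the mmap path can run, and teardown releases the allocator only if an mmap actually attached an mm. A sketch of that wiring; the fixture layout, the anon_inode_getfile() call, and the filp member are assumptions standing in for whatever the real test uses:

/* Hypothetical fixture layout matching the members visible above. */
struct binder_alloc_test {
	struct binder_alloc alloc;
	struct list_lru binder_test_freelist;
	struct file *filp;	/* assumed: keeps the test file alive */
};

static int binder_alloc_test_init(struct kunit *test)
{
	struct binder_alloc_test *priv;
	struct file *filp;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	if (list_lru_init(&priv->binder_test_freelist))
		return -ENOMEM;

	/* Point the allocator at the test-private freelist. */
	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

	/* Assumption: an anonymous file carries the fops and the allocator
	 * so that mmap() reaches binder_alloc_test_mmap_handler(). */
	filp = anon_inode_getfile("binder_alloc_kunit",
				  &binder_alloc_test_fops, &priv->alloc,
				  O_RDWR);
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	priv->filp = filp;

	return 0;
}

static void binder_alloc_test_exit(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* Only tear the allocator down if an mmap attached an mm. */
	if (priv->alloc.mm)
		binder_alloc_deferred_release(&priv->alloc);

	if (priv->filp)
		fput(priv->filp);

	list_lru_destroy(&priv->binder_test_freelist);
}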