Lines matching refs:alloc in drivers/android/binder_alloc.c (cross-reference listing; the leading number on each entry is the line in that file, followed by the matching line and the enclosing function)
61 VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc, in binder_alloc_buffer_size() argument
64 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
65 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
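Lines 64-65 above are the end-of-range case of the size computation: the last buffer in alloc->buffers extends to the end of the mapped region. A minimal sketch of the whole helper, where binder_buffer_next() is an assumed accessor over the address-ordered alloc->buffers list:

        /* Sketch only: a buffer's size is the gap between its user_data and
         * the start of the next buffer, or the end of the mapping for the
         * last one. binder_buffer_next() is an assumed helper. */
        static size_t buffer_size_sketch(struct binder_alloc *alloc,
                                         struct binder_buffer *buffer)
        {
                if (list_is_last(&buffer->entry, &alloc->buffers))
                        return alloc->vm_start + alloc->buffer_size - buffer->user_data;
                return binder_buffer_next(buffer)->user_data - buffer->user_data;
        }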
70 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument
73 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
81 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer()
85 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
92 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
100 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
104 struct binder_alloc *alloc, struct binder_buffer *new_buffer) in binder_insert_allocated_buffer_locked() argument
106 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
125 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
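Lines 73-125 are the two rb-tree inserts: free_buffers is ordered by buffer size (for best-fit allocation), allocated_buffers by user_data address. A hedged sketch of the descend-and-link pattern behind these references, using the free tree as the example:

        /* Sketch of the insert pattern: walk rb_node child pointers by key,
         * then link the new node and rebalance. The size ordering matches
         * the free_buffers references at lines 81-100 above. */
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        size_t new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        while (*p) {
                struct binder_buffer *buffer;
                size_t buffer_size;

                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);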
129 struct binder_alloc *alloc, in binder_alloc_prepare_to_free_locked() argument
132 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
169 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, in binder_alloc_prepare_to_free() argument
172 guard(mutex)(&alloc->mutex); in binder_alloc_prepare_to_free()
173 return binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
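Line 172 uses the scope-based lock guard from <linux/cleanup.h>: guard(mutex)(&alloc->mutex) takes the mutex and releases it automatically when the enclosing scope exits, so the wrapper can return the locked helper's result directly. The same shape, stated as a sketch:

        /* Sketch of the guard() wrapper around a *_locked helper; the mutex
         * is dropped automatically on every return path. */
        struct binder_buffer *prepare_to_free_sketch(struct binder_alloc *alloc,
                                                     unsigned long user_ptr)
        {
                guard(mutex)(&alloc->mutex);
                return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        }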
177 binder_set_installed_page(struct binder_alloc *alloc, in binder_set_installed_page() argument
182 smp_store_release(&alloc->pages[index], page); in binder_set_installed_page()
186 binder_get_installed_page(struct binder_alloc *alloc, unsigned long index) in binder_get_installed_page() argument
189 return smp_load_acquire(&alloc->pages[index]); in binder_get_installed_page()
192 static void binder_lru_freelist_add(struct binder_alloc *alloc, in binder_lru_freelist_add() argument
198 trace_binder_update_page_range(alloc, false, start, end); in binder_lru_freelist_add()
204 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_add()
205 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_add()
209 trace_binder_free_lru_start(alloc, index); in binder_lru_freelist_add()
211 ret = list_lru_add(alloc->freelist, in binder_lru_freelist_add()
217 trace_binder_free_lru_end(alloc, index); in binder_lru_freelist_add()
222 void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state) in binder_alloc_set_mapped() argument
225 smp_store_release(&alloc->mapped, state); in binder_alloc_set_mapped()
228 static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc) in binder_alloc_is_mapped() argument
231 return smp_load_acquire(&alloc->mapped); in binder_alloc_is_mapped()
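The accessors at lines 177-189 and 222-231 form release/acquire pairs: alloc->pages[index] and alloc->mapped are published with smp_store_release() and read with smp_load_acquire(), so a reader that observes the new value also observes every write that preceded the store. A minimal illustration of that guarantee, with setup_page() and use_page() standing in for whatever the real code does before and after:

        /* Writer: finish initialising the page, then publish the pointer. */
        setup_page(page);                               /* assumed initialisation */
        smp_store_release(&alloc->pages[index], page);  /* publish */

        /* Reader: a non-NULL result guarantees the initialisation is visible. */
        page = smp_load_acquire(&alloc->pages[index]);
        if (page)
                use_page(page);                         /* assumed consumer */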
234 static struct page *binder_page_lookup(struct binder_alloc *alloc, in binder_page_lookup() argument
237 struct mm_struct *mm = alloc->mm; in binder_page_lookup()
246 if (binder_alloc_is_mapped(alloc)) in binder_page_lookup()
254 static int binder_page_insert(struct binder_alloc *alloc, in binder_page_insert() argument
258 struct mm_struct *mm = alloc->mm; in binder_page_insert()
265 if (binder_alloc_is_mapped(alloc)) in binder_page_insert()
274 if (vma && binder_alloc_is_mapped(alloc)) in binder_page_insert()
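Lines 234-274 are the two mm-side helpers. Both take alloc->mm, re-check binder_alloc_is_mapped() under the mmap read lock so they never touch a VMA after binder_alloc_vma_close() has cleared the flag, and only then operate on the mapping. A hedged sketch of the insert side; the vma_lookup()/vm_insert_page() pairing is inferred from the lines above rather than quoted:

        /* Sketch: map one page into the binder VMA. Returns -ESRCH when the
         * mapping is gone, otherwise whatever vm_insert_page() reports. */
        static int page_insert_sketch(struct binder_alloc *alloc,
                                      unsigned long addr, struct page *page)
        {
                struct mm_struct *mm = alloc->mm;
                struct vm_area_struct *vma;
                int ret = -ESRCH;

                mmap_read_lock(mm);
                vma = vma_lookup(mm, addr);
                if (vma && binder_alloc_is_mapped(alloc))
                        ret = vm_insert_page(vma, addr, page);
                mmap_read_unlock(mm);

                return ret;
        }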
281 static struct page *binder_page_alloc(struct binder_alloc *alloc, in binder_page_alloc() argument
298 mdata->alloc = alloc; in binder_page_alloc()
312 static int binder_install_single_page(struct binder_alloc *alloc, in binder_install_single_page() argument
319 if (!mmget_not_zero(alloc->mm)) in binder_install_single_page()
322 page = binder_page_alloc(alloc, index); in binder_install_single_page()
328 ret = binder_page_insert(alloc, addr, page); in binder_install_single_page()
338 page = binder_page_lookup(alloc, addr); in binder_install_single_page()
341 alloc->pid, addr - alloc->vm_start); in binder_install_single_page()
348 binder_set_installed_page(alloc, index, page); in binder_install_single_page()
353 alloc->pid, __func__, addr - alloc->vm_start, ret); in binder_install_single_page()
357 mmput_async(alloc->mm); in binder_install_single_page()
361 static int binder_install_buffer_pages(struct binder_alloc *alloc, in binder_install_buffer_pages() argument
375 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_install_buffer_pages()
376 if (binder_get_installed_page(alloc, index)) in binder_install_buffer_pages()
379 trace_binder_alloc_page_start(alloc, index); in binder_install_buffer_pages()
381 ret = binder_install_single_page(alloc, index, page_addr); in binder_install_buffer_pages()
385 trace_binder_alloc_page_end(alloc, index); in binder_install_buffer_pages()
392 static void binder_lru_freelist_del(struct binder_alloc *alloc, in binder_lru_freelist_del() argument
398 trace_binder_update_page_range(alloc, true, start, end); in binder_lru_freelist_del()
404 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_del()
405 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_del()
408 trace_binder_alloc_lru_start(alloc, index); in binder_lru_freelist_del()
410 on_lru = list_lru_del(alloc->freelist, in binder_lru_freelist_del()
416 trace_binder_alloc_lru_end(alloc, index); in binder_lru_freelist_del()
420 if (index + 1 > alloc->pages_high) in binder_lru_freelist_del()
421 alloc->pages_high = index + 1; in binder_lru_freelist_del()
425 static void debug_no_space_locked(struct binder_alloc *alloc) in debug_no_space_locked() argument
437 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
439 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
446 for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
448 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
462 static bool debug_low_async_space_locked(struct binder_alloc *alloc) in debug_low_async_space_locked() argument
481 if (alloc->free_async_space >= alloc->buffer_size / 10) { in debug_low_async_space_locked()
482 alloc->oneway_spam_detected = false; in debug_low_async_space_locked()
486 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
493 total_alloc_size += binder_alloc_buffer_size(alloc, buffer); in debug_low_async_space_locked()
502 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
505 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
506 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
507 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
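Lines 462-507 implement the oneway-spam heuristic: nothing is reported while free async space stays at or above one tenth of the mapping, and once it drops below that, the allocator walks the allocated tree and flags the calling process if it alone holds more than 50 async buffers or more than a quarter of the whole buffer space. A condensed sketch of that check; the buffer->pid and buffer->async_transaction field names are assumptions about struct binder_buffer:

        /* Sketch of the heuristic behind lines 481-507. */
        if (alloc->free_async_space >= alloc->buffer_size / 10) {
                alloc->oneway_spam_detected = false;
                return false;                   /* plenty of async space left */
        }

        for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                if (buffer->pid != pid || !buffer->async_transaction)
                        continue;               /* only this sender's oneway buffers */
                total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
                num_buffers++;
        }

        if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
                if (!alloc->oneway_spam_detected) {
                        alloc->oneway_spam_detected = true;
                        return true;            /* report once per episode */
                }
        }
        return false;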
516 struct binder_alloc *alloc, in binder_alloc_new_buf_locked() argument
521 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
528 if (is_async && alloc->free_async_space < size) { in binder_alloc_new_buf_locked()
531 alloc->pid, size); in binder_alloc_new_buf_locked()
539 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
555 alloc->pid, size); in binder_alloc_new_buf_locked()
556 debug_no_space_locked(alloc); in binder_alloc_new_buf_locked()
564 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
570 binder_insert_free_buffer(alloc, new_buffer); in binder_alloc_new_buf_locked()
576 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
586 binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), in binder_alloc_new_buf_locked()
589 rb_erase(&buffer->rb_node, &alloc->free_buffers); in binder_alloc_new_buf_locked()
592 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
596 alloc->free_async_space -= size; in binder_alloc_new_buf_locked()
599 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
600 if (debug_low_async_space_locked(alloc)) in binder_alloc_new_buf_locked()
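Lines 516-600 are the core allocator. The free tree is searched best-fit: descend left when the request fits (remembering the candidate), right when it does not, and stop on an exact match; the chosen buffer is then pulled off the page freelist, moved to the allocated tree, and async space is debited. The search loop, as a sketch consistent with the references above (buffer splitting is elided):

        /* Best-fit walk over alloc->free_buffers (ordered by size). */
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct rb_node *best_fit = NULL;

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;           /* fits; look for a tighter fit */
                        n = n->rb_left;
                } else if (size > buffer_size) {
                        n = n->rb_right;        /* too small; go bigger */
                } else {
                        best_fit = n;           /* exact fit */
                        break;
                }
        }
        if (!best_fit)
                return ERR_PTR(-ENOSPC);        /* no free buffer is large enough */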
647 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, in binder_alloc_new_buf() argument
658 if (!binder_alloc_is_mapped(alloc)) { in binder_alloc_new_buf()
661 alloc->pid); in binder_alloc_new_buf()
669 alloc->pid, data_size, offsets_size, in binder_alloc_new_buf()
679 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
680 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); in binder_alloc_new_buf()
682 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
690 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
692 ret = binder_install_buffer_pages(alloc, buffer, size); in binder_alloc_new_buf()
694 binder_alloc_free_buf(alloc, buffer); in binder_alloc_new_buf()
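Lines 647-694 show the locking split in binder_alloc_new_buf(): the address-space bookkeeping happens under alloc->mutex, but the physical pages are installed only after the mutex is dropped, and a failed install hands the buffer straight back. A sketch of that sequence, where next is the pre-allocated struct binder_buffer visible at line 680:

        /* Reserve the range under the mutex, populate pages outside it. */
        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
        mutex_unlock(&alloc->mutex);
        if (IS_ERR(buffer))
                return buffer;                  /* e.g. -ENOSPC from the best-fit walk */

        ret = binder_install_buffer_pages(alloc, buffer, size);
        if (ret) {
                binder_alloc_free_buf(alloc, buffer);
                buffer = ERR_PTR(ret);
        }
        return buffer;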
712 static void binder_delete_free_buffer(struct binder_alloc *alloc, in binder_delete_free_buffer() argument
720 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
726 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
732 binder_lru_freelist_add(alloc, buffer_start_page(buffer), in binder_delete_free_buffer()
739 static void binder_free_buf_locked(struct binder_alloc *alloc, in binder_free_buf_locked() argument
744 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
752 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
757 BUG_ON(buffer->user_data < alloc->vm_start); in binder_free_buf_locked()
758 BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size); in binder_free_buf_locked()
761 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
764 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
767 binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), in binder_free_buf_locked()
770 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
772 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
776 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
777 binder_delete_free_buffer(alloc, next); in binder_free_buf_locked()
780 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
784 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
785 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
789 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
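Lines 712-789 free a buffer and merge it with free neighbours so the address-ordered alloc->buffers list never holds two adjacent free entries: if the next buffer is free it is removed and absorbed, if the previous one is free the current entry is deleted and the previous one grows, and only then is the survivor reinserted into the free tree. In outline, matching the calls listed above:

        /* Coalescing sketch; binder_buffer_next()/_prev() are assumed
         * accessors over the address-ordered alloc->buffers list. */
        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);   /* absorb next */
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer); /* prev absorbs us */
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);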
811 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, in binder_alloc_get_page() argument
817 (buffer->user_data - alloc->vm_start); in binder_alloc_get_page()
823 return alloc->pages[index]; in binder_alloc_get_page()
833 static void binder_alloc_clear_buf(struct binder_alloc *alloc, in binder_alloc_clear_buf() argument
836 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
844 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
860 void binder_alloc_free_buf(struct binder_alloc *alloc, in binder_alloc_free_buf() argument
872 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
875 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
876 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
877 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
894 int binder_alloc_mmap_handler(struct binder_alloc *alloc, in binder_alloc_mmap_handler() argument
901 if (unlikely(vma->vm_mm != alloc->mm)) { in binder_alloc_mmap_handler()
908 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
913 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
917 alloc->vm_start = vma->vm_start; in binder_alloc_mmap_handler()
919 alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
920 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
922 if (!alloc->pages) { in binder_alloc_mmap_handler()
935 buffer->user_data = alloc->vm_start; in binder_alloc_mmap_handler()
936 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
938 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
939 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
942 binder_alloc_set_mapped(alloc, true); in binder_alloc_mmap_handler()
947 kvfree(alloc->pages); in binder_alloc_mmap_handler()
948 alloc->pages = NULL; in binder_alloc_mmap_handler()
950 alloc->vm_start = 0; in binder_alloc_mmap_handler()
952 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
958 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
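Lines 894-958 are the one-time mmap setup: remember the VMA start, size the region (binder caps the mapping, historically at 4 MB, which is what the min_t() at line 913 compares against), allocate the page-pointer array, seed the buffer list with a single free buffer covering the whole range, reserve half of it for async transactions, and finally publish alloc->mapped. A compressed sketch of the success path; the SZ_4M cap is an assumption and error unwinding is elided:

        /* Success-path sketch of binder_alloc_mmap_handler(). */
        alloc->buffer_size = min_t(unsigned long,
                                   vma->vm_end - vma->vm_start,
                                   SZ_4M);                      /* assumed cap */
        alloc->vm_start = vma->vm_start;

        alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
                                sizeof(alloc->pages[0]), GFP_KERNEL);
        if (!alloc->pages)
                return -ENOMEM;                                 /* simplified */

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);          /* first free buffer */
        buffer->user_data = alloc->vm_start;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;

        binder_alloc_set_mapped(alloc, true);
        return 0;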
964 void binder_alloc_deferred_release(struct binder_alloc *alloc) in binder_alloc_deferred_release() argument
971 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
972 BUG_ON(alloc->mapped); in binder_alloc_deferred_release()
974 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
981 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
984 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
988 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
989 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
994 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
999 if (alloc->pages) { in binder_alloc_deferred_release()
1002 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
1006 page = binder_get_installed_page(alloc, i); in binder_alloc_deferred_release()
1010 on_lru = list_lru_del(alloc->freelist, in binder_alloc_deferred_release()
1016 __func__, alloc->pid, i, in binder_alloc_deferred_release()
1022 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
1023 kvfree(alloc->pages); in binder_alloc_deferred_release()
1024 if (alloc->mm) in binder_alloc_deferred_release()
1025 mmdrop(alloc->mm); in binder_alloc_deferred_release()
1029 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
1042 struct binder_alloc *alloc) in binder_alloc_print_allocated() argument
1047 guard(mutex)(&alloc->mutex); in binder_alloc_print_allocated()
1048 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in binder_alloc_print_allocated()
1052 buffer->user_data - alloc->vm_start, in binder_alloc_print_allocated()
1065 struct binder_alloc *alloc) in binder_alloc_print_pages() argument
1073 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
1078 if (binder_alloc_is_mapped(alloc)) { in binder_alloc_print_pages()
1079 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
1080 page = binder_get_installed_page(alloc, i); in binder_alloc_print_pages()
1089 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
1091 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
1100 int binder_alloc_get_allocated_count(struct binder_alloc *alloc) in binder_alloc_get_allocated_count() argument
1105 guard(mutex)(&alloc->mutex); in binder_alloc_get_allocated_count()
1106 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
1120 void binder_alloc_vma_close(struct binder_alloc *alloc) in binder_alloc_vma_close() argument
1122 binder_alloc_set_mapped(alloc, false); in binder_alloc_vma_close()
1141 struct binder_alloc *alloc = mdata->alloc; in binder_alloc_free_page() local
1142 struct mm_struct *mm = alloc->mm; in binder_alloc_free_page()
1153 page_addr = alloc->vm_start + index * PAGE_SIZE; in binder_alloc_free_page()
1165 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
1173 if (vma && !binder_alloc_is_mapped(alloc)) in binder_alloc_free_page()
1176 trace_binder_unmap_kernel_start(alloc, index); in binder_alloc_free_page()
1178 page_to_free = alloc->pages[index]; in binder_alloc_free_page()
1179 binder_set_installed_page(alloc, index, NULL); in binder_alloc_free_page()
1181 trace_binder_unmap_kernel_end(alloc, index); in binder_alloc_free_page()
1187 trace_binder_unmap_user_start(alloc, index); in binder_alloc_free_page()
1191 trace_binder_unmap_user_end(alloc, index); in binder_alloc_free_page()
1194 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1205 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
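Lines 1141-1205 are the list_lru shrinker callback. It runs in reclaim context, so it cannot sleep on alloc->mutex; it uses mutex_trylock() and simply skips the page when the lock is contended, and it unpublishes the page pointer before zapping the user mapping and freeing the page. A heavily simplified sketch of that ordering; the lru_status return value and the zap call are assumptions about the surrounding callback, not quotes:

        /* Reclaim-side sketch: never sleep on alloc->mutex from the lru walk. */
        page_addr = alloc->vm_start + index * PAGE_SIZE;

        if (!mutex_trylock(&alloc->mutex))
                return LRU_SKIP;                        /* try again on a later walk */

        page_to_free = alloc->pages[index];
        binder_set_installed_page(alloc, index, NULL);  /* unpublish first */

        if (vma)                                        /* user mapping still live */
                zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);

        mutex_unlock(&alloc->mutex);
        __free_page(page_to_free);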
1233 VISIBLE_IF_KUNIT void __binder_alloc_init(struct binder_alloc *alloc, in __binder_alloc_init() argument
1236 alloc->pid = current->group_leader->pid; in __binder_alloc_init()
1237 alloc->mm = current->mm; in __binder_alloc_init()
1238 mmgrab(alloc->mm); in __binder_alloc_init()
1239 mutex_init(&alloc->mutex); in __binder_alloc_init()
1240 INIT_LIST_HEAD(&alloc->buffers); in __binder_alloc_init()
1241 alloc->freelist = freelist; in __binder_alloc_init()
1252 void binder_alloc_init(struct binder_alloc *alloc) in binder_alloc_init() argument
1254 __binder_alloc_init(alloc, &binder_freelist); in binder_alloc_init()
1304 static inline bool check_buffer(struct binder_alloc *alloc, in check_buffer() argument
1308 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1330 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_user_to_buffer() argument
1336 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1346 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1361 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, in binder_alloc_do_buffer_copy() argument
1369 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1377 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1392 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_to_buffer() argument
1398 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1402 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, in binder_alloc_copy_from_buffer() argument
1408 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
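Lines 1304-1408 are the copy helpers. Buffers are contiguous only in the target's address space, so every copy is bounced through the kernel one page at a time: binder_alloc_get_page() turns a buffer offset into a struct page plus an in-page offset, and at most PAGE_SIZE - pgoff bytes move per iteration. A sketch of the user-to-buffer direction along those lines; the kmap_local_page() usage is an assumption, not a quote:

        /* Sketch: copy 'bytes' from user memory into a binder buffer, one
         * page at a time. Returns the number of bytes left uncopied. */
        while (bytes) {
                unsigned long size, ret;
                struct page *page;
                pgoff_t pgoff;
                void *kptr;

                page = binder_alloc_get_page(alloc, buffer, buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                kptr = kmap_local_page(page) + pgoff;
                ret = copy_from_user(kptr, from, size);
                kunmap_local(kptr);
                if (ret)
                        return bytes - size + ret;      /* partial copy */
                bytes -= size;
                from += size;
                buffer_offset += size;
        }
        return 0;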