Lines Matching +full:buffer +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 2007-2017 Google, Inc.
51 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) in binder_buffer_next() argument
53 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
56 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) in binder_buffer_prev() argument
58 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
62 struct binder_buffer *buffer) in binder_alloc_buffer_size() argument
64 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
65 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
66 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
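A buffer's size is never stored; the helper above derives it from the gap between the buffer's user_data and the next buffer's start (or, for the last buffer in the list, the end of the mapped region). A minimal userspace sketch of that computation, using simplified stand-in types rather than the kernel's struct binder_alloc / struct binder_buffer:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct buf {
	unsigned long user_data;	/* start address of this buffer */
	struct buf *next;		/* next buffer by address, NULL if last */
};

struct area {
	unsigned long vm_start;		/* start of the mmap'ed region */
	size_t size;			/* total size of the region */
};

/* Size of a buffer == distance to the next buffer, or to the end of the area. */
static size_t buf_size(const struct area *a, const struct buf *b)
{
	if (!b->next)
		return a->vm_start + a->size - b->user_data;
	return b->next->user_data - b->user_data;
}

int main(void)
{
	struct area a = { .vm_start = 0x1000, .size = 0x4000 };
	struct buf b2 = { .user_data = 0x2800, .next = NULL };
	struct buf b1 = { .user_data = 0x1000, .next = &b2 };

	/* 0x1800 (6144) and 0x2800 (10240) bytes respectively. */
	printf("b1: %zu, b2: %zu\n", buf_size(&a, &b1), buf_size(&a, &b2));
	return 0;
}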
73 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
75 struct binder_buffer *buffer; in binder_insert_free_buffer() local
79 BUG_ON(!new_buffer->free); in binder_insert_free_buffer()
84 "%d: add free buffer, size %zd, at %pK\n", in binder_insert_free_buffer()
85 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
89 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
90 BUG_ON(!buffer->free); in binder_insert_free_buffer()
92 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
95 p = &parent->rb_left; in binder_insert_free_buffer()
97 p = &parent->rb_right; in binder_insert_free_buffer()
99 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_free_buffer()
100 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
106 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
108 struct binder_buffer *buffer; in binder_insert_allocated_buffer_locked() local
110 BUG_ON(new_buffer->free); in binder_insert_allocated_buffer_locked()
114 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer_locked()
115 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
117 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
118 p = &parent->rb_left; in binder_insert_allocated_buffer_locked()
119 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
120 p = &parent->rb_right; in binder_insert_allocated_buffer_locked()
124 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_allocated_buffer_locked()
125 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
132 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
133 struct binder_buffer *buffer; in binder_alloc_prepare_to_free_locked() local
136 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_prepare_to_free_locked()
137 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
139 if (user_ptr < buffer->user_data) { in binder_alloc_prepare_to_free_locked()
140 n = n->rb_left; in binder_alloc_prepare_to_free_locked()
141 } else if (user_ptr > buffer->user_data) { in binder_alloc_prepare_to_free_locked()
142 n = n->rb_right; in binder_alloc_prepare_to_free_locked()
146 * free the buffer when in use by kernel or in binder_alloc_prepare_to_free_locked()
149 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
150 return ERR_PTR(-EPERM); in binder_alloc_prepare_to_free_locked()
151 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
152 return buffer; in binder_alloc_prepare_to_free_locked()
159 * binder_alloc_prepare_to_free() - get buffer given user ptr
161 * @user_ptr: User pointer to buffer data
163 * Validate the userspace pointer to buffer data and return the buffer corresponding to
164 * that user pointer. Search the rb tree for the buffer that matches the user data
167 * Return: Pointer to the buffer, NULL if not found, or ERR_PTR(-EPERM) if freeing is not allowed
172 guard(mutex)(&alloc->mutex); in binder_alloc_prepare_to_free()
182 smp_store_release(&alloc->pages[index], page); in binder_set_installed_page()
189 return smp_load_acquire(&alloc->pages[index]); in binder_get_installed_page()
204 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_add()
211 ret = list_lru_add(alloc->freelist, in binder_lru_freelist_add()
225 smp_store_release(&alloc->mapped, state); in binder_alloc_set_mapped()
231 return smp_load_acquire(&alloc->mapped); in binder_alloc_is_mapped()
237 struct mm_struct *mm = alloc->mm; in binder_page_lookup()
243 * don't attempt to fault-in; just propagate an error. in binder_page_lookup()
258 struct mm_struct *mm = alloc->mm; in binder_page_insert()
260 int ret = -ESRCH; in binder_page_insert()
262 /* attempt per-vma lock first */ in binder_page_insert()
291 /* allocate and install shrinker metadata under page->private */ in binder_page_alloc()
298 mdata->alloc = alloc; in binder_page_alloc()
299 mdata->page_index = index; in binder_page_alloc()
300 INIT_LIST_HEAD(&mdata->lru); in binder_page_alloc()
319 if (!mmget_not_zero(alloc->mm)) in binder_install_single_page()
320 return -ESRCH; in binder_install_single_page()
324 ret = -ENOMEM; in binder_install_single_page()
330 case -EBUSY: in binder_install_single_page()
333 * alloc->pages[index] has not been updated yet. Discard in binder_install_single_page()
341 alloc->pid, addr - alloc->vm_start); in binder_install_single_page()
342 ret = -ESRCH; in binder_install_single_page()
353 alloc->pid, __func__, addr - alloc->vm_start, ret); in binder_install_single_page()
357 mmput_async(alloc->mm); in binder_install_single_page()
362 struct binder_buffer *buffer, in binder_install_buffer_pages() argument
363 size_t size) in binder_install_buffer_pages() argument
368 start = buffer->user_data & PAGE_MASK; in binder_install_buffer_pages()
369 final = PAGE_ALIGN(buffer->user_data + size); in binder_install_buffer_pages()
375 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_install_buffer_pages()
404 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_del()
410 on_lru = list_lru_del(alloc->freelist, in binder_lru_freelist_del()
420 if (index + 1 > alloc->pages_high) in binder_lru_freelist_del()
421 alloc->pages_high = index + 1; in binder_lru_freelist_del()
428 struct binder_buffer *buffer; in debug_no_space_locked() local
437 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
438 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_no_space_locked()
439 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
446 for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
447 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_no_space_locked()
448 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
465 * Find the number and total size of buffers allocated by the current caller; in debug_low_async_space_locked()
471 struct binder_buffer *buffer; in debug_low_async_space_locked() local
473 int pid = current->tgid; in debug_low_async_space_locked()
479 * space left (which is less than 10% of total buffer size). in debug_low_async_space_locked()
481 if (alloc->free_async_space >= alloc->buffer_size / 10) { in debug_low_async_space_locked()
482 alloc->oneway_spam_detected = false; in debug_low_async_space_locked()
486 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
488 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_low_async_space_locked()
489 if (buffer->pid != pid) in debug_low_async_space_locked()
491 if (!buffer->async_transaction) in debug_low_async_space_locked()
493 total_alloc_size += binder_alloc_buffer_size(alloc, buffer); in debug_low_async_space_locked()
499 * async space (which is 25% of total buffer size). Oneway spam is only in debug_low_async_space_locked()
502 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
504 "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n", in debug_low_async_space_locked()
505 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
506 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
507 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
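Per the fragments above, the low-async-space check only runs once free async space drops below 10% of the total buffer size, and a caller becomes a oneway-spam suspect once it holds more than 50 async buffers or more than 25% of the total buffer size. A small illustrative model of those thresholds (function and parameter names are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Only worth scanning allocated buffers once async space is getting low. */
static bool low_async_space(size_t free_async_space, size_t buffer_size)
{
	return free_async_space < buffer_size / 10;
}

/* Flag a pid that holds > 50 async buffers or > 25% of the whole mapping. */
static bool oneway_spam_suspect(size_t num_buffers, size_t total_alloc_size,
				size_t buffer_size)
{
	return num_buffers > 50 || total_alloc_size > buffer_size / 4;
}

int main(void)
{
	size_t buffer_size = 1024 * 1024;	/* 1 MiB mapping */

	printf("low space: %d\n", low_async_space(80 * 1024, buffer_size));	/* 1 */
	printf("suspect:   %d\n", oneway_spam_suspect(12, 300 * 1024, buffer_size)); /* 1 */
	return 0;
}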
518 size_t size, in binder_alloc_new_buf_locked() argument
521 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
523 struct binder_buffer *buffer; in binder_alloc_new_buf_locked() local
528 if (is_async && alloc->free_async_space < size) { in binder_alloc_new_buf_locked()
530 "%d: binder_alloc_buf size %zd failed, no async space left\n", in binder_alloc_new_buf_locked()
531 alloc->pid, size); in binder_alloc_new_buf_locked()
532 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
537 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
538 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
539 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
541 if (size < buffer_size) { in binder_alloc_new_buf_locked()
543 n = n->rb_left; in binder_alloc_new_buf_locked()
544 } else if (size > buffer_size) { in binder_alloc_new_buf_locked()
545 n = n->rb_right; in binder_alloc_new_buf_locked()
554 "%d: binder_alloc_buf size %zd failed, no address space\n", in binder_alloc_new_buf_locked()
555 alloc->pid, size); in binder_alloc_new_buf_locked()
557 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
561 if (buffer_size != size) { in binder_alloc_new_buf_locked()
562 /* Found an oversized buffer that needs to be split */ in binder_alloc_new_buf_locked()
563 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
564 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
566 WARN_ON(n || buffer_size == size); in binder_alloc_new_buf_locked()
567 new_buffer->user_data = buffer->user_data + size; in binder_alloc_new_buf_locked()
568 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
569 new_buffer->free = 1; in binder_alloc_new_buf_locked()
575 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", in binder_alloc_new_buf_locked()
576 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
581 * adjacent in-use buffer. In that case, the page has already been in binder_alloc_new_buf_locked()
584 next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; in binder_alloc_new_buf_locked()
585 curr_last_page = PAGE_ALIGN(buffer->user_data + size); in binder_alloc_new_buf_locked()
586 binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), in binder_alloc_new_buf_locked()
589 rb_erase(&buffer->rb_node, &alloc->free_buffers); in binder_alloc_new_buf_locked()
590 buffer->free = 0; in binder_alloc_new_buf_locked()
591 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
592 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
593 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
594 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
596 alloc->free_async_space -= size; in binder_alloc_new_buf_locked()
598 "%d: binder_alloc_buf size %zd async free %zd\n", in binder_alloc_new_buf_locked()
599 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
601 buffer->oneway_spam_suspect = true; in binder_alloc_new_buf_locked()
607 return buffer; in binder_alloc_new_buf_locked()
610 /* Calculate the sanitized total size; returns 0 for an invalid request */
617 /* Align to pointer size and check for overflows */ in sanitized_size()
626 /* Pad 0-sized buffers so they get a unique address */ in sanitized_size()
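Per the comments above, each of the three requested sizes is rounded up to a pointer-size boundary, the results are summed with overflow checks, and zero-sized requests are padded so every buffer still gets a unique address. A userspace approximation of that sanitisation; the kernel helper's exact overflow checks may differ:

#include <stdbool.h>
#include <stddef.h>

static bool add_overflows(size_t a, size_t b, size_t *out)
{
	*out = a + b;
	return *out < a;
}

/* Returns the sanitized total size, or 0 for an invalid (overflowing) request. */
static size_t sanitized_total(size_t data, size_t offsets, size_t extra)
{
	const size_t align = sizeof(void *);
	size_t sizes[3] = { data, offsets, extra };
	size_t total = 0, part;

	for (int i = 0; i < 3; i++) {
		/* Round up to pointer size, catching wrap-around. */
		if (add_overflows(sizes[i], align - 1, &part))
			return 0;
		part &= ~(align - 1);
		if (add_overflows(total, part, &total))
			return 0;
	}

	/* Pad 0-sized buffers so they get a unique address. */
	return total ? total : align;
}

int main(void)
{
	/* 5 + 3 + 0 -> 8 + 8 + 0 = 16 with 8-byte pointers. */
	return sanitized_total(5, 3, 0) == 2 * sizeof(void *) ? 0 : 1;
}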
633 * binder_alloc_new_buf() - Allocate a new binder buffer
635 * @data_size: size of user data buffer
636 * @offsets_size: size of the user-specified offsets buffer
637 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
638 * @is_async: buffer for async transaction
640 * Allocate a new buffer given the requested sizes. Returns
641 * the kernel version of the buffer pointer. The size allocated
643 * pointer-sized boundary)
645 * Return: The allocated buffer or %ERR_PTR(-errno) if error
653 struct binder_buffer *buffer, *next; in binder_alloc_new_buf() local
654 size_t size; in binder_alloc_new_buf() local
661 alloc->pid); in binder_alloc_new_buf()
662 return ERR_PTR(-ESRCH); in binder_alloc_new_buf()
665 size = sanitized_size(data_size, offsets_size, extra_buffers_size); in binder_alloc_new_buf()
666 if (unlikely(!size)) { in binder_alloc_new_buf()
668 "%d: got transaction with invalid size %zd-%zd-%zd\n", in binder_alloc_new_buf()
669 alloc->pid, data_size, offsets_size, in binder_alloc_new_buf()
671 return ERR_PTR(-EINVAL); in binder_alloc_new_buf()
674 /* Preallocate the next buffer */ in binder_alloc_new_buf()
677 return ERR_PTR(-ENOMEM); in binder_alloc_new_buf()
679 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
680 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); in binder_alloc_new_buf()
681 if (IS_ERR(buffer)) { in binder_alloc_new_buf()
682 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
686 buffer->data_size = data_size; in binder_alloc_new_buf()
687 buffer->offsets_size = offsets_size; in binder_alloc_new_buf()
688 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf()
689 buffer->pid = current->tgid; in binder_alloc_new_buf()
690 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
692 ret = binder_install_buffer_pages(alloc, buffer, size); in binder_alloc_new_buf()
694 binder_alloc_free_buf(alloc, buffer); in binder_alloc_new_buf()
695 buffer = ERR_PTR(ret); in binder_alloc_new_buf()
698 return buffer; in binder_alloc_new_buf()
702 static unsigned long buffer_start_page(struct binder_buffer *buffer) in buffer_start_page() argument
704 return buffer->user_data & PAGE_MASK; in buffer_start_page()
707 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer) in prev_buffer_end_page() argument
709 return (buffer->user_data - 1) & PAGE_MASK; in prev_buffer_end_page()
713 struct binder_buffer *buffer) in binder_delete_free_buffer() argument
717 if (PAGE_ALIGNED(buffer->user_data)) in binder_delete_free_buffer()
720 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
721 prev = binder_buffer_prev(buffer); in binder_delete_free_buffer()
722 BUG_ON(!prev->free); in binder_delete_free_buffer()
723 if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) in binder_delete_free_buffer()
726 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
727 next = binder_buffer_next(buffer); in binder_delete_free_buffer()
728 if (buffer_start_page(next) == buffer_start_page(buffer)) in binder_delete_free_buffer()
732 binder_lru_freelist_add(alloc, buffer_start_page(buffer), in binder_delete_free_buffer()
733 buffer_start_page(buffer) + PAGE_SIZE); in binder_delete_free_buffer()
735 list_del(&buffer->entry); in binder_delete_free_buffer()
736 kfree(buffer); in binder_delete_free_buffer()
740 struct binder_buffer *buffer) in binder_free_buf_locked() argument
742 size_t size, buffer_size; in binder_free_buf_locked() local
744 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
746 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
747 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
748 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
751 "%d: binder_free_buf %pK size %zd buffer_size %zd\n", in binder_free_buf_locked()
752 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
754 BUG_ON(buffer->free); in binder_free_buf_locked()
755 BUG_ON(size > buffer_size); in binder_free_buf_locked()
756 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
757 BUG_ON(buffer->user_data < alloc->vm_start); in binder_free_buf_locked()
758 BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size); in binder_free_buf_locked()
760 if (buffer->async_transaction) { in binder_free_buf_locked()
761 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
763 "%d: binder_free_buf size %zd async free %zd\n", in binder_free_buf_locked()
764 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
767 binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), in binder_free_buf_locked()
768 (buffer->user_data + buffer_size) & PAGE_MASK); in binder_free_buf_locked()
770 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
771 buffer->free = 1; in binder_free_buf_locked()
772 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
773 struct binder_buffer *next = binder_buffer_next(buffer); in binder_free_buf_locked()
775 if (next->free) { in binder_free_buf_locked()
776 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
780 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
781 struct binder_buffer *prev = binder_buffer_prev(buffer); in binder_free_buf_locked()
783 if (prev->free) { in binder_free_buf_locked()
784 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
785 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
786 buffer = prev; in binder_free_buf_locked()
789 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
793 * binder_alloc_get_page() - get kernel pointer for given buffer offset
795 * @buffer: binder buffer to be accessed
796 * @buffer_offset: offset into @buffer data
800 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
801 * NULL, the byte-offset into the page is written there.
804 * to a valid address within the @buffer and that @buffer is
806 * guaranteed that the corresponding elements of @alloc->pages[]
812 struct binder_buffer *buffer, in binder_alloc_get_page() argument
817 (buffer->user_data - alloc->vm_start); in binder_alloc_get_page()
823 return alloc->pages[index]; in binder_alloc_get_page()
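binder_alloc_get_page() turns an offset within a buffer into an index into alloc->pages[] plus a byte offset inside that page, both measured from the start of the mapped region. A standalone model of that split, assuming a fixed 4 KiB page size and illustrative names:

#include <stddef.h>
#include <stdio.h>

#define MY_PAGE_SIZE 4096UL

/* Translate (buffer start, offset into buffer) into (page index, page offset). */
static void buffer_offset_to_page(unsigned long vm_start,
				  unsigned long buffer_user_data,
				  size_t buffer_offset,
				  size_t *index, size_t *pgoff)
{
	size_t off = buffer_offset + (buffer_user_data - vm_start);

	*index = off / MY_PAGE_SIZE;	/* which pages[] entry */
	*pgoff = off % MY_PAGE_SIZE;	/* byte offset within it */
}

int main(void)
{
	size_t index, pgoff;

	buffer_offset_to_page(0x10000, 0x11800, 0x900, &index, &pgoff);
	printf("index=%zu pgoff=%zu\n", index, pgoff);	/* index=2 pgoff=256 */
	return 0;
}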
827 * binder_alloc_clear_buf() - zero out buffer
829 * @buffer: binder buffer to be cleared
831 * memset the given buffer to 0
834 struct binder_buffer *buffer) in binder_alloc_clear_buf() argument
836 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
840 unsigned long size; in binder_alloc_clear_buf() local
844 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
846 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_clear_buf()
847 memset_page(page, pgoff, 0, size); in binder_alloc_clear_buf()
848 bytes -= size; in binder_alloc_clear_buf()
849 buffer_offset += size; in binder_alloc_clear_buf()
854 * binder_alloc_free_buf() - free a binder buffer
856 * @buffer: kernel pointer to buffer
858 * Free the buffer allocated via binder_alloc_new_buf()
861 struct binder_buffer *buffer) in binder_alloc_free_buf() argument
871 if (buffer->clear_on_free) { in binder_alloc_free_buf()
872 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
873 buffer->clear_on_free = false; in binder_alloc_free_buf()
875 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
876 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
877 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
882 * binder_alloc_mmap_handler() - map virtual address space for proc
891 * -EBUSY = address space already mapped
892 * -ENOMEM = failed to map memory to given address space
897 struct binder_buffer *buffer; in binder_alloc_mmap_handler() local
901 if (unlikely(vma->vm_mm != alloc->mm)) { in binder_alloc_mmap_handler()
902 ret = -EINVAL; in binder_alloc_mmap_handler()
903 failure_string = "invalid vma->vm_mm"; in binder_alloc_mmap_handler()
908 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
909 ret = -EBUSY; in binder_alloc_mmap_handler()
913 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
917 alloc->vm_start = vma->vm_start; in binder_alloc_mmap_handler()
919 alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
920 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
922 if (!alloc->pages) { in binder_alloc_mmap_handler()
923 ret = -ENOMEM; in binder_alloc_mmap_handler()
928 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_mmap_handler()
929 if (!buffer) { in binder_alloc_mmap_handler()
930 ret = -ENOMEM; in binder_alloc_mmap_handler()
931 failure_string = "alloc buffer struct"; in binder_alloc_mmap_handler()
935 buffer->user_data = alloc->vm_start; in binder_alloc_mmap_handler()
936 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
937 buffer->free = 1; in binder_alloc_mmap_handler()
938 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
939 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
947 kvfree(alloc->pages); in binder_alloc_mmap_handler()
948 alloc->pages = NULL; in binder_alloc_mmap_handler()
950 alloc->vm_start = 0; in binder_alloc_mmap_handler()
952 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
957 "%s: %d %lx-%lx %s failed %d\n", __func__, in binder_alloc_mmap_handler()
958 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
968 struct binder_buffer *buffer; in binder_alloc_deferred_release() local
971 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
972 BUG_ON(alloc->mapped); in binder_alloc_deferred_release()
974 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
975 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_deferred_release()
978 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
980 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
981 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
982 buffer->clear_on_free = false; in binder_alloc_deferred_release()
984 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
988 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
989 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
991 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
993 list_del(&buffer->entry); in binder_alloc_deferred_release()
994 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
995 kfree(buffer); in binder_alloc_deferred_release()
999 if (alloc->pages) { in binder_alloc_deferred_release()
1002 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
1010 on_lru = list_lru_del(alloc->freelist, in binder_alloc_deferred_release()
1016 __func__, alloc->pid, i, in binder_alloc_deferred_release()
1022 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
1023 kvfree(alloc->pages); in binder_alloc_deferred_release()
1024 if (alloc->mm) in binder_alloc_deferred_release()
1025 mmdrop(alloc->mm); in binder_alloc_deferred_release()
1029 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
1034 * binder_alloc_print_allocated() - print buffer info
1038 * Prints information about every buffer associated with
1044 struct binder_buffer *buffer; in binder_alloc_print_allocated() local
1047 guard(mutex)(&alloc->mutex); in binder_alloc_print_allocated()
1048 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in binder_alloc_print_allocated()
1049 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_print_allocated()
1050 seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n", in binder_alloc_print_allocated()
1051 buffer->debug_id, in binder_alloc_print_allocated()
1052 buffer->user_data - alloc->vm_start, in binder_alloc_print_allocated()
1053 buffer->data_size, buffer->offsets_size, in binder_alloc_print_allocated()
1054 buffer->extra_buffers_size, in binder_alloc_print_allocated()
1055 buffer->transaction ? "active" : "delivered"); in binder_alloc_print_allocated()
1060 * binder_alloc_print_pages() - print page usage
1073 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
1079 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
1089 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
1091 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
1095 * binder_alloc_get_allocated_count() - return count of buffers
1105 guard(mutex)(&alloc->mutex); in binder_alloc_get_allocated_count()
1106 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
1113 * binder_alloc_vma_close() - invalidate address space
1117 * Clears alloc->mapped to prevent new incoming transactions from
1127 * binder_alloc_free_page() - shrinker callback to free pages
1138 __must_hold(&lru->lock) in binder_alloc_free_page()
1141 struct binder_alloc *alloc = mdata->alloc; in binder_alloc_free_page()
1142 struct mm_struct *mm = alloc->mm; in binder_alloc_free_page()
1152 index = mdata->page_index; in binder_alloc_free_page()
1153 page_addr = alloc->vm_start + index * PAGE_SIZE; in binder_alloc_free_page()
1155 /* attempt per-vma lock first */ in binder_alloc_free_page()
1165 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
1178 page_to_free = alloc->pages[index]; in binder_alloc_free_page()
1184 spin_unlock(&lru->lock); in binder_alloc_free_page()
1194 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1205 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1228 NULL, sc->nr_to_scan); in binder_shrink_scan()
1236 alloc->pid = current->group_leader->pid; in __binder_alloc_init()
1237 alloc->mm = current->mm; in __binder_alloc_init()
1238 mmgrab(alloc->mm); in __binder_alloc_init()
1239 mutex_init(&alloc->mutex); in __binder_alloc_init()
1240 INIT_LIST_HEAD(&alloc->buffers); in __binder_alloc_init()
1241 alloc->freelist = freelist; in __binder_alloc_init()
1246 * binder_alloc_init() - called by binder_open() for per-proc initialization
1265 binder_shrinker = shrinker_alloc(0, "android-binder"); in binder_alloc_shrinker_init()
1268 return -ENOMEM; in binder_alloc_shrinker_init()
1271 binder_shrinker->count_objects = binder_shrink_count; in binder_alloc_shrinker_init()
1272 binder_shrinker->scan_objects = binder_shrink_scan; in binder_alloc_shrinker_init()
1286 * check_buffer() - verify that buffer/offset is safe to access
1288 * @buffer: binder buffer to be accessed
1289 * @offset: offset into @buffer data
1292 * Check that the @offset/@bytes are within the size of the given
1293 * @buffer and that the buffer is currently active and not freeable.
1295 * allowed to touch the buffer in two cases:
1297 * 1) when the buffer is being created:
1298 * (buffer->free == 0 && buffer->allow_user_free == 0)
1299 * 2) when the buffer is being torn down:
1300 * (buffer->free == 0 && buffer->transaction == NULL).
1302 * Return: true if the buffer is safe to access
1305 struct binder_buffer *buffer, in check_buffer() argument
1308 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1311 offset <= buffer_size - bytes && in check_buffer()
1313 !buffer->free && in check_buffer()
1314 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
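The range check above depends on the subtraction buffer_size - bytes not underflowing, which is only safe once bytes is known to be no larger than the buffer. A standalone sketch of the same overflow-safe bounds test, leaving out the liveness checks on the real binder_buffer:

#include <stdbool.h>
#include <stddef.h>

/* True if [offset, offset + bytes) lies entirely inside a buffer_size buffer. */
static bool range_in_buffer(size_t buffer_size, size_t offset, size_t bytes)
{
	return buffer_size >= bytes &&		/* makes the subtraction safe */
	       offset <= buffer_size - bytes;	/* avoids offset + bytes overflow */
}

int main(void)
{
	/* A 100-byte buffer: 8 bytes at offset 92 fit, at offset 96 they do not. */
	return (range_in_buffer(100, 92, 8) && !range_in_buffer(100, 96, 8)) ? 0 : 1;
}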
1318 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1320 * @buffer: binder buffer to be accessed
1321 * @buffer_offset: offset into @buffer data
1322 * @from: userspace pointer to source buffer
1325 * Copy bytes from source userspace to target buffer.
1331 struct binder_buffer *buffer, in binder_alloc_copy_user_to_buffer() argument
1336 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1340 unsigned long size; in binder_alloc_copy_user_to_buffer() local
1346 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1348 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_copy_user_to_buffer()
1350 ret = copy_from_user(kptr, from, size); in binder_alloc_copy_user_to_buffer()
1353 return bytes - size + ret; in binder_alloc_copy_user_to_buffer()
1354 bytes -= size; in binder_alloc_copy_user_to_buffer()
1355 from += size; in binder_alloc_copy_user_to_buffer()
1356 buffer_offset += size; in binder_alloc_copy_user_to_buffer()
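Because the destination buffer is backed by individual pages rather than one contiguous kernel mapping, the copy above proceeds page by page, moving at most PAGE_SIZE - pgoff bytes per step; on a fault the kernel routine returns how many bytes were left uncopied, mirroring copy_from_user(). A userspace model of that chunking loop, with plain arrays standing in for the pages:

#include <stddef.h>
#include <string.h>

#define MY_PAGE_SIZE 4096UL

/* Copy into a destination made of separate pages, one page-bounded chunk at a time. */
static void copy_into_paged_dest(char **pages, size_t dest_off,
				 const char *src, size_t bytes)
{
	while (bytes) {
		size_t index = dest_off / MY_PAGE_SIZE;
		size_t pgoff = dest_off % MY_PAGE_SIZE;
		size_t chunk = bytes < MY_PAGE_SIZE - pgoff ?
			       bytes : MY_PAGE_SIZE - pgoff;

		memcpy(pages[index] + pgoff, src, chunk);
		bytes -= chunk;
		src += chunk;
		dest_off += chunk;
	}
}

int main(void)
{
	static char p0[MY_PAGE_SIZE], p1[MY_PAGE_SIZE];
	char *pages[] = { p0, p1 };
	const char msg[] = "spans a page boundary";

	/* Start 10 bytes before the end of the first page; the copy crosses into p1. */
	copy_into_paged_dest(pages, MY_PAGE_SIZE - 10, msg, sizeof(msg));
	return 0;
}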
1363 struct binder_buffer *buffer, in binder_alloc_do_buffer_copy() argument
1368 /* All copies must be 32-bit aligned and 32-bit size */ in binder_alloc_do_buffer_copy()
1369 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1370 return -EINVAL; in binder_alloc_do_buffer_copy()
1373 unsigned long size; in binder_alloc_do_buffer_copy() local
1377 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1379 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_do_buffer_copy()
1381 memcpy_to_page(page, pgoff, ptr, size); in binder_alloc_do_buffer_copy()
1383 memcpy_from_page(ptr, page, pgoff, size); in binder_alloc_do_buffer_copy()
1384 bytes -= size; in binder_alloc_do_buffer_copy()
1386 ptr = ptr + size; in binder_alloc_do_buffer_copy()
1387 buffer_offset += size; in binder_alloc_do_buffer_copy()
1393 struct binder_buffer *buffer, in binder_alloc_copy_to_buffer() argument
1398 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1404 struct binder_buffer *buffer, in binder_alloc_copy_from_buffer() argument
1408 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()