Lines matching "page" and "size" in drivers/android/binder_alloc.c

1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 2007-2017 Google, Inc.
53 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
58 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
64 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
65 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
66 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
73 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
79 BUG_ON(!new_buffer->free); in binder_insert_free_buffer()
84 "%d: add free buffer, size %zd, at %pK\n", in binder_insert_free_buffer()
85 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
90 BUG_ON(!buffer->free); in binder_insert_free_buffer()
95 p = &parent->rb_left; in binder_insert_free_buffer()
97 p = &parent->rb_right; in binder_insert_free_buffer()
99 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_free_buffer()
100 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
106 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
110 BUG_ON(new_buffer->free); in binder_insert_allocated_buffer_locked()
115 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
117 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
118 p = &parent->rb_left; in binder_insert_allocated_buffer_locked()
119 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
120 p = &parent->rb_right; in binder_insert_allocated_buffer_locked()
124 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_allocated_buffer_locked()
125 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
132 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
137 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
139 if (user_ptr < buffer->user_data) { in binder_alloc_prepare_to_free_locked()
140 n = n->rb_left; in binder_alloc_prepare_to_free_locked()
141 } else if (user_ptr > buffer->user_data) { in binder_alloc_prepare_to_free_locked()
142 n = n->rb_right; in binder_alloc_prepare_to_free_locked()
149 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
150 return ERR_PTR(-EPERM); in binder_alloc_prepare_to_free_locked()
151 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
159 * binder_alloc_prepare_to_free() - get buffer given user ptr
172 guard(mutex)(&alloc->mutex); in binder_alloc_prepare_to_free()
179 struct page *page) in binder_set_installed_page() argument
182 smp_store_release(&alloc->pages[index], page); in binder_set_installed_page()
185 static inline struct page *
189 return smp_load_acquire(&alloc->pages[index]); in binder_get_installed_page()
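
The pair of helpers above publish and read alloc->pages[] with smp_store_release()/smp_load_acquire(), so a reader that sees a non-NULL slot also sees the page's earlier initialization. A minimal userspace analogue of that publish/observe pairing, using C11 atomics (the struct and helper names here are illustrative, not the driver's):

#include <stdatomic.h>

struct slot {
	_Atomic(void *) page;		/* stands in for alloc->pages[index] */
};

/* publisher: finish initializing the object, then release-store the pointer */
static void slot_install(struct slot *s, void *page)
{
	atomic_store_explicit(&s->page, page, memory_order_release);
}

/* observer: acquire-load; a non-NULL result implies the init writes are visible */
static void *slot_lookup(struct slot *s)
{
	return atomic_load_explicit(&s->page, memory_order_acquire);
}
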
196 struct page *page; in binder_lru_freelist_add() local
204 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_add()
205 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_add()
206 if (!page) in binder_lru_freelist_add()
211 ret = list_lru_add(alloc->freelist, in binder_lru_freelist_add()
212 page_to_lru(page), in binder_lru_freelist_add()
213 page_to_nid(page), in binder_lru_freelist_add()
225 smp_store_release(&alloc->mapped, state); in binder_alloc_set_mapped()
231 return smp_load_acquire(&alloc->mapped); in binder_alloc_is_mapped()
234 static struct page *binder_page_lookup(struct binder_alloc *alloc, in binder_page_lookup()
237 struct mm_struct *mm = alloc->mm; in binder_page_lookup()
238 struct page *page; in binder_page_lookup() local
242 * Find an existing page in the remote mm. If missing, in binder_page_lookup()
 243 * don't attempt to fault-in; just propagate an error. in binder_page_lookup()
248 &page, NULL); in binder_page_lookup()
251 return npages > 0 ? page : NULL; in binder_page_lookup()
256 struct page *page) in binder_page_insert() argument
258 struct mm_struct *mm = alloc->mm; in binder_page_insert()
260 int ret = -ESRCH; in binder_page_insert()
262 /* attempt per-vma lock first */ in binder_page_insert()
266 ret = vm_insert_page(vma, addr, page); in binder_page_insert()
275 ret = vm_insert_page(vma, addr, page); in binder_page_insert()
281 static struct page *binder_page_alloc(struct binder_alloc *alloc, in binder_page_alloc()
285 struct page *page; in binder_page_alloc() local
287 page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); in binder_page_alloc()
288 if (!page) in binder_page_alloc()
291 /* allocate and install shrinker metadata under page->private */ in binder_page_alloc()
294 __free_page(page); in binder_page_alloc()
298 mdata->alloc = alloc; in binder_page_alloc()
299 mdata->page_index = index; in binder_page_alloc()
300 INIT_LIST_HEAD(&mdata->lru); in binder_page_alloc()
301 set_page_private(page, (unsigned long)mdata); in binder_page_alloc()
303 return page; in binder_page_alloc()
306 static void binder_free_page(struct page *page) in binder_free_page() argument
308 kfree((struct binder_shrinker_mdata *)page_private(page)); in binder_free_page()
309 __free_page(page); in binder_free_page()
316 struct page *page; in binder_install_single_page() local
319 if (!mmget_not_zero(alloc->mm)) in binder_install_single_page()
320 return -ESRCH; in binder_install_single_page()
322 page = binder_page_alloc(alloc, index); in binder_install_single_page()
323 if (!page) { in binder_install_single_page()
324 ret = -ENOMEM; in binder_install_single_page()
328 ret = binder_page_insert(alloc, addr, page); in binder_install_single_page()
330 case -EBUSY: in binder_install_single_page()
333 * alloc->pages[index] has not been updated yet. Discard in binder_install_single_page()
334 * our page and look up the one already installed. in binder_install_single_page()
337 binder_free_page(page); in binder_install_single_page()
338 page = binder_page_lookup(alloc, addr); in binder_install_single_page()
339 if (!page) { in binder_install_single_page()
340 pr_err("%d: failed to find page at offset %lx\n", in binder_install_single_page()
341 alloc->pid, addr - alloc->vm_start); in binder_install_single_page()
342 ret = -ESRCH; in binder_install_single_page()
347 /* Mark page installation complete and safe to use */ in binder_install_single_page()
348 binder_set_installed_page(alloc, index, page); in binder_install_single_page()
351 binder_free_page(page); in binder_install_single_page()
352 pr_err("%d: %s failed to insert page at offset %lx with %d\n", in binder_install_single_page()
353 alloc->pid, __func__, addr - alloc->vm_start, ret); in binder_install_single_page()
357 mmput_async(alloc->mm); in binder_install_single_page()
363 size_t size) in binder_install_buffer_pages() argument
368 start = buffer->user_data & PAGE_MASK; in binder_install_buffer_pages()
369 final = PAGE_ALIGN(buffer->user_data + size); in binder_install_buffer_pages()
375 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_install_buffer_pages()
396 struct page *page; in binder_lru_freelist_del() local
404 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_del()
405 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_del()
407 if (page) { in binder_lru_freelist_del()
410 on_lru = list_lru_del(alloc->freelist, in binder_lru_freelist_del()
411 page_to_lru(page), in binder_lru_freelist_del()
412 page_to_nid(page), in binder_lru_freelist_del()
420 if (index + 1 > alloc->pages_high) in binder_lru_freelist_del()
421 alloc->pages_high = index + 1; in binder_lru_freelist_del()
437 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
446 for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
465 * Find the amount and size of buffers allocated by the current caller; in debug_low_async_space_locked()
473 int pid = current->tgid; in debug_low_async_space_locked()
479 * space left (which is less than 10% of total buffer size). in debug_low_async_space_locked()
481 if (alloc->free_async_space >= alloc->buffer_size / 10) { in debug_low_async_space_locked()
482 alloc->oneway_spam_detected = false; in debug_low_async_space_locked()
486 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
489 if (buffer->pid != pid) in debug_low_async_space_locked()
491 if (!buffer->async_transaction) in debug_low_async_space_locked()
499 * async space (which is 25% of total buffer size). Oneway spam is only in debug_low_async_space_locked()
502 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
504 "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n", in debug_low_async_space_locked()
505 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
506 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
507 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
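
Taken together, the heuristics in this helper are: the warning path only runs once free async space has dropped below 10% of the whole mapping (line 481), and a single sender becomes a oneway-spam suspect once it holds more than 50 async buffers or more than 25% of the mapping (line 502); the async pool itself starts at half the mapping (line 939, later in the listing). A small self-contained illustration of those thresholds, with a made-up mapping size and per-sender totals:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t buffer_size = 1 << 20;              /* example: a 1 MiB binder mapping */
	size_t free_async_space = buffer_size / 2; /* async space starts at half the mapping */
	size_t caller_async_total = 300 * 1024;    /* hypothetical per-sender async usage */
	size_t caller_async_buffers = 12;          /* hypothetical per-sender buffer count */

	bool low_async = free_async_space < buffer_size / 10;
	bool spam_suspect = caller_async_buffers > 50 ||
			    caller_async_total > buffer_size / 4;

	printf("low async space: %d, oneway spam suspect: %d\n",
	       low_async, spam_suspect);
	return 0;
}
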
518 size_t size, in binder_alloc_new_buf_locked() argument
521 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
528 if (is_async && alloc->free_async_space < size) { in binder_alloc_new_buf_locked()
530 "%d: binder_alloc_buf size %zd failed, no async space left\n", in binder_alloc_new_buf_locked()
531 alloc->pid, size); in binder_alloc_new_buf_locked()
532 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
538 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
541 if (size < buffer_size) { in binder_alloc_new_buf_locked()
543 n = n->rb_left; in binder_alloc_new_buf_locked()
544 } else if (size > buffer_size) { in binder_alloc_new_buf_locked()
545 n = n->rb_right; in binder_alloc_new_buf_locked()
554 "%d: binder_alloc_buf size %zd failed, no address space\n", in binder_alloc_new_buf_locked()
555 alloc->pid, size); in binder_alloc_new_buf_locked()
557 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
561 if (buffer_size != size) { in binder_alloc_new_buf_locked()
566 WARN_ON(n || buffer_size == size); in binder_alloc_new_buf_locked()
567 new_buffer->user_data = buffer->user_data + size; in binder_alloc_new_buf_locked()
568 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
569 new_buffer->free = 1; in binder_alloc_new_buf_locked()
575 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", in binder_alloc_new_buf_locked()
576 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
580 * with buffer_size determines if the last page is shared with an in binder_alloc_new_buf_locked()
 581 * adjacent in-use buffer. In such a case, the page has already been in binder_alloc_new_buf_locked()
584 next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; in binder_alloc_new_buf_locked()
585 curr_last_page = PAGE_ALIGN(buffer->user_data + size); in binder_alloc_new_buf_locked()
586 binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), in binder_alloc_new_buf_locked()
589 rb_erase(&buffer->rb_node, &alloc->free_buffers); in binder_alloc_new_buf_locked()
590 buffer->free = 0; in binder_alloc_new_buf_locked()
591 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
593 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
594 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
596 alloc->free_async_space -= size; in binder_alloc_new_buf_locked()
598 "%d: binder_alloc_buf size %zd async free %zd\n", in binder_alloc_new_buf_locked()
599 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
601 buffer->oneway_spam_suspect = true; in binder_alloc_new_buf_locked()
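
Lines 538–545 are a best-fit descent of the size-ordered free tree, and lines 561–568 split the unused tail of the chosen buffer back onto the free list. A simplified, self-contained sketch of that best-fit-then-split idea over a flat array of free chunks (just the selection and split logic, not the driver's rb-tree and list bookkeeping):

#include <stddef.h>
#include <stdio.h>

struct chunk { size_t start, size; };

/* pick the smallest free chunk that still fits 'size' (best fit) */
static struct chunk *best_fit(struct chunk *free_list, size_t n, size_t size)
{
	struct chunk *best = NULL;

	for (size_t i = 0; i < n; i++) {
		if (free_list[i].size < size)
			continue;
		if (!best || free_list[i].size < best->size)
			best = &free_list[i];
	}
	return best;
}

int main(void)
{
	struct chunk free_list[] = { { 0, 256 }, { 512, 64 }, { 1024, 128 } };
	size_t want = 96;
	struct chunk *c = best_fit(free_list, 3, want);

	if (c && c->size > want) {
		/* split: the unused tail of the chosen chunk stays free */
		struct chunk tail = { c->start + want, c->size - want };

		c->size = want;
		printf("allocated [%zu,+%zu), new free tail [%zu,+%zu)\n",
		       c->start, c->size, tail.start, tail.size);
	}
	return 0;
}
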
610 /* Calculate the sanitized total size, returns 0 for invalid request */
617 /* Align to pointer size and check for overflows */ in sanitized_size()
626 /* Pad 0-sized buffers so they get a unique address */ in sanitized_size()
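
The two comments above summarize the size sanitization: each size is rounded up to a pointer-sized boundary, the running sums are checked for wrap-around (returning 0 on overflow), and a zero-byte request is padded so the buffer still gets a unique address. A self-contained sketch of that overflow-checked rounding, using my own helper names rather than the kernel's ALIGN():

#include <stddef.h>

#define PTR_ALIGN_UP(x) (((x) + sizeof(void *) - 1) & ~(sizeof(void *) - 1))

/* returns 0 if any intermediate sum wraps, mirroring the "invalid request" case */
static size_t sanitize_total(size_t data, size_t offsets, size_t extra)
{
	size_t tmp, total;

	tmp = PTR_ALIGN_UP(data) + PTR_ALIGN_UP(offsets);
	if (tmp < data || tmp < offsets)
		return 0;

	total = tmp + PTR_ALIGN_UP(extra);
	if (total < tmp || total < extra)
		return 0;

	/* pad zero-sized requests so the buffer still has a unique address */
	if (total == 0)
		total = sizeof(void *);

	return total;
}
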
633 * binder_alloc_new_buf() - Allocate a new binder buffer
635 * @data_size: size of user data buffer
 637 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
641 * the kernel version of the buffer pointer. The size allocated
643 * pointer-sized boundary)
645 * Return: The allocated buffer or %ERR_PTR(-errno) if error
654 size_t size; in binder_alloc_new_buf() local
661 alloc->pid); in binder_alloc_new_buf()
662 return ERR_PTR(-ESRCH); in binder_alloc_new_buf()
665 size = sanitized_size(data_size, offsets_size, extra_buffers_size); in binder_alloc_new_buf()
666 if (unlikely(!size)) { in binder_alloc_new_buf()
668 "%d: got transaction with invalid size %zd-%zd-%zd\n", in binder_alloc_new_buf()
669 alloc->pid, data_size, offsets_size, in binder_alloc_new_buf()
671 return ERR_PTR(-EINVAL); in binder_alloc_new_buf()
677 return ERR_PTR(-ENOMEM); in binder_alloc_new_buf()
679 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
680 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); in binder_alloc_new_buf()
682 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
686 buffer->data_size = data_size; in binder_alloc_new_buf()
687 buffer->offsets_size = offsets_size; in binder_alloc_new_buf()
688 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf()
689 buffer->pid = current->tgid; in binder_alloc_new_buf()
690 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
692 ret = binder_install_buffer_pages(alloc, buffer, size); in binder_alloc_new_buf()
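
Per the kernel-doc above, the allocator returns either a buffer or an ERR_PTR-encoded errno. A hedged sketch of how a caller would check the result; the parameter list is inferred from the fragments (three sizes plus an is_async flag) and may not match the real prototype exactly:

	struct binder_buffer *buf;

	buf = binder_alloc_new_buf(alloc, data_size, offsets_size,
				   extra_buffers_size, is_async);
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -EINVAL, -ENOSPC, -ENOMEM or -ESRCH */
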
704 return buffer->user_data & PAGE_MASK; in buffer_start_page()
709 return (buffer->user_data - 1) & PAGE_MASK; in prev_buffer_end_page()
717 if (PAGE_ALIGNED(buffer->user_data)) in binder_delete_free_buffer()
720 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
722 BUG_ON(!prev->free); in binder_delete_free_buffer()
726 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
735 list_del(&buffer->entry); in binder_delete_free_buffer()
742 size_t size, buffer_size; in binder_free_buf_locked() local
746 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
747 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
748 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
751 "%d: binder_free_buf %pK size %zd buffer_size %zd\n", in binder_free_buf_locked()
752 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
754 BUG_ON(buffer->free); in binder_free_buf_locked()
755 BUG_ON(size > buffer_size); in binder_free_buf_locked()
756 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
757 BUG_ON(buffer->user_data < alloc->vm_start); in binder_free_buf_locked()
758 BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size); in binder_free_buf_locked()
760 if (buffer->async_transaction) { in binder_free_buf_locked()
761 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
763 "%d: binder_free_buf size %zd async free %zd\n", in binder_free_buf_locked()
764 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
767 binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), in binder_free_buf_locked()
768 (buffer->user_data + buffer_size) & PAGE_MASK); in binder_free_buf_locked()
770 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
771 buffer->free = 1; in binder_free_buf_locked()
772 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
775 if (next->free) { in binder_free_buf_locked()
776 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
780 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
783 if (prev->free) { in binder_free_buf_locked()
785 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
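
Lines 770–785 show the coalescing step on free: once the buffer is marked free, a free successor or predecessor in the address-ordered buffer list is removed from the free tree and folded in. A tiny self-contained illustration of merging adjacent free ranges, using plain [start, end) intervals instead of the driver's list and rb-tree bookkeeping:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; bool free; };

/* fold a free neighbor into 'buf' when they are adjacent and both free */
static void merge_if_free(struct range *buf, struct range *next)
{
	if (buf->free && next->free && buf->end == next->start) {
		buf->end = next->end;
		next->start = next->end = 0;	/* neighbor entry is now gone */
	}
}

int main(void)
{
	struct range buf  = { 0x1000, 0x1800, true };
	struct range next = { 0x1800, 0x2000, true };

	merge_if_free(&buf, &next);
	printf("merged free range: [0x%lx, 0x%lx)\n", buf.start, buf.end);
	return 0;
}
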
793 * binder_alloc_get_page() - get kernel pointer for given buffer offset
797 * @pgoffp: address to copy final page offset to
799 * Lookup the struct page corresponding to the address
800 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
801 * NULL, the byte-offset into the page is written there.
806 * guaranteed that the corresponding elements of @alloc->pages[]
809 * Return: struct page
811 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, in binder_alloc_get_page()
817 (buffer->user_data - alloc->vm_start); in binder_alloc_get_page()
823 return alloc->pages[index]; in binder_alloc_get_page()
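
The doc comment above describes the translation this helper performs: a byte offset into the buffer becomes an offset from the start of the mapping, whose page-sized quotient selects the pages[] slot and whose remainder is the offset within that page. A self-contained example of that split, assuming 4 KiB pages and made-up addresses:

#include <assert.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE  4096UL
#define SKETCH_PAGE_MASK  (~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
	unsigned long vm_start      = 0x7f0000000000UL;	/* start of the mapping */
	unsigned long buffer_start  = 0x7f0000002e00UL;	/* buffer->user_data */
	unsigned long buffer_offset = 0x300;

	unsigned long space_offset = buffer_offset + (buffer_start - vm_start);
	unsigned long index = space_offset / SKETCH_PAGE_SIZE;	/* pages[] slot */
	unsigned long pgoff = space_offset & ~SKETCH_PAGE_MASK;	/* offset in page */

	assert(index == 3 && pgoff == 0x100);
	printf("pages[%lu], page offset 0x%lx\n", index, pgoff);
	return 0;
}
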
827 * binder_alloc_clear_buf() - zero out buffer
840 unsigned long size; in binder_alloc_clear_buf() local
841 struct page *page; in binder_alloc_clear_buf() local
844 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
846 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_clear_buf()
847 memset_page(page, pgoff, 0, size); in binder_alloc_clear_buf()
848 bytes -= size; in binder_alloc_clear_buf()
849 buffer_offset += size; in binder_alloc_clear_buf()
854 * binder_alloc_free_buf() - free a binder buffer
871 if (buffer->clear_on_free) { in binder_alloc_free_buf()
873 buffer->clear_on_free = false; in binder_alloc_free_buf()
875 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
877 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
882 * binder_alloc_mmap_handler() - map virtual address space for proc
891 * -EBUSY = address space already mapped
892 * -ENOMEM = failed to map memory to given address space
901 if (unlikely(vma->vm_mm != alloc->mm)) { in binder_alloc_mmap_handler()
902 ret = -EINVAL; in binder_alloc_mmap_handler()
903 failure_string = "invalid vma->vm_mm"; in binder_alloc_mmap_handler()
908 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
909 ret = -EBUSY; in binder_alloc_mmap_handler()
913 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
917 alloc->vm_start = vma->vm_start; in binder_alloc_mmap_handler()
919 alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
920 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
922 if (!alloc->pages) { in binder_alloc_mmap_handler()
923 ret = -ENOMEM; in binder_alloc_mmap_handler()
924 failure_string = "alloc page array"; in binder_alloc_mmap_handler()
930 ret = -ENOMEM; in binder_alloc_mmap_handler()
935 buffer->user_data = alloc->vm_start; in binder_alloc_mmap_handler()
936 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
937 buffer->free = 1; in binder_alloc_mmap_handler()
939 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
947 kvfree(alloc->pages); in binder_alloc_mmap_handler()
948 alloc->pages = NULL; in binder_alloc_mmap_handler()
950 alloc->vm_start = 0; in binder_alloc_mmap_handler()
952 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
957 "%s: %d %lx-%lx %s failed %d\n", __func__, in binder_alloc_mmap_handler()
958 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
971 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
972 BUG_ON(alloc->mapped); in binder_alloc_deferred_release()
974 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
978 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
980 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
982 buffer->clear_on_free = false; in binder_alloc_deferred_release()
988 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
989 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
991 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
993 list_del(&buffer->entry); in binder_alloc_deferred_release()
994 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
999 if (alloc->pages) { in binder_alloc_deferred_release()
1002 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
1003 struct page *page; in binder_alloc_deferred_release() local
1006 page = binder_get_installed_page(alloc, i); in binder_alloc_deferred_release()
1007 if (!page) in binder_alloc_deferred_release()
1010 on_lru = list_lru_del(alloc->freelist, in binder_alloc_deferred_release()
1011 page_to_lru(page), in binder_alloc_deferred_release()
1012 page_to_nid(page), in binder_alloc_deferred_release()
1015 "%s: %d: page %d %s\n", in binder_alloc_deferred_release()
1016 __func__, alloc->pid, i, in binder_alloc_deferred_release()
1018 binder_free_page(page); in binder_alloc_deferred_release()
1022 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
1023 kvfree(alloc->pages); in binder_alloc_deferred_release()
1024 if (alloc->mm) in binder_alloc_deferred_release()
1025 mmdrop(alloc->mm); in binder_alloc_deferred_release()
1029 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
1034 * binder_alloc_print_allocated() - print buffer info
1047 guard(mutex)(&alloc->mutex); in binder_alloc_print_allocated()
1048 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in binder_alloc_print_allocated()
1050 seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n", in binder_alloc_print_allocated()
1051 buffer->debug_id, in binder_alloc_print_allocated()
1052 buffer->user_data - alloc->vm_start, in binder_alloc_print_allocated()
1053 buffer->data_size, buffer->offsets_size, in binder_alloc_print_allocated()
1054 buffer->extra_buffers_size, in binder_alloc_print_allocated()
1055 buffer->transaction ? "active" : "delivered"); in binder_alloc_print_allocated()
1060 * binder_alloc_print_pages() - print page usage
1067 struct page *page; in binder_alloc_print_pages() local
1073 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
1079 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
1080 page = binder_get_installed_page(alloc, i); in binder_alloc_print_pages()
1081 if (!page) in binder_alloc_print_pages()
1083 else if (list_empty(page_to_lru(page))) in binder_alloc_print_pages()
1089 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
1091 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
1095 * binder_alloc_get_allocated_count() - return count of buffers
1105 guard(mutex)(&alloc->mutex); in binder_alloc_get_allocated_count()
1106 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
1113 * binder_alloc_vma_close() - invalidate address space
1117 * Clears alloc->mapped to prevent new incoming transactions from
1127 * binder_alloc_free_page() - shrinker callback to free pages
1138 __must_hold(&lru->lock) in binder_alloc_free_page()
1141 struct binder_alloc *alloc = mdata->alloc; in binder_alloc_free_page()
1142 struct mm_struct *mm = alloc->mm; in binder_alloc_free_page()
1144 struct page *page_to_free; in binder_alloc_free_page()
1152 index = mdata->page_index; in binder_alloc_free_page()
1153 page_addr = alloc->vm_start + index * PAGE_SIZE; in binder_alloc_free_page()
1155 /* attempt per-vma lock first */ in binder_alloc_free_page()
1165 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
1178 page_to_free = alloc->pages[index]; in binder_alloc_free_page()
1184 spin_unlock(&lru->lock); in binder_alloc_free_page()
1194 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1205 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1228 NULL, sc->nr_to_scan); in binder_shrink_scan()
1236 alloc->pid = current->group_leader->pid; in __binder_alloc_init()
1237 alloc->mm = current->mm; in __binder_alloc_init()
1238 mmgrab(alloc->mm); in __binder_alloc_init()
1239 mutex_init(&alloc->mutex); in __binder_alloc_init()
1240 INIT_LIST_HEAD(&alloc->buffers); in __binder_alloc_init()
1241 alloc->freelist = freelist; in __binder_alloc_init()
1246 * binder_alloc_init() - called by binder_open() for per-proc initialization
1265 binder_shrinker = shrinker_alloc(0, "android-binder"); in binder_alloc_shrinker_init()
1268 return -ENOMEM; in binder_alloc_shrinker_init()
1271 binder_shrinker->count_objects = binder_shrink_count; in binder_alloc_shrinker_init()
1272 binder_shrinker->scan_objects = binder_shrink_scan; in binder_alloc_shrinker_init()
1286 * check_buffer() - verify that buffer/offset is safe to access
1292 * Check that the @offset/@bytes are within the size of the given
1298 * (buffer->free == 0 && buffer->allow_user_free == 0)
1300 * (buffer->free == 0 && buffer->transaction == NULL).
1311 offset <= buffer_size - bytes && in check_buffer()
1313 !buffer->free && in check_buffer()
1314 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
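
Note the shape of the bounds test in lines 1311–1314: the subtraction buffer_size - bytes is only safe because a preceding bytes-vs-buffer_size check (not visible in these fragments) keeps it from wrapping, and the final clauses reject buffers the caller may no longer touch. A minimal sketch of the unsigned-wrap pitfall that ordering avoids:

#include <stdbool.h>
#include <stddef.h>

/* the size check must guard the subtraction: with unsigned arithmetic,
 * buffer_size - bytes wraps to a huge value when bytes > buffer_size,
 * which would make "offset <= buffer_size - bytes" pass for bad input */
static bool span_in_bounds(size_t buffer_size, size_t offset, size_t bytes)
{
	return bytes <= buffer_size && offset <= buffer_size - bytes;
}
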
1318 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1340 unsigned long size; in binder_alloc_copy_user_to_buffer() local
1342 struct page *page; in binder_alloc_copy_user_to_buffer() local
1346 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1348 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_copy_user_to_buffer()
1349 kptr = kmap_local_page(page) + pgoff; in binder_alloc_copy_user_to_buffer()
1350 ret = copy_from_user(kptr, from, size); in binder_alloc_copy_user_to_buffer()
1353 return bytes - size + ret; in binder_alloc_copy_user_to_buffer()
1354 bytes -= size; in binder_alloc_copy_user_to_buffer()
1355 from += size; in binder_alloc_copy_user_to_buffer()
1356 buffer_offset += size; in binder_alloc_copy_user_to_buffer()
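
Both copy helpers (lines 1340–1356 and the routine that follows) walk the buffer one page at a time: each pass copies at most up to the end of the current page, then advances the offset and continues. A self-contained sketch of that chunking loop, with memcpy standing in for the per-page kmap-and-copy step:

#include <stddef.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096UL

/* copy 'bytes' from 'src' into a flat 'space', never crossing a page
 * boundary in a single step, the way the per-page helpers do */
static void copy_chunked(unsigned char *space, size_t offset,
			 const unsigned char *src, size_t bytes)
{
	while (bytes) {
		size_t pgoff = offset & (SKETCH_PAGE_SIZE - 1);
		size_t size = bytes < SKETCH_PAGE_SIZE - pgoff ?
			      bytes : SKETCH_PAGE_SIZE - pgoff;

		memcpy(space + offset, src, size);	/* one page's worth at most */
		bytes -= size;
		src += size;
		offset += size;
	}
}
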
1368 /* All copies must be 32-bit aligned and 32-bit size */ in binder_alloc_do_buffer_copy()
1370 return -EINVAL; in binder_alloc_do_buffer_copy()
1373 unsigned long size; in binder_alloc_do_buffer_copy() local
1374 struct page *page; in binder_alloc_do_buffer_copy() local
1377 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1379 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_do_buffer_copy()
1381 memcpy_to_page(page, pgoff, ptr, size); in binder_alloc_do_buffer_copy()
1383 memcpy_from_page(ptr, page, pgoff, size); in binder_alloc_do_buffer_copy()
1384 bytes -= size; in binder_alloc_do_buffer_copy()
1386 ptr = ptr + size; in binder_alloc_do_buffer_copy()
1387 buffer_offset += size; in binder_alloc_do_buffer_copy()