// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static struct list_lru binder_freelist;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

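/*
 * Buffers are carved out of one contiguous mapping and kept on
 * alloc->buffers in address order, so a buffer's size is implicit: it is
 * the gap between its own start and the start of the next buffer, or the
 * end of the mapping for the last buffer. For example, buffers starting
 * at offsets 0x0 and 0x40 give the first buffer a size of 0x40 bytes.
 */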
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->vm_start + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > buffer->user_data) {
			n = n->rb_right;
		} else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer while it is in use by the
			 * kernel or after it has already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate the userspace pointer to the buffer data and return the buffer
 * matching that user pointer by searching the allocated-buffers rb tree.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   unsigned long user_ptr)
{
	guard(mutex)(&alloc->mutex);
	return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
}

static inline void
binder_set_installed_page(struct binder_alloc *alloc,
			  unsigned long index,
			  struct page *page)
{
	/* Pairs with acquire in binder_get_installed_page() */
	smp_store_release(&alloc->pages[index], page);
}

static inline struct page *
binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
	/* Pairs with release in binder_set_installed_page() */
	return smp_load_acquire(&alloc->pages[index]);
}

static void binder_lru_freelist_add(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	unsigned long page_addr;
	struct page *page;

	trace_binder_update_page_range(alloc, false, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		size_t index;
		int ret;

		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		page = binder_get_installed_page(alloc, index);
		if (!page)
			continue;

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(alloc->freelist,
				   page_to_lru(page),
				   page_to_nid(page),
				   NULL);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
	}
}

static inline
void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
	/* pairs with smp_load_acquire in binder_alloc_is_mapped() */
	smp_store_release(&alloc->mapped, state);
}

static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_mapped() */
	return smp_load_acquire(&alloc->mapped);
}

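/*
 * The helpers below manage pages in the remote task's address space:
 * binder_page_lookup() finds a page that is already mapped there, while
 * binder_page_insert() maps a freshly allocated page, trying the per-VMA
 * lock first to avoid mmap_lock contention before falling back to it.
 */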
static struct page *binder_page_lookup(struct binder_alloc *alloc,
				       unsigned long addr)
{
	struct mm_struct *mm = alloc->mm;
	struct page *page;
	long npages = 0;

	/*
	 * Find an existing page in the remote mm. If missing,
	 * don't attempt to fault it in; just propagate an error.
	 */
	mmap_read_lock(mm);
	if (binder_alloc_is_mapped(alloc))
		npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
					       &page, NULL);
	mmap_read_unlock(mm);

	return npages > 0 ? page : NULL;
}

static int binder_page_insert(struct binder_alloc *alloc,
			      unsigned long addr,
			      struct page *page)
{
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	int ret = -ESRCH;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma) {
		if (binder_alloc_is_mapped(alloc))
			ret = vm_insert_page(vma, addr, page);
		vma_end_read(vma);
		return ret;
	}

	/* fall back to mmap_lock */
	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma && binder_alloc_is_mapped(alloc))
		ret = vm_insert_page(vma, addr, page);
	mmap_read_unlock(mm);

	return ret;
}

static struct page *binder_page_alloc(struct binder_alloc *alloc,
				      unsigned long index)
{
	struct binder_shrinker_mdata *mdata;
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	/* allocate and install shrinker metadata under page->private */
	mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
	if (!mdata) {
		__free_page(page);
		return NULL;
	}

	mdata->alloc = alloc;
	mdata->page_index = index;
	INIT_LIST_HEAD(&mdata->lru);
	set_page_private(page, (unsigned long)mdata);

	return page;
}

static void binder_free_page(struct page *page)
{
	kfree((struct binder_shrinker_mdata *)page_private(page));
	__free_page(page);
}

static int binder_install_single_page(struct binder_alloc *alloc,
				      unsigned long index,
				      unsigned long addr)
{
	struct page *page;
	int ret;

	if (!mmget_not_zero(alloc->mm))
		return -ESRCH;

	page = binder_page_alloc(alloc, index);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = binder_page_insert(alloc, addr, page);
	switch (ret) {
	case -EBUSY:
		/*
		 * -EBUSY is OK. Someone else installed the pte first,
		 * but alloc->pages[index] has not been updated yet.
		 * Discard our page and look up the one already installed.
		 */
		ret = 0;
		binder_free_page(page);
		page = binder_page_lookup(alloc, addr);
		if (!page) {
			pr_err("%d: failed to find page at offset %lx\n",
			       alloc->pid, addr - alloc->vm_start);
			ret = -ESRCH;
			break;
		}
		fallthrough;
	case 0:
		/* Mark page installation complete and safe to use */
		binder_set_installed_page(alloc, index, page);
		break;
	default:
		binder_free_page(page);
		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
		       alloc->pid, __func__, addr - alloc->vm_start, ret);
		break;
	}
out:
	mmput_async(alloc->mm);
	return ret;
}

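/*
 * Install any pages still missing from the range of @buffer. Pages shared
 * with a neighbouring buffer may already be installed and are skipped via
 * the binder_get_installed_page() check.
 */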
static int binder_install_buffer_pages(struct binder_alloc *alloc,
				       struct binder_buffer *buffer,
				       size_t size)
{
	unsigned long start, final;
	unsigned long page_addr;

	start = buffer->user_data & PAGE_MASK;
	final = PAGE_ALIGN(buffer->user_data + size);

	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
		unsigned long index;
		int ret;

		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		if (binder_get_installed_page(alloc, index))
			continue;

		trace_binder_alloc_page_start(alloc, index);

		ret = binder_install_single_page(alloc, index, page_addr);
		if (ret)
			return ret;

		trace_binder_alloc_page_end(alloc, index);
	}

	return 0;
}

/* The range of pages should exclude those shared with other buffers */
static void binder_lru_freelist_del(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	unsigned long page_addr;
	struct page *page;

	trace_binder_update_page_range(alloc, true, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		unsigned long index;
		bool on_lru;

		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		page = binder_get_installed_page(alloc, index);

		if (page) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(alloc->freelist,
					      page_to_lru(page),
					      page_to_nid(page),
					      NULL);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;
	}
}

static void debug_no_space_locked(struct binder_alloc *alloc)
{
	size_t largest_alloc_size = 0;
	struct binder_buffer *buffer;
	size_t allocated_buffers = 0;
	size_t largest_free_size = 0;
	size_t total_alloc_size = 0;
	size_t total_free_size = 0;
	size_t free_buffers = 0;
	size_t buffer_size;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		allocated_buffers++;
		total_alloc_size += buffer_size;
		if (buffer_size > largest_alloc_size)
			largest_alloc_size = buffer_size;
	}

	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		free_buffers++;
		total_free_size += buffer_size;
		if (buffer_size > largest_free_size)
			largest_free_size = buffer_size;
	}

	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
			   total_alloc_size, allocated_buffers,
			   largest_alloc_size, total_free_size,
			   free_buffers, largest_free_size);
}

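/*
 * Illustrative numbers, assuming the maximum 4 MiB mapping: async space
 * starts at 2 MiB (half the mapping), detection below kicks in once free
 * async space drops under buffer_size / 10 (~410 KiB), and a caller is
 * flagged once it holds more than 50 async buffers or more than
 * buffer_size / 4 (1 MiB) worth of them.
 */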
static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	/*
	 * Find the number and total size of buffers allocated by the current
	 * caller;
	 * The idea is that once we cross the threshold, whoever is responsible
	 * for the low async space is likely to try to send another async txn,
	 * and at some point we'll catch them in the act. This is more efficient
	 * than keeping a map per pid.
	 */
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	/*
	 * Only start detecting spammers once we have less than 20% of async
	 * space left (which is less than 10% of total buffer size).
	 */
	if (alloc->free_async_space >= alloc->buffer_size / 10) {
		alloc->oneway_spam_detected = false;
		return false;
	}

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		 n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
			      alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}

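/*
 * Best-fit allocation: walk the size-ordered free tree remembering the
 * smallest buffer that is still large enough, then split the tail off an
 * oversized match as a new free buffer.
 */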
/* Callers preallocate @new_buffer; it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				struct binder_buffer *new_buffer,
				size_t size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct rb_node *best_fit = NULL;
	struct binder_buffer *buffer;
	unsigned long next_used_page;
	unsigned long curr_last_page;
	size_t buffer_size;

	if (is_async && alloc->free_async_space < size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;
			break;
		}
	}

	if (unlikely(!best_fit)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		debug_no_space_locked(alloc);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	if (buffer_size != size) {
		/* Found an oversized buffer that needs to be split */
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		WARN_ON(n || buffer_size == size);
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
		new_buffer = NULL;
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	/*
	 * Now we remove the pages from the freelist. A clever calculation
	 * with buffer_size determines if the last page is shared with an
	 * adjacent in-use buffer. In that case, the page has already been
	 * removed from the freelist, so we trim our range short.
	 */
	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
				min(next_used_page, curr_last_page));

	rb_erase(&buffer->rb_node, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
		if (debug_low_async_space_locked(alloc))
			buffer->oneway_spam_suspect = true;
	}

out:
	/* Discard possibly unused new_buffer */
	kfree(new_buffer);
	return buffer;
}

/* Calculate the sanitized total size; returns 0 for an invalid request */
static inline size_t sanitized_size(size_t data_size,
				    size_t offsets_size,
				    size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}
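
/*
 * A worked example of sanitized_size() above, purely illustrative and
 * assuming 64-bit pointers: data_size = 17, offsets_size = 12 and
 * extra_buffers_size = 0 yields ALIGN(17, 8) + ALIGN(12, 8) + 0 =
 * 24 + 16 + 0 = 40 bytes.
 */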

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer, *next;
	size_t size;
	int ret;

	/* Check binder_alloc is fully initialized */
	if (!binder_alloc_is_mapped(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
	if (unlikely(!size)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
				   alloc->pid, data_size, offsets_size,
				   extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}

	/* Preallocate the next buffer */
	next = kzalloc(sizeof(*next), GFP_KERNEL);
	if (!next)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
	if (IS_ERR(buffer)) {
		mutex_unlock(&alloc->mutex);
		goto out;
	}

	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = current->tgid;
	mutex_unlock(&alloc->mutex);

	ret = binder_install_buffer_pages(alloc, buffer, size);
	if (ret) {
		binder_alloc_free_buf(alloc, buffer);
		buffer = ERR_PTR(ret);
	}
out:
	return buffer;
}
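
/*
 * A rough caller-side sketch (illustrative only; the real caller is the
 * transaction path in binder.c):
 *
 *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				      extra_buffers_size, is_async);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0, user_ptr,
 *					     data_size))
 *		... fail the transaction ...
 *	...
 *	binder_alloc_free_buf(alloc, buffer);	 // once fully consumed
 */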

static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
	return buffer->user_data & PAGE_MASK;
}

static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (buffer->user_data - 1) & PAGE_MASK;
}

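/*
 * Delete @buffer's metadata from the buffer list. Its start page can be
 * returned to the freelist only when neither the previous buffer ends on
 * that page nor the next buffer starts on it; otherwise the page is left
 * for the owning neighbour to account for.
 */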
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next;

	if (PAGE_ALIGNED(buffer->user_data))
		goto skip_freelist;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
		goto skip_freelist;

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer))
			goto skip_freelist;
	}

	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
				buffer_start_page(buffer) + PAGE_SIZE);
skip_freelist:
	list_del(&buffer->entry);
	kfree(buffer);
}

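/*
 * Return @buffer to the free tree, handing its pages back to the
 * freelist and coalescing it with physically adjacent free buffers so
 * that two free buffers never sit next to each other in the list.
 */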
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->vm_start);
	BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
				(buffer->user_data + buffer_size) & PAGE_MASK);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it cannot be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change. For example, with 4 KiB pages, an overall offset
 * of 0x1804 resolves to pages[] index 1 and page offset 0x804.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->vm_start);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;

	*pgoffp = pgoff;

	return alloc->pages[index];
}

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			    struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	struct binder_buffer *buffer;
	const char *failure_string;
	int ret;

	if (unlikely(vma->vm_mm != alloc->mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->vm_start = vma->vm_start;

	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
				sizeof(alloc->pages[0]),
				GFP_KERNEL);
	if (!alloc->pages) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->vm_start;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_mapped(alloc, true);

	return 0;

err_alloc_buf_struct_failed:
	kvfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->vm_start = 0;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}

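/**
 * binder_alloc_deferred_release() - destroy the binder_alloc
 * @alloc: binder_alloc for this proc
 *
 * Free all remaining buffers and the pages backing them, then drop the
 * reference on @alloc->mm. The mapping itself must already be gone:
 * alloc->mapped is required to be false by the time this runs.
 */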
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->mapped);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			struct page *page;
			bool on_lru;

			page = binder_get_installed_page(alloc, i);
			if (!page)
				continue;

			on_lru = list_lru_del(alloc->freelist,
					      page_to_lru(page),
					      page_to_nid(page),
					      NULL);
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d %s\n",
				     __func__, alloc->pid, i,
				     on_lru ? "on lru" : "active");
			binder_free_page(page);
			page_count++;
		}
	}
	mutex_unlock(&alloc->mutex);
	kvfree(alloc->pages);
	if (alloc->mm)
		mmdrop(alloc->mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct binder_buffer *buffer;
	struct rb_node *n;

	guard(mutex)(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
			   buffer->debug_id,
			   buffer->user_data - alloc->vm_start,
			   buffer->data_size, buffer->offsets_size,
			   buffer->extra_buffers_size,
			   buffer->transaction ? "active" : "delivered");
	}
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_is_mapped(alloc)) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = binder_get_installed_page(alloc, i);
			if (!page)
				free++;
			else if (list_empty(page_to_lru(page)))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	guard(mutex)(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->mapped to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_mapped(alloc, false);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       void *cb_arg)
	__must_hold(&lru->lock)
{
	struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
	struct binder_alloc *alloc = mdata->alloc;
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	struct page *page_to_free;
	unsigned long page_addr;
	int mm_locked = 0;
	size_t index;

	if (!mmget_not_zero(mm))
		goto err_mmget;

	index = mdata->page_index;
	page_addr = alloc->vm_start + index * PAGE_SIZE;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, page_addr);
	if (!vma) {
		/* fall back to mmap_lock */
		if (!mmap_read_trylock(mm))
			goto err_mmap_read_lock_failed;
		mm_locked = 1;
		vma = vma_lookup(mm, page_addr);
	}

	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	/*
	 * Since a binder_alloc can only be mapped once, we ensure
	 * the vma corresponds to this mapping by checking whether
	 * the binder_alloc is still mapped.
	 */
	if (vma && !binder_alloc_is_mapped(alloc))
		goto err_invalid_vma;

	trace_binder_unmap_kernel_start(alloc, index);

	page_to_free = alloc->pages[index];
	binder_set_installed_page(alloc, index, NULL);

	trace_binder_unmap_kernel_end(alloc, index);

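	/*
	 * Isolate the item and drop the lru lock before touching the mm;
	 * returning LRU_REMOVED_RETRY below tells list_lru_walk() that
	 * the lock was released.
	 */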
	list_lru_isolate(lru, item);
	spin_unlock(&lru->lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);
	}

	mutex_unlock(&alloc->mutex);
	if (mm_locked)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
	mmput_async(mm);
	binder_free_page(page_to_free);

	return LRU_REMOVED_RETRY;

err_invalid_vma:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	if (mm_locked)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_freelist);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
}

static struct shrinker *binder_shrinker;

static void __binder_alloc_init(struct binder_alloc *alloc,
				struct list_lru *freelist)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
	mmgrab(alloc->mm);
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
	alloc->freelist = freelist;
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	__binder_alloc_init(alloc, &binder_freelist);
}

int binder_alloc_shrinker_init(void)
{
	int ret;

	ret = list_lru_init(&binder_freelist);
	if (ret)
		return ret;

	binder_shrinker = shrinker_alloc(0, "android-binder");
	if (!binder_shrinker) {
		list_lru_destroy(&binder_freelist);
		return -ENOMEM;
	}

	binder_shrinker->count_objects = binder_shrink_count;
	binder_shrinker->scan_objects = binder_shrink_scan;

	shrinker_register(binder_shrinker);

	return 0;
}

void binder_alloc_shrinker_exit(void)
{
	shrinker_free(binder_shrinker);
	list_lru_destroy(&binder_freelist);
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_copy_user_to_buffer() - copy from source user space to target buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

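/*
 * Common engine for copies between a kernel buffer and a binder buffer.
 * Pages are touched through memcpy_to_page()/memcpy_from_page(), which
 * map each page locally only for the duration of the copy, so no
 * permanent kernel mapping of the buffer is needed.
 */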
static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and a multiple of 32 bits in size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}
1403