xref: /linux/drivers/android/binder.c (revision ea518afc992032f7570c0a89ac9240b387dc0faf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock to be held on entry indicate the
33  * required lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
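
/*
 * Illustrative sketch (not part of the driver): taking all three
 * locks for the same proc in the documented order. Releasing in the
 * reverse order is the convention used throughout this file:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */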
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70 
71 #include <uapi/linux/android/binder.h>
72 
73 #include <linux/cacheflush.h>
74 
75 #include "binder_internal.h"
76 #include "binder_trace.h"
77 
78 static HLIST_HEAD(binder_deferred_list);
79 static DEFINE_MUTEX(binder_deferred_lock);
80 
81 static HLIST_HEAD(binder_devices);
82 static HLIST_HEAD(binder_procs);
83 static DEFINE_MUTEX(binder_procs_lock);
84 
85 static HLIST_HEAD(binder_dead_nodes);
86 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87 
88 static struct dentry *binder_debugfs_dir_entry_root;
89 static struct dentry *binder_debugfs_dir_entry_proc;
90 static atomic_t binder_last_id;
91 
92 static int proc_show(struct seq_file *m, void *unused);
93 DEFINE_SHOW_ATTRIBUTE(proc);
94 
95 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
96 
97 enum {
98 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
99 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
100 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
101 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
102 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
103 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
104 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
105 	BINDER_DEBUG_USER_REFS              = 1U << 7,
106 	BINDER_DEBUG_THREADS                = 1U << 8,
107 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
108 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
109 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
110 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
111 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
112 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
113 };
114 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
115 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
116 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
117 
118 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
119 module_param_named(devices, binder_devices_param, charp, 0444);
120 
121 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
122 static int binder_stop_on_user_error;
123 
124 static int binder_set_stop_on_user_error(const char *val,
125 					 const struct kernel_param *kp)
126 {
127 	int ret;
128 
129 	ret = param_set_int(val, kp);
130 	if (binder_stop_on_user_error < 2)
131 		wake_up(&binder_user_error_wait);
132 	return ret;
133 }
134 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
135 	param_get_int, &binder_stop_on_user_error, 0644);
136 
137 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
138 {
139 	struct va_format vaf;
140 	va_list args;
141 
142 	if (binder_debug_mask & mask) {
143 		va_start(args, format);
144 		vaf.va = &args;
145 		vaf.fmt = format;
146 		pr_info_ratelimited("%pV", &vaf);
147 		va_end(args);
148 	}
149 }
150 
151 #define binder_txn_error(x...) \
152 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
153 
154 static __printf(1, 2) void binder_user_error(const char *format, ...)
155 {
156 	struct va_format vaf;
157 	va_list args;
158 
159 	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
160 		va_start(args, format);
161 		vaf.va = &args;
162 		vaf.fmt = format;
163 		pr_info_ratelimited("%pV", &vaf);
164 		va_end(args);
165 	}
166 
167 	if (binder_stop_on_user_error)
168 		binder_stop_on_user_error = 2;
169 }
170 
171 #define binder_set_extended_error(ee, _id, _command, _param) \
172 	do { \
173 		(ee)->id = _id; \
174 		(ee)->command = _command; \
175 		(ee)->param = _param; \
176 	} while (0)
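
/*
 * Usage sketch for the macro above (hedged; the real call sites are
 * in the transaction code later in this file): record why a command
 * failed so that userspace can retrieve it, e.g. via the
 * extended-error ioctl:
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, ret);
 */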
177 
178 #define to_flat_binder_object(hdr) \
179 	container_of(hdr, struct flat_binder_object, hdr)
180 
181 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
182 
183 #define to_binder_buffer_object(hdr) \
184 	container_of(hdr, struct binder_buffer_object, hdr)
185 
186 #define to_binder_fd_array_object(hdr) \
187 	container_of(hdr, struct binder_fd_array_object, hdr)
188 
189 static struct binder_stats binder_stats;
190 
191 static inline void binder_stats_deleted(enum binder_stat_types type)
192 {
193 	atomic_inc(&binder_stats.obj_deleted[type]);
194 }
195 
196 static inline void binder_stats_created(enum binder_stat_types type)
197 {
198 	atomic_inc(&binder_stats.obj_created[type]);
199 }
200 
201 struct binder_transaction_log_entry {
202 	int debug_id;
203 	int debug_id_done;
204 	int call_type;
205 	int from_proc;
206 	int from_thread;
207 	int target_handle;
208 	int to_proc;
209 	int to_thread;
210 	int to_node;
211 	int data_size;
212 	int offsets_size;
213 	int return_error_line;
214 	uint32_t return_error;
215 	uint32_t return_error_param;
216 	char context_name[BINDERFS_MAX_NAME + 1];
217 };
218 
219 struct binder_transaction_log {
220 	atomic_t cur;
221 	bool full;
222 	struct binder_transaction_log_entry entry[32];
223 };
224 
225 static struct binder_transaction_log binder_transaction_log;
226 static struct binder_transaction_log binder_transaction_log_failed;
227 
228 static struct binder_transaction_log_entry *binder_transaction_log_add(
229 	struct binder_transaction_log *log)
230 {
231 	struct binder_transaction_log_entry *e;
232 	unsigned int cur = atomic_inc_return(&log->cur);
233 
234 	if (cur >= ARRAY_SIZE(log->entry))
235 		log->full = true;
236 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
237 	WRITE_ONCE(e->debug_id_done, 0);
238 	/*
239 	 * write-barrier to synchronize access to e->debug_id_done.
240 	 * We make sure the initialized 0 value is seen before
241 	 * the other fields are zeroed by the memset() below.
242 	 */
243 	smp_wmb();
244 	memset(e, 0, sizeof(*e));
245 	return e;
246 }
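
/*
 * Sketch of the matching reader side (an assumption for illustration;
 * the debugfs dump code is expected to pair with the smp_wmb() above):
 *
 *	int debug_id_done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	...print the entry; it is stable if debug_id_done is nonzero
 *	   and equals e->debug_id...
 */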
247 
248 enum binder_deferred_state {
249 	BINDER_DEFERRED_FLUSH        = 0x01,
250 	BINDER_DEFERRED_RELEASE      = 0x02,
251 };
252 
253 enum {
254 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
255 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
256 	BINDER_LOOPER_STATE_EXITED      = 0x04,
257 	BINDER_LOOPER_STATE_INVALID     = 0x08,
258 	BINDER_LOOPER_STATE_WAITING     = 0x10,
259 	BINDER_LOOPER_STATE_POLL        = 0x20,
260 };
261 
262 /**
263  * binder_proc_lock() - Acquire outer lock for given binder_proc
264  * @proc:         struct binder_proc to acquire
265  *
266  * Acquires proc->outer_lock. Used to protect binder_ref
267  * structures associated with the given proc.
268  */
269 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
270 static void
271 _binder_proc_lock(struct binder_proc *proc, int line)
272 	__acquires(&proc->outer_lock)
273 {
274 	binder_debug(BINDER_DEBUG_SPINLOCKS,
275 		     "%s: line=%d\n", __func__, line);
276 	spin_lock(&proc->outer_lock);
277 }
278 
279 /**
280  * binder_proc_unlock() - Release spinlock for given binder_proc
281  * @proc:                struct binder_proc to release
282  *
283  * Release lock acquired via binder_proc_lock()
284  */
285 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
286 static void
287 _binder_proc_unlock(struct binder_proc *proc, int line)
288 	__releases(&proc->outer_lock)
289 {
290 	binder_debug(BINDER_DEBUG_SPINLOCKS,
291 		     "%s: line=%d\n", __func__, line);
292 	spin_unlock(&proc->outer_lock);
293 }
294 
295 /**
296  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
297  * @proc:         struct binder_proc to acquire
298  *
299  * Acquires proc->inner_lock. Used to protect todo lists
300  */
301 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
302 static void
303 _binder_inner_proc_lock(struct binder_proc *proc, int line)
304 	__acquires(&proc->inner_lock)
305 {
306 	binder_debug(BINDER_DEBUG_SPINLOCKS,
307 		     "%s: line=%d\n", __func__, line);
308 	spin_lock(&proc->inner_lock);
309 }
310 
311 /**
312  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
313  * @proc:         struct binder_proc to release
314  *
315  * Release lock acquired via binder_inner_proc_lock()
316  */
317 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
318 static void
319 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
320 	__releases(&proc->inner_lock)
321 {
322 	binder_debug(BINDER_DEBUG_SPINLOCKS,
323 		     "%s: line=%d\n", __func__, line);
324 	spin_unlock(&proc->inner_lock);
325 }
326 
327 /**
328  * binder_node_lock() - Acquire spinlock for given binder_node
329  * @node:         struct binder_node to acquire
330  *
331  * Acquires node->lock. Used to protect binder_node fields
332  */
333 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
334 static void
335 _binder_node_lock(struct binder_node *node, int line)
336 	__acquires(&node->lock)
337 {
338 	binder_debug(BINDER_DEBUG_SPINLOCKS,
339 		     "%s: line=%d\n", __func__, line);
340 	spin_lock(&node->lock);
341 }
342 
343 /**
344  * binder_node_unlock() - Release spinlock for given binder_node
345  * @node:         struct binder_node to release
346  *
347  * Release lock acquired via binder_node_lock()
348  */
349 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
350 static void
351 _binder_node_unlock(struct binder_node *node, int line)
352 	__releases(&node->lock)
353 {
354 	binder_debug(BINDER_DEBUG_SPINLOCKS,
355 		     "%s: line=%d\n", __func__, line);
356 	spin_unlock(&node->lock);
357 }
358 
359 /**
360  * binder_node_inner_lock() - Acquire node and inner locks
361  * @node:         struct binder_node to acquire
362  *
363  * Acquires node->lock. If node->proc is non-NULL, also acquires
364  * proc->inner_lock. Used to protect binder_node fields
365  */
366 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
367 static void
368 _binder_node_inner_lock(struct binder_node *node, int line)
369 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
370 {
371 	binder_debug(BINDER_DEBUG_SPINLOCKS,
372 		     "%s: line=%d\n", __func__, line);
373 	spin_lock(&node->lock);
374 	if (node->proc)
375 		binder_inner_proc_lock(node->proc);
376 	else
377 		/* annotation for sparse */
378 		__acquire(&node->proc->inner_lock);
379 }
380 
381 /**
382  * binder_node_inner_unlock() - Release node and inner locks
383  * @node:         struct binder_node to release
384  *
385  * Release locks acquired via binder_node_inner_lock()
386  */
387 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
388 static void
389 _binder_node_inner_unlock(struct binder_node *node, int line)
390 	__releases(&node->lock) __releases(&node->proc->inner_lock)
391 {
392 	struct binder_proc *proc = node->proc;
393 
394 	binder_debug(BINDER_DEBUG_SPINLOCKS,
395 		     "%s: line=%d\n", __func__, line);
396 	if (proc)
397 		binder_inner_proc_unlock(proc);
398 	else
399 		/* annotation for sparse */
400 		__release(&node->proc->inner_lock);
401 	spin_unlock(&node->lock);
402 }
403 
404 static bool binder_worklist_empty_ilocked(struct list_head *list)
405 {
406 	return list_empty(list);
407 }
408 
409 /**
410  * binder_worklist_empty() - Check if no items on the work list
411  * @proc:       binder_proc associated with list
412  * @list:	list to check
413  *
414  * Return: true if there are no items on list, else false
415  */
416 static bool binder_worklist_empty(struct binder_proc *proc,
417 				  struct list_head *list)
418 {
419 	bool ret;
420 
421 	binder_inner_proc_lock(proc);
422 	ret = binder_worklist_empty_ilocked(list);
423 	binder_inner_proc_unlock(proc);
424 	return ret;
425 }
426 
427 /**
428  * binder_enqueue_work_ilocked() - Add an item to the work list
429  * @work:         struct binder_work to add to list
430  * @target_list:  list to add work to
431  *
432  * Adds the work to the specified list. Asserts that work
433  * is not already on a list.
434  *
435  * Requires the proc->inner_lock to be held.
436  */
437 static void
438 binder_enqueue_work_ilocked(struct binder_work *work,
439 			   struct list_head *target_list)
440 {
441 	BUG_ON(target_list == NULL);
442 	BUG_ON(work->entry.next && !list_empty(&work->entry));
443 	list_add_tail(&work->entry, target_list);
444 }
445 
446 /**
447  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
448  * @thread:       thread to queue work to
449  * @work:         struct binder_work to add to list
450  *
451  * Adds the work to the todo list of the thread. Doesn't set the process_todo
452  * flag, which means that (if it wasn't already set) the thread will go to
453  * sleep without handling this work when it next reads from the driver.
454  *
455  * Requires the proc->inner_lock to be held.
456  */
457 static void
458 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
459 					    struct binder_work *work)
460 {
461 	WARN_ON(!list_empty(&thread->waiting_thread_node));
462 	binder_enqueue_work_ilocked(work, &thread->todo);
463 }
464 
465 /**
466  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
467  * @thread:       thread to queue work to
468  * @work:         struct binder_work to add to list
469  *
470  * Adds the work to the todo list of the thread, and enables processing
471  * of the todo queue.
472  *
473  * Requires the proc->inner_lock to be held.
474  */
475 static void
476 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
477 				   struct binder_work *work)
478 {
479 	WARN_ON(!list_empty(&thread->waiting_thread_node));
480 	binder_enqueue_work_ilocked(work, &thread->todo);
481 	thread->process_todo = true;
482 }
483 
484 /**
485  * binder_enqueue_thread_work() - Add an item to the thread work list
486  * @thread:       thread to queue work to
487  * @work:         struct binder_work to add to list
488  *
489  * Adds the work to the todo list of the thread, and enables processing
490  * of the todo queue.
491  */
492 static void
493 binder_enqueue_thread_work(struct binder_thread *thread,
494 			   struct binder_work *work)
495 {
496 	binder_inner_proc_lock(thread->proc);
497 	binder_enqueue_thread_work_ilocked(thread, work);
498 	binder_inner_proc_unlock(thread->proc);
499 }
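
/*
 * Illustrative contrast between the two enqueue variants above
 * (a sketch, assuming a thread blocked reading from the driver):
 *
 *	binder_enqueue_thread_work_ilocked(thread, w1);
 *		// sets process_todo: w1 is handled on the next read
 *	binder_enqueue_deferred_thread_work_ilocked(thread, w2);
 *		// process_todo untouched: w2 rides along with the
 *		// next work item that does set it
 */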
500 
501 static void
502 binder_dequeue_work_ilocked(struct binder_work *work)
503 {
504 	list_del_init(&work->entry);
505 }
506 
507 /**
508  * binder_dequeue_work() - Removes an item from the work list
509  * @proc:         binder_proc associated with list
510  * @work:         struct binder_work to remove from list
511  *
512  * Removes the specified work item from whatever list it is on.
513  * Can safely be called if work is not on any list.
514  */
515 static void
516 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
517 {
518 	binder_inner_proc_lock(proc);
519 	binder_dequeue_work_ilocked(work);
520 	binder_inner_proc_unlock(proc);
521 }
522 
523 static struct binder_work *binder_dequeue_work_head_ilocked(
524 					struct list_head *list)
525 {
526 	struct binder_work *w;
527 
528 	w = list_first_entry_or_null(list, struct binder_work, entry);
529 	if (w)
530 		list_del_init(&w->entry);
531 	return w;
532 }
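
/*
 * Typical drain pattern for the helper above (a sketch; the release
 * paths later in this file follow the same shape):
 *
 *	struct binder_work *w;
 *
 *	binder_inner_proc_lock(proc);
 *	while ((w = binder_dequeue_work_head_ilocked(list))) {
 *		binder_inner_proc_unlock(proc);
 *		...handle or free w without the spinlock held...
 *		binder_inner_proc_lock(proc);
 *	}
 *	binder_inner_proc_unlock(proc);
 */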
533 
534 static void
535 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
536 static void binder_free_thread(struct binder_thread *thread);
537 static void binder_free_proc(struct binder_proc *proc);
538 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
539 
540 static bool binder_has_work_ilocked(struct binder_thread *thread,
541 				    bool do_proc_work)
542 {
543 	return thread->process_todo ||
544 		thread->looper_need_return ||
545 		(do_proc_work &&
546 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
547 }
548 
549 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
550 {
551 	bool has_work;
552 
553 	binder_inner_proc_lock(thread->proc);
554 	has_work = binder_has_work_ilocked(thread, do_proc_work);
555 	binder_inner_proc_unlock(thread->proc);
556 
557 	return has_work;
558 }
559 
560 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
561 {
562 	return !thread->transaction_stack &&
563 		binder_worklist_empty_ilocked(&thread->todo) &&
564 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
565 				   BINDER_LOOPER_STATE_REGISTERED));
566 }
567 
568 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
569 					       bool sync)
570 {
571 	struct rb_node *n;
572 	struct binder_thread *thread;
573 
574 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
575 		thread = rb_entry(n, struct binder_thread, rb_node);
576 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
577 		    binder_available_for_proc_work_ilocked(thread)) {
578 			if (sync)
579 				wake_up_interruptible_sync(&thread->wait);
580 			else
581 				wake_up_interruptible(&thread->wait);
582 		}
583 	}
584 }
585 
586 /**
587  * binder_select_thread_ilocked() - selects a thread for doing proc work.
588  * @proc:	process to select a thread from
589  *
590  * Note that calling this function moves the thread off the waiting_threads
591  * list, so it can only be woken up by the caller of this function, or a
592  * signal. Therefore, callers *should* always wake up the thread this function
593  * returns.
594  *
595  * Return:	If there's a thread currently waiting for process work,
596  *		returns that thread. Otherwise returns NULL.
597  */
598 static struct binder_thread *
599 binder_select_thread_ilocked(struct binder_proc *proc)
600 {
601 	struct binder_thread *thread;
602 
603 	assert_spin_locked(&proc->inner_lock);
604 	thread = list_first_entry_or_null(&proc->waiting_threads,
605 					  struct binder_thread,
606 					  waiting_thread_node);
607 
608 	if (thread)
609 		list_del_init(&thread->waiting_thread_node);
610 
611 	return thread;
612 }
613 
614 /**
615  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
616  * @proc:	process to wake up a thread in
617  * @thread:	specific thread to wake-up (may be NULL)
618  * @sync:	whether to do a synchronous wake-up
619  *
620  * This function wakes up a thread in the @proc process.
621  * The caller may provide a specific thread to wake-up in
622  * the @thread parameter. If @thread is NULL, this function
623  * will wake up threads that have called poll().
624  *
625  * Note that for this function to work as expected, callers
626  * should first call binder_select_thread() to find a thread
627  * to handle the work (if they don't have a thread already),
628  * and pass the result into the @thread parameter.
629  */
630 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
631 					 struct binder_thread *thread,
632 					 bool sync)
633 {
634 	assert_spin_locked(&proc->inner_lock);
635 
636 	if (thread) {
637 		if (sync)
638 			wake_up_interruptible_sync(&thread->wait);
639 		else
640 			wake_up_interruptible(&thread->wait);
641 		return;
642 	}
643 
644 	/* Didn't find a thread waiting for proc work; this can happen
645 	 * in two scenarios:
646 	 * 1. All threads are busy handling transactions
647 	 *    In that case, one of those threads should call back into
648 	 *    the kernel driver soon and pick up this work.
649 	 * 2. Threads are using the (e)poll interface, in which case
650 	 *    they may be blocked on the waitqueue without having been
651 	 *    added to waiting_threads. For this case, we just iterate
652 	 *    over all threads not handling transaction work, and
653 	 *    wake them all up. We wake all because we don't know whether
654 	 *    a thread that called into (e)poll is handling non-binder
655 	 *    work currently.
656 	 */
657 	binder_wakeup_poll_threads_ilocked(proc, sync);
658 }
659 
660 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
661 {
662 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
663 
664 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
665 }
666 
667 static void binder_set_nice(long nice)
668 {
669 	long min_nice;
670 
671 	if (can_nice(current, nice)) {
672 		set_user_nice(current, nice);
673 		return;
674 	}
675 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
676 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
677 		     "%d: nice value %ld not allowed use %ld instead\n",
678 		      current->pid, nice, min_nice);
679 	set_user_nice(current, min_nice);
680 	if (min_nice <= MAX_NICE)
681 		return;
682 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
683 }
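
/*
 * Worked example (assuming rlimit_to_nice(v) == MAX_NICE - v + 1): a
 * task with RLIMIT_NICE rlim_cur == 10 asking for nice -5 is capped
 * to min_nice == 19 - 10 + 1 == 10, while rlim_cur == 0 yields
 * min_nice == 20 > MAX_NICE and triggers the RLIMIT_NICE warning.
 */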
684 
685 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
686 						   binder_uintptr_t ptr)
687 {
688 	struct rb_node *n = proc->nodes.rb_node;
689 	struct binder_node *node;
690 
691 	assert_spin_locked(&proc->inner_lock);
692 
693 	while (n) {
694 		node = rb_entry(n, struct binder_node, rb_node);
695 
696 		if (ptr < node->ptr)
697 			n = n->rb_left;
698 		else if (ptr > node->ptr)
699 			n = n->rb_right;
700 		else {
701 			/*
702 			 * take an implicit weak reference
703 			 * to ensure node stays alive until
704 			 * call to binder_put_node()
705 			 */
706 			binder_inc_node_tmpref_ilocked(node);
707 			return node;
708 		}
709 	}
710 	return NULL;
711 }
712 
713 static struct binder_node *binder_get_node(struct binder_proc *proc,
714 					   binder_uintptr_t ptr)
715 {
716 	struct binder_node *node;
717 
718 	binder_inner_proc_lock(proc);
719 	node = binder_get_node_ilocked(proc, ptr);
720 	binder_inner_proc_unlock(proc);
721 	return node;
722 }
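
/*
 * Usage sketch: binder_get_node() returns the node with a temporary
 * reference held, so every successful lookup must be balanced with
 * binder_put_node() (defined below):
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);
 *	}
 */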
723 
724 static struct binder_node *binder_init_node_ilocked(
725 						struct binder_proc *proc,
726 						struct binder_node *new_node,
727 						struct flat_binder_object *fp)
728 {
729 	struct rb_node **p = &proc->nodes.rb_node;
730 	struct rb_node *parent = NULL;
731 	struct binder_node *node;
732 	binder_uintptr_t ptr = fp ? fp->binder : 0;
733 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
734 	__u32 flags = fp ? fp->flags : 0;
735 
736 	assert_spin_locked(&proc->inner_lock);
737 
738 	while (*p) {
739 
740 		parent = *p;
741 		node = rb_entry(parent, struct binder_node, rb_node);
742 
743 		if (ptr < node->ptr)
744 			p = &(*p)->rb_left;
745 		else if (ptr > node->ptr)
746 			p = &(*p)->rb_right;
747 		else {
748 			/*
749 			 * A matching node is already in
750 			 * the rb tree. Abandon the init
751 			 * and return it.
752 			 */
753 			binder_inc_node_tmpref_ilocked(node);
754 			return node;
755 		}
756 	}
757 	node = new_node;
758 	binder_stats_created(BINDER_STAT_NODE);
759 	node->tmp_refs++;
760 	rb_link_node(&node->rb_node, parent, p);
761 	rb_insert_color(&node->rb_node, &proc->nodes);
762 	node->debug_id = atomic_inc_return(&binder_last_id);
763 	node->proc = proc;
764 	node->ptr = ptr;
765 	node->cookie = cookie;
766 	node->work.type = BINDER_WORK_NODE;
767 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
768 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
769 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
770 	spin_lock_init(&node->lock);
771 	INIT_LIST_HEAD(&node->work.entry);
772 	INIT_LIST_HEAD(&node->async_todo);
773 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
774 		     "%d:%d node %d u%016llx c%016llx created\n",
775 		     proc->pid, current->pid, node->debug_id,
776 		     (u64)node->ptr, (u64)node->cookie);
777 
778 	return node;
779 }
780 
781 static struct binder_node *binder_new_node(struct binder_proc *proc,
782 					   struct flat_binder_object *fp)
783 {
784 	struct binder_node *node;
785 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
786 
787 	if (!new_node)
788 		return NULL;
789 	binder_inner_proc_lock(proc);
790 	node = binder_init_node_ilocked(proc, new_node, fp);
791 	binder_inner_proc_unlock(proc);
792 	if (node != new_node)
793 		/*
794 		 * The node was already added by another thread
795 		 */
796 		kfree(new_node);
797 
798 	return node;
799 }
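
/*
 * Design note: kzalloc() may sleep, so binder_new_node() allocates
 * before taking the inner spinlock and lets binder_init_node_ilocked()
 * resolve any race under the lock, returning the winning node; the
 * loser's allocation is simply kfree'd. binder_inc_ref_for_node()
 * below uses the same unlock/allocate/relock pattern for refs.
 */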
800 
801 static void binder_free_node(struct binder_node *node)
802 {
803 	kfree(node);
804 	binder_stats_deleted(BINDER_STAT_NODE);
805 }
806 
807 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
808 				    int internal,
809 				    struct list_head *target_list)
810 {
811 	struct binder_proc *proc = node->proc;
812 
813 	assert_spin_locked(&node->lock);
814 	if (proc)
815 		assert_spin_locked(&proc->inner_lock);
816 	if (strong) {
817 		if (internal) {
818 			if (target_list == NULL &&
819 			    node->internal_strong_refs == 0 &&
820 			    !(node->proc &&
821 			      node == node->proc->context->binder_context_mgr_node &&
822 			      node->has_strong_ref)) {
823 				pr_err("invalid inc strong node for %d\n",
824 					node->debug_id);
825 				return -EINVAL;
826 			}
827 			node->internal_strong_refs++;
828 		} else
829 			node->local_strong_refs++;
830 		if (!node->has_strong_ref && target_list) {
831 			struct binder_thread *thread = container_of(target_list,
832 						    struct binder_thread, todo);
833 			binder_dequeue_work_ilocked(&node->work);
834 			BUG_ON(&thread->todo != target_list);
835 			binder_enqueue_deferred_thread_work_ilocked(thread,
836 								   &node->work);
837 		}
838 	} else {
839 		if (!internal)
840 			node->local_weak_refs++;
841 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
842 			if (target_list == NULL) {
843 				pr_err("invalid inc weak node for %d\n",
844 					node->debug_id);
845 				return -EINVAL;
846 			}
847 			/*
848 			 * Queue node work, as in the strong case above
849 			 */
850 			binder_enqueue_work_ilocked(&node->work, target_list);
851 		}
852 	}
853 	return 0;
854 }
855 
856 static int binder_inc_node(struct binder_node *node, int strong, int internal,
857 			   struct list_head *target_list)
858 {
859 	int ret;
860 
861 	binder_node_inner_lock(node);
862 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
863 	binder_node_inner_unlock(node);
864 
865 	return ret;
866 }
867 
868 static bool binder_dec_node_nilocked(struct binder_node *node,
869 				     int strong, int internal)
870 {
871 	struct binder_proc *proc = node->proc;
872 
873 	assert_spin_locked(&node->lock);
874 	if (proc)
875 		assert_spin_locked(&proc->inner_lock);
876 	if (strong) {
877 		if (internal)
878 			node->internal_strong_refs--;
879 		else
880 			node->local_strong_refs--;
881 		if (node->local_strong_refs || node->internal_strong_refs)
882 			return false;
883 	} else {
884 		if (!internal)
885 			node->local_weak_refs--;
886 		if (node->local_weak_refs || node->tmp_refs ||
887 				!hlist_empty(&node->refs))
888 			return false;
889 	}
890 
891 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
892 		if (list_empty(&node->work.entry)) {
893 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
894 			binder_wakeup_proc_ilocked(proc);
895 		}
896 	} else {
897 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
898 		    !node->local_weak_refs && !node->tmp_refs) {
899 			if (proc) {
900 				binder_dequeue_work_ilocked(&node->work);
901 				rb_erase(&node->rb_node, &proc->nodes);
902 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
903 					     "refless node %d deleted\n",
904 					     node->debug_id);
905 			} else {
906 				BUG_ON(!list_empty(&node->work.entry));
907 				spin_lock(&binder_dead_nodes_lock);
908 				/*
909 				 * tmp_refs could have changed so
910 				 * check it again
911 				 */
912 				if (node->tmp_refs) {
913 					spin_unlock(&binder_dead_nodes_lock);
914 					return false;
915 				}
916 				hlist_del(&node->dead_node);
917 				spin_unlock(&binder_dead_nodes_lock);
918 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
919 					     "dead node %d deleted\n",
920 					     node->debug_id);
921 			}
922 			return true;
923 		}
924 	}
925 	return false;
926 }
927 
928 static void binder_dec_node(struct binder_node *node, int strong, int internal)
929 {
930 	bool free_node;
931 
932 	binder_node_inner_lock(node);
933 	free_node = binder_dec_node_nilocked(node, strong, internal);
934 	binder_node_inner_unlock(node);
935 	if (free_node)
936 		binder_free_node(node);
937 }
938 
939 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
940 {
941 	/*
942 	 * No call to binder_inc_node() is needed since we
943 	 * don't need to inform userspace of any changes to
944 	 * tmp_refs
945 	 */
946 	node->tmp_refs++;
947 }
948 
949 /**
950  * binder_inc_node_tmpref() - take a temporary reference on node
951  * @node:	node to reference
952  *
953  * Take reference on node to prevent the node from being freed
954  * while referenced only by a local variable. The inner lock is
955  * needed to serialize with the node work on the queue (which
956  * isn't needed after the node is dead). If the node is dead
957  * (node->proc is NULL), use binder_dead_nodes_lock to protect
958  * node->tmp_refs against dead-node-only cases where the node
959  * lock cannot be acquired (eg traversing the dead node list to
960  * lock cannot be acquired (e.g. traversing the dead node list to
961  */
962 static void binder_inc_node_tmpref(struct binder_node *node)
963 {
964 	binder_node_lock(node);
965 	if (node->proc)
966 		binder_inner_proc_lock(node->proc);
967 	else
968 		spin_lock(&binder_dead_nodes_lock);
969 	binder_inc_node_tmpref_ilocked(node);
970 	if (node->proc)
971 		binder_inner_proc_unlock(node->proc);
972 	else
973 		spin_unlock(&binder_dead_nodes_lock);
974 	binder_node_unlock(node);
975 }
976 
977 /**
978  * binder_dec_node_tmpref() - remove a temporary reference on node
979  * @node:	node to reference
980  *
981  * Release temporary reference on node taken via binder_inc_node_tmpref()
982  */
983 static void binder_dec_node_tmpref(struct binder_node *node)
984 {
985 	bool free_node;
986 
987 	binder_node_inner_lock(node);
988 	if (!node->proc)
989 		spin_lock(&binder_dead_nodes_lock);
990 	else
991 		__acquire(&binder_dead_nodes_lock);
992 	node->tmp_refs--;
993 	BUG_ON(node->tmp_refs < 0);
994 	if (!node->proc)
995 		spin_unlock(&binder_dead_nodes_lock);
996 	else
997 		__release(&binder_dead_nodes_lock);
998 	/*
999 	 * Call binder_dec_node() to check if all refcounts are 0
1000 	 * and cleanup is needed. Calling with strong=0 and internal=1
1001 	 * causes no actual reference to be released in binder_dec_node().
1002 	 * If that changes, a change is needed here too.
1003 	 */
1004 	free_node = binder_dec_node_nilocked(node, 0, 1);
1005 	binder_node_inner_unlock(node);
1006 	if (free_node)
1007 		binder_free_node(node);
1008 }
1009 
1010 static void binder_put_node(struct binder_node *node)
1011 {
1012 	binder_dec_node_tmpref(node);
1013 }
1014 
1015 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1016 						 u32 desc, bool need_strong_ref)
1017 {
1018 	struct rb_node *n = proc->refs_by_desc.rb_node;
1019 	struct binder_ref *ref;
1020 
1021 	while (n) {
1022 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1023 
1024 		if (desc < ref->data.desc) {
1025 			n = n->rb_left;
1026 		} else if (desc > ref->data.desc) {
1027 			n = n->rb_right;
1028 		} else if (need_strong_ref && !ref->data.strong) {
1029 			binder_user_error("tried to use weak ref as strong ref\n");
1030 			return NULL;
1031 		} else {
1032 			return ref;
1033 		}
1034 	}
1035 	return NULL;
1036 }
1037 
1038 /**
1039  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1040  * @proc:	binder_proc that owns the ref
1041  * @node:	binder_node of target
1042  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1043  *
1044  * Look up the ref for the given node and return it if it exists
1045  *
1046  * If it doesn't exist and the caller provides a newly allocated
1047  * ref, initialize the fields of the newly allocated ref and insert
1048  * into the given proc rb_trees and node refs list.
1049  *
1050  * Return:	the ref for node. It is possible that another thread
1051  *		allocated/initialized the ref first in which case the
1052  *		returned ref would be different than the passed-in
1053  *		new_ref. new_ref must be kfree'd by the caller in
1054  *		this case.
1055  */
1056 static struct binder_ref *binder_get_ref_for_node_olocked(
1057 					struct binder_proc *proc,
1058 					struct binder_node *node,
1059 					struct binder_ref *new_ref)
1060 {
1061 	struct binder_context *context = proc->context;
1062 	struct rb_node **p = &proc->refs_by_node.rb_node;
1063 	struct rb_node *parent = NULL;
1064 	struct binder_ref *ref;
1065 	struct rb_node *n;
1066 
1067 	while (*p) {
1068 		parent = *p;
1069 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1070 
1071 		if (node < ref->node)
1072 			p = &(*p)->rb_left;
1073 		else if (node > ref->node)
1074 			p = &(*p)->rb_right;
1075 		else
1076 			return ref;
1077 	}
1078 	if (!new_ref)
1079 		return NULL;
1080 
1081 	binder_stats_created(BINDER_STAT_REF);
1082 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1083 	new_ref->proc = proc;
1084 	new_ref->node = node;
1085 	rb_link_node(&new_ref->rb_node_node, parent, p);
1086 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1087 
1088 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1089 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1090 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1091 		if (ref->data.desc > new_ref->data.desc)
1092 			break;
1093 		new_ref->data.desc = ref->data.desc + 1;
1094 	}
1095 
1096 	p = &proc->refs_by_desc.rb_node;
1097 	while (*p) {
1098 		parent = *p;
1099 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1100 
1101 		if (new_ref->data.desc < ref->data.desc)
1102 			p = &(*p)->rb_left;
1103 		else if (new_ref->data.desc > ref->data.desc)
1104 			p = &(*p)->rb_right;
1105 		else
1106 			BUG();
1107 	}
1108 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1109 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1110 
1111 	binder_node_lock(node);
1112 	hlist_add_head(&new_ref->node_entry, &node->refs);
1113 
1114 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1115 		     "%d new ref %d desc %d for node %d\n",
1116 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1117 		      node->debug_id);
1118 	binder_node_unlock(node);
1119 	return new_ref;
1120 }
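
/*
 * Descriptor assignment above fills the lowest free slot: desc 0 is
 * reserved for the context manager and the ascending walk over
 * refs_by_desc stops at the first gap. Illustrative example: if the
 * proc already holds descs {0, 1, 2, 4}, a new ref gets desc 3.
 */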
1121 
1122 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1123 {
1124 	bool delete_node = false;
1125 
1126 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1127 		     "%d delete ref %d desc %d for node %d\n",
1128 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1129 		      ref->node->debug_id);
1130 
1131 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1132 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1133 
1134 	binder_node_inner_lock(ref->node);
1135 	if (ref->data.strong)
1136 		binder_dec_node_nilocked(ref->node, 1, 1);
1137 
1138 	hlist_del(&ref->node_entry);
1139 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1140 	binder_node_inner_unlock(ref->node);
1141 	/*
1142 	 * Clear ref->node unless we want the caller to free the node
1143 	 */
1144 	if (!delete_node) {
1145 		/*
1146 		 * The caller uses ref->node to determine
1147 		 * whether the node needs to be freed. Clear
1148 		 * it since the node is still alive.
1149 		 */
1150 		ref->node = NULL;
1151 	}
1152 
1153 	if (ref->death) {
1154 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1155 			     "%d delete ref %d desc %d has death notification\n",
1156 			      ref->proc->pid, ref->data.debug_id,
1157 			      ref->data.desc);
1158 		binder_dequeue_work(ref->proc, &ref->death->work);
1159 		binder_stats_deleted(BINDER_STAT_DEATH);
1160 	}
1161 	binder_stats_deleted(BINDER_STAT_REF);
1162 }
1163 
1164 /**
1165  * binder_inc_ref_olocked() - increment the ref for given handle
1166  * @ref:         ref to be incremented
1167  * @strong:      if true, strong increment, else weak
1168  * @target_list: list to queue node work on
1169  *
1170  * Increment the ref. @ref->proc->outer_lock must be held on entry
1171  *
1172  * Return: 0, if successful, else errno
1173  */
1174 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1175 				  struct list_head *target_list)
1176 {
1177 	int ret;
1178 
1179 	if (strong) {
1180 		if (ref->data.strong == 0) {
1181 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1182 			if (ret)
1183 				return ret;
1184 		}
1185 		ref->data.strong++;
1186 	} else {
1187 		if (ref->data.weak == 0) {
1188 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1189 			if (ret)
1190 				return ret;
1191 		}
1192 		ref->data.weak++;
1193 	}
1194 	return 0;
1195 }
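
/*
 * Note the 0 -> 1 transition above: only the first strong (or weak)
 * ref this proc takes on a node is forwarded to binder_inc_node();
 * subsequent increments only bump the per-proc counter.
 */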
1196 
1197 /**
1198  * binder_dec_ref_olocked() - dec the ref for given handle
1199  * @ref:	ref to be decremented
1200  * @strong:	if true, strong decrement, else weak
1201  *
1202  * Decrement the ref.
1203  *
1204  * Return: %true if ref is cleaned up and ready to be freed.
1205  */
1206 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1207 {
1208 	if (strong) {
1209 		if (ref->data.strong == 0) {
1210 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1211 					  ref->proc->pid, ref->data.debug_id,
1212 					  ref->data.desc, ref->data.strong,
1213 					  ref->data.weak);
1214 			return false;
1215 		}
1216 		ref->data.strong--;
1217 		if (ref->data.strong == 0)
1218 			binder_dec_node(ref->node, strong, 1);
1219 	} else {
1220 		if (ref->data.weak == 0) {
1221 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1222 					  ref->proc->pid, ref->data.debug_id,
1223 					  ref->data.desc, ref->data.strong,
1224 					  ref->data.weak);
1225 			return false;
1226 		}
1227 		ref->data.weak--;
1228 	}
1229 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1230 		binder_cleanup_ref_olocked(ref);
1231 		return true;
1232 	}
1233 	return false;
1234 }
1235 
1236 /**
1237  * binder_get_node_from_ref() - get the node from the given proc/desc
1238  * @proc:	proc containing the ref
1239  * @desc:	the handle associated with the ref
1240  * @need_strong_ref: if true, only return node if ref is strong
1241  * @rdata:	the id/refcount data for the ref
1242  *
1243  * Given a proc and ref handle, return the associated binder_node
1244  *
1245  * Return: a binder_node, or NULL if not found or if only a weak ref exists when a strong one is required
1246  */
1247 static struct binder_node *binder_get_node_from_ref(
1248 		struct binder_proc *proc,
1249 		u32 desc, bool need_strong_ref,
1250 		struct binder_ref_data *rdata)
1251 {
1252 	struct binder_node *node;
1253 	struct binder_ref *ref;
1254 
1255 	binder_proc_lock(proc);
1256 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1257 	if (!ref)
1258 		goto err_no_ref;
1259 	node = ref->node;
1260 	/*
1261 	 * Take an implicit reference on the node to ensure
1262 	 * it stays alive until the call to binder_put_node()
1263 	 */
1264 	binder_inc_node_tmpref(node);
1265 	if (rdata)
1266 		*rdata = ref->data;
1267 	binder_proc_unlock(proc);
1268 
1269 	return node;
1270 
1271 err_no_ref:
1272 	binder_proc_unlock(proc);
1273 	return NULL;
1274 }
1275 
1276 /**
1277  * binder_free_ref() - free the binder_ref
1278  * @ref:	ref to free
1279  *
1280  * Free the binder_ref. Free the binder_node indicated by ref->node
1281  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1282  */
1283 static void binder_free_ref(struct binder_ref *ref)
1284 {
1285 	if (ref->node)
1286 		binder_free_node(ref->node);
1287 	kfree(ref->death);
1288 	kfree(ref);
1289 }
1290 
1291 /**
1292  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1293  * @proc:	proc containing the ref
1294  * @desc:	the handle associated with the ref
1295  * @increment:	true=inc reference, false=dec reference
1296  * @strong:	true=strong reference, false=weak reference
1297  * @rdata:	the id/refcount data for the ref
1298  *
1299  * Given a proc and ref handle, increment or decrement the ref
1300  * according to "increment" arg.
1301  *
1302  * Return: 0 if successful, else errno
1303  */
1304 static int binder_update_ref_for_handle(struct binder_proc *proc,
1305 		uint32_t desc, bool increment, bool strong,
1306 		struct binder_ref_data *rdata)
1307 {
1308 	int ret = 0;
1309 	struct binder_ref *ref;
1310 	bool delete_ref = false;
1311 
1312 	binder_proc_lock(proc);
1313 	ref = binder_get_ref_olocked(proc, desc, strong);
1314 	if (!ref) {
1315 		ret = -EINVAL;
1316 		goto err_no_ref;
1317 	}
1318 	if (increment)
1319 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1320 	else
1321 		delete_ref = binder_dec_ref_olocked(ref, strong);
1322 
1323 	if (rdata)
1324 		*rdata = ref->data;
1325 	binder_proc_unlock(proc);
1326 
1327 	if (delete_ref)
1328 		binder_free_ref(ref);
1329 	return ret;
1330 
1331 err_no_ref:
1332 	binder_proc_unlock(proc);
1333 	return ret;
1334 }
1335 
1336 /**
1337  * binder_dec_ref_for_handle() - dec the ref for given handle
1338  * @proc:	proc containing the ref
1339  * @desc:	the handle associated with the ref
1340  * @strong:	true=strong reference, false=weak reference
1341  * @rdata:	the id/refcount data for the ref
1342  *
1343  * Just calls binder_update_ref_for_handle() to decrement the ref.
1344  *
1345  * Return: 0 if successful, else errno
1346  */
1347 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1348 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1349 {
1350 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1351 }
1352 
1353 
1354 /**
1355  * binder_inc_ref_for_node() - increment the ref for given proc/node
1356  * @proc:	 proc containing the ref
1357  * @node:	 target node
1358  * @strong:	 true=strong reference, false=weak reference
1359  * @target_list: worklist to use if node is incremented
1360  * @rdata:	 the id/refcount data for the ref
1361  *
1362  * Given a proc and node, increment the ref. Create the ref if it
1363  * doesn't already exist
1364  *
1365  * Return: 0 if successful, else errno
1366  */
1367 static int binder_inc_ref_for_node(struct binder_proc *proc,
1368 			struct binder_node *node,
1369 			bool strong,
1370 			struct list_head *target_list,
1371 			struct binder_ref_data *rdata)
1372 {
1373 	struct binder_ref *ref;
1374 	struct binder_ref *new_ref = NULL;
1375 	int ret = 0;
1376 
1377 	binder_proc_lock(proc);
1378 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1379 	if (!ref) {
1380 		binder_proc_unlock(proc);
1381 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1382 		if (!new_ref)
1383 			return -ENOMEM;
1384 		binder_proc_lock(proc);
1385 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1386 	}
1387 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1388 	*rdata = ref->data;
1389 	if (ret && ref == new_ref) {
1390 		/*
1391 		 * Cleanup the failed reference here as the target
1392 		 * could now be dead and have already released its
1393 		 * references by now. Calling on the new reference
1394 		 * with strong=0 and a tmp_ref held will not decrement
1395 		 * the node. The new_ref gets kfree'd below.
1396 		 */
1397 		binder_cleanup_ref_olocked(new_ref);
1398 		ref = NULL;
1399 	}
1400 
1401 	binder_proc_unlock(proc);
1402 	if (new_ref && ref != new_ref)
1403 		/*
1404 		 * Another thread created the ref first so
1405 		 * free the one we allocated
1406 		 */
1407 		kfree(new_ref);
1408 	return ret;
1409 }
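
/*
 * Usage sketch (hedged; the object-translation code later in this
 * file is the real caller): when a node is sent to another process,
 * the receiver's ref is created or incremented in one call and the
 * resulting handle is returned through @rdata:
 *
 *	ret = binder_inc_ref_for_node(target_proc, node, strong,
 *				      &thread->todo, &rdata);
 *	if (!ret)
 *		fp->handle = rdata.desc;
 */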
1410 
1411 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1412 					   struct binder_transaction *t)
1413 {
1414 	BUG_ON(!target_thread);
1415 	assert_spin_locked(&target_thread->proc->inner_lock);
1416 	BUG_ON(target_thread->transaction_stack != t);
1417 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1418 	target_thread->transaction_stack =
1419 		target_thread->transaction_stack->from_parent;
1420 	t->from = NULL;
1421 }
1422 
1423 /**
1424  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1425  * @thread:	thread to decrement
1426  *
1427  * A thread needs to be kept alive while being used to create or
1428  * handle a transaction. binder_get_txn_from() is used to safely
1429  * extract t->from from a binder_transaction and keep the thread
1430  * indicated by t->from from being freed. When done with that
1431  * binder_thread, this function is called to decrement the
1432  * tmp_ref and free if appropriate (thread has been released
1433  * and no transaction being processed by the driver)
1434  */
1435 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1436 {
1437 	/*
1438 	 * The atomic protects the counter itself; the inner lock taken
1439 	 * here makes the is_dead check and the final free race-free
1440 	 */
1441 	binder_inner_proc_lock(thread->proc);
1442 	atomic_dec(&thread->tmp_ref);
1443 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1444 		binder_inner_proc_unlock(thread->proc);
1445 		binder_free_thread(thread);
1446 		return;
1447 	}
1448 	binder_inner_proc_unlock(thread->proc);
1449 }
1450 
1451 /**
1452  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1453  * @proc:	proc to decrement
1454  *
1455  * A binder_proc needs to be kept alive while being used to create or
1456  * handle a transaction. proc->tmp_ref is incremented when
1457  * creating a new transaction or the binder_proc is currently in-use
1458  * by threads that are being released. When done with the binder_proc,
1459  * this function is called to decrement the counter and free the
1460  * proc if appropriate (proc has been released, all threads have
1461  * been released and none are currently in use to process a transaction).
1462  */
1463 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1464 {
1465 	binder_inner_proc_lock(proc);
1466 	proc->tmp_ref--;
1467 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1468 			!proc->tmp_ref) {
1469 		binder_inner_proc_unlock(proc);
1470 		binder_free_proc(proc);
1471 		return;
1472 	}
1473 	binder_inner_proc_unlock(proc);
1474 }
1475 
1476 /**
1477  * binder_get_txn_from() - safely extract the "from" thread in transaction
1478  * @t:	binder transaction for t->from
1479  *
1480  * Atomically return the "from" thread and increment the tmp_ref
1481  * count for the thread to ensure it stays alive until
1482  * binder_thread_dec_tmpref() is called.
1483  *
1484  * Return: the value of t->from
1485  */
1486 static struct binder_thread *binder_get_txn_from(
1487 		struct binder_transaction *t)
1488 {
1489 	struct binder_thread *from;
1490 
1491 	spin_lock(&t->lock);
1492 	from = t->from;
1493 	if (from)
1494 		atomic_inc(&from->tmp_ref);
1495 	spin_unlock(&t->lock);
1496 	return from;
1497 }
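
/*
 * Usage sketch: the thread returned above (if any) is pinned by its
 * tmp_ref and must be released with binder_thread_dec_tmpref():
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...use from...
 *		binder_thread_dec_tmpref(from);
 *	}
 */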
1498 
1499 /**
1500  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1501  * @t:	binder transaction for t->from
1502  *
1503  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1504  * to guarantee that the thread cannot be released while operating on it.
1505  * The caller must call binder_inner_proc_unlock() to release the inner lock
1506  * as well as call binder_dec_thread_txn() to release the reference.
1507  * as well as call binder_thread_dec_tmpref() to release the reference.
1508  * Return: the value of t->from
1509  */
1510 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1511 		struct binder_transaction *t)
1512 	__acquires(&t->from->proc->inner_lock)
1513 {
1514 	struct binder_thread *from;
1515 
1516 	from = binder_get_txn_from(t);
1517 	if (!from) {
1518 		__acquire(&from->proc->inner_lock);
1519 		return NULL;
1520 	}
1521 	binder_inner_proc_lock(from->proc);
1522 	if (t->from) {
1523 		BUG_ON(from != t->from);
1524 		return from;
1525 	}
1526 	binder_inner_proc_unlock(from->proc);
1527 	__acquire(&from->proc->inner_lock);
1528 	binder_thread_dec_tmpref(from);
1529 	return NULL;
1530 }
1531 
1532 /**
1533  * binder_free_txn_fixups() - free unprocessed fd fixups
1534  * @t:	binder transaction whose fd fixups are to be freed
1535  *
1536  * If the transaction is being torn down prior to being
1537  * processed by the target process, free all of the
1538  * fd fixups and fput the file structs. It is safe to
1539  * call this function after the fixups have been
1540  * processed -- in that case, the list will be empty.
1541  */
1542 static void binder_free_txn_fixups(struct binder_transaction *t)
1543 {
1544 	struct binder_txn_fd_fixup *fixup, *tmp;
1545 
1546 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1547 		fput(fixup->file);
1548 		if (fixup->target_fd >= 0)
1549 			put_unused_fd(fixup->target_fd);
1550 		list_del(&fixup->fixup_entry);
1551 		kfree(fixup);
1552 	}
1553 }
1554 
1555 static void binder_txn_latency_free(struct binder_transaction *t)
1556 {
1557 	int from_proc, from_thread, to_proc, to_thread;
1558 
1559 	spin_lock(&t->lock);
1560 	from_proc = t->from ? t->from->proc->pid : 0;
1561 	from_thread = t->from ? t->from->pid : 0;
1562 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1563 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1564 	spin_unlock(&t->lock);
1565 
1566 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1567 }
1568 
1569 static void binder_free_transaction(struct binder_transaction *t)
1570 {
1571 	struct binder_proc *target_proc = t->to_proc;
1572 
1573 	if (target_proc) {
1574 		binder_inner_proc_lock(target_proc);
1575 		target_proc->outstanding_txns--;
1576 		if (target_proc->outstanding_txns < 0)
1577 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1578 				__func__, target_proc->outstanding_txns);
1579 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1580 			wake_up_interruptible_all(&target_proc->freeze_wait);
1581 		if (t->buffer)
1582 			t->buffer->transaction = NULL;
1583 		binder_inner_proc_unlock(target_proc);
1584 	}
1585 	if (trace_binder_txn_latency_free_enabled())
1586 		binder_txn_latency_free(t);
1587 	/*
1588 	 * If the transaction has no target_proc, then
1589 	 * t->buffer->transaction has already been cleared.
1590 	 */
1591 	binder_free_txn_fixups(t);
1592 	kfree(t);
1593 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1594 }
1595 
1596 static void binder_send_failed_reply(struct binder_transaction *t,
1597 				     uint32_t error_code)
1598 {
1599 	struct binder_thread *target_thread;
1600 	struct binder_transaction *next;
1601 
1602 	BUG_ON(t->flags & TF_ONE_WAY);
1603 	while (1) {
1604 		target_thread = binder_get_txn_from_and_acq_inner(t);
1605 		if (target_thread) {
1606 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1607 				     "send failed reply for transaction %d to %d:%d\n",
1608 				      t->debug_id,
1609 				      target_thread->proc->pid,
1610 				      target_thread->pid);
1611 
1612 			binder_pop_transaction_ilocked(target_thread, t);
1613 			if (target_thread->reply_error.cmd == BR_OK) {
1614 				target_thread->reply_error.cmd = error_code;
1615 				binder_enqueue_thread_work_ilocked(
1616 					target_thread,
1617 					&target_thread->reply_error.work);
1618 				wake_up_interruptible(&target_thread->wait);
1619 			} else {
1620 				/*
1621 				 * Cannot get here for normal operation, but
1622 				 * we can if multiple synchronous transactions
1623 				 * are sent without blocking for responses.
1624 				 * Just ignore the 2nd error in this case.
1625 				 */
1626 				pr_warn("Unexpected reply error: %u\n",
1627 					target_thread->reply_error.cmd);
1628 			}
1629 			binder_inner_proc_unlock(target_thread->proc);
1630 			binder_thread_dec_tmpref(target_thread);
1631 			binder_free_transaction(t);
1632 			return;
1633 		}
1634 		__release(&target_thread->proc->inner_lock);
1635 		next = t->from_parent;
1636 
1637 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1638 			     "send failed reply for transaction %d, target dead\n",
1639 			     t->debug_id);
1640 
1641 		binder_free_transaction(t);
1642 		if (next == NULL) {
1643 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1644 				     "reply failed, no target thread at root\n");
1645 			return;
1646 		}
1647 		t = next;
1648 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1649 			     "reply failed, no target thread -- retry %d\n",
1650 			      t->debug_id);
1651 	}
1652 }
1653 
1654 /**
1655  * binder_cleanup_transaction() - cleans up undelivered transaction
1656  * @t:		transaction that needs to be cleaned up
1657  * @reason:	reason the transaction wasn't delivered
1658  * @error_code:	error to return to caller (if synchronous call)
1659  */
1660 static void binder_cleanup_transaction(struct binder_transaction *t,
1661 				       const char *reason,
1662 				       uint32_t error_code)
1663 {
1664 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1665 		binder_send_failed_reply(t, error_code);
1666 	} else {
1667 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1668 			"undelivered transaction %d, %s\n",
1669 			t->debug_id, reason);
1670 		binder_free_transaction(t);
1671 	}
1672 }
1673 
1674 /**
1675  * binder_get_object() - gets object and checks for valid metadata
1676  * @proc:	binder_proc owning the buffer
1677  * @u:		sender's user pointer to base of buffer
1678  * @buffer:	binder_buffer that we're parsing.
1679  * @offset:	offset in the @buffer at which to validate an object.
1680  * @object:	struct binder_object to read into
1681  *
1682  * Copy the binder object at the given offset into @object. If @u is
1683  * provided then the copy is from the sender's buffer. If not, then
1684  * it is copied from the target's @buffer.
1685  *
1686  * Return:	If there's a valid metadata object at @offset, the
1687  *		size of that object. Otherwise, it returns zero. The object
1688  *		is read into the struct binder_object pointed to by @object.
1689  */
1690 static size_t binder_get_object(struct binder_proc *proc,
1691 				const void __user *u,
1692 				struct binder_buffer *buffer,
1693 				unsigned long offset,
1694 				struct binder_object *object)
1695 {
1696 	size_t read_size;
1697 	struct binder_object_header *hdr;
1698 	size_t object_size = 0;
1699 
1700 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1701 	if (offset > buffer->data_size || read_size < sizeof(*hdr))
1702 		return 0;
1703 	if (u) {
1704 		if (copy_from_user(object, u + offset, read_size))
1705 			return 0;
1706 	} else {
1707 		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1708 						  offset, read_size))
1709 			return 0;
1710 	}
1711 
1712 	/* Ok, now see if we read a complete object. */
1713 	hdr = &object->hdr;
1714 	switch (hdr->type) {
1715 	case BINDER_TYPE_BINDER:
1716 	case BINDER_TYPE_WEAK_BINDER:
1717 	case BINDER_TYPE_HANDLE:
1718 	case BINDER_TYPE_WEAK_HANDLE:
1719 		object_size = sizeof(struct flat_binder_object);
1720 		break;
1721 	case BINDER_TYPE_FD:
1722 		object_size = sizeof(struct binder_fd_object);
1723 		break;
1724 	case BINDER_TYPE_PTR:
1725 		object_size = sizeof(struct binder_buffer_object);
1726 		break;
1727 	case BINDER_TYPE_FDA:
1728 		object_size = sizeof(struct binder_fd_array_object);
1729 		break;
1730 	default:
1731 		return 0;
1732 	}
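	/*
	 * Both checks below are needed: if object_size exceeded
	 * data_size, the unsigned subtraction would wrap and the
	 * first test alone could pass spuriously.
	 */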
1733 	if (offset <= buffer->data_size - object_size &&
1734 	    buffer->data_size >= object_size)
1735 		return object_size;
1736 	else
1737 		return 0;
1738 }
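
/*
 * Usage sketch for binder_get_object() (illustrative only, not called
 * from anywhere): the helper collapses every failure mode -- bad offset,
 * short read, misaligned offset, unknown object type -- into a zero
 * return, so a caller needs a single check before trusting @object:
 *
 *	struct binder_object object;
 *	size_t object_size;
 *
 *	object_size = binder_get_object(proc, NULL, buffer, offset, &object);
 *	if (object_size == 0)
 *		return -EINVAL;
 *	switch (object.hdr.type) {	// selects the valid union member
 *	case BINDER_TYPE_PTR:
 *		// object.bbo is now safe to use
 *		break;
 *	}
 */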
1739 
1740 /**
1741  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1742  * @proc:	binder_proc owning the buffer
1743  * @b:		binder_buffer containing the object
1744  * @object:	struct binder_object to read into
1745  * @index:	index in offset array at which the binder_buffer_object is
1746  *		located
1747  * @start_offset: offset to the start of the offset array in @b
1748  * @object_offsetp: offset of @object read from @b
1749  * @num_valid:	the number of valid offsets in the offset array
1750  *
1751  * Return:	If @index is within the valid range of the offset array
1752  *		described by @start_offset and @num_valid, and if there's a valid
1753  *		binder_buffer_object at the offset found in index @index
1754  *		of the offset array, that object is returned. Otherwise,
1755  *		%NULL is returned.
1756  *		Note that the offset found in index @index itself is not
1757  *		verified; this function assumes that @num_valid elements
1758  *		from @start_offset were previously verified to have valid offsets.
1759  *		If @object_offsetp is non-NULL, then the offset within
1760  *		@b is written to it.
1761  */
1762 static struct binder_buffer_object *binder_validate_ptr(
1763 						struct binder_proc *proc,
1764 						struct binder_buffer *b,
1765 						struct binder_object *object,
1766 						binder_size_t index,
1767 						binder_size_t start_offset,
1768 						binder_size_t *object_offsetp,
1769 						binder_size_t num_valid)
1770 {
1771 	size_t object_size;
1772 	binder_size_t object_offset;
1773 	unsigned long buffer_offset;
1774 
1775 	if (index >= num_valid)
1776 		return NULL;
1777 
1778 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1779 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1780 					  b, buffer_offset,
1781 					  sizeof(object_offset)))
1782 		return NULL;
1783 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1784 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1785 		return NULL;
1786 	if (object_offsetp)
1787 		*object_offsetp = object_offset;
1788 
1789 	return &object->bbo;
1790 }
1791 
1792 /**
1793  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1794  * @proc:		binder_proc owning the buffer
1795  * @b:			transaction buffer
1796  * @objects_start_offset: offset to start of objects buffer
1797  * @buffer_obj_offset:	offset to the binder_buffer_object in which the fixup resides
1798  * @fixup_offset:	start offset in @buffer to fix up
1799  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1800  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1801  *
1802  * Return:		%true if a fixup in buffer @b at offset @fixup_offset
1803  *			is allowed.
1804  *
1805  * For safety reasons, we only allow fixups inside a buffer to happen
1806  * at increasing offsets; additionally, we only allow fixup on the last
1807  * buffer object that was verified, or one of its parents.
1808  *
1809  * Example of what is allowed:
1810  *
1811  * A
1812  *   B (parent = A, offset = 0)
1813  *   C (parent = A, offset = 16)
1814  *     D (parent = C, offset = 0)
1815  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1816  *
1817  * Examples of what is not allowed:
1818  *
1819  * Decreasing offsets within the same parent:
1820  * A
1821  *   C (parent = A, offset = 16)
1822  *   B (parent = A, offset = 0) // decreasing offset within A
1823  *
1824  * Referring to a parent that wasn't the last object or any of its parents:
1825  * A
1826  *   B (parent = A, offset = 0)
1827  *   C (parent = A, offset = 0)
1828  *   C (parent = A, offset = 16)
1829  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1830  */
1831 static bool binder_validate_fixup(struct binder_proc *proc,
1832 				  struct binder_buffer *b,
1833 				  binder_size_t objects_start_offset,
1834 				  binder_size_t buffer_obj_offset,
1835 				  binder_size_t fixup_offset,
1836 				  binder_size_t last_obj_offset,
1837 				  binder_size_t last_min_offset)
1838 {
1839 	if (!last_obj_offset) {
1840 		/* No verified object yet, so nothing to fix up in */
1841 		return false;
1842 	}
1843 
1844 	while (last_obj_offset != buffer_obj_offset) {
1845 		unsigned long buffer_offset;
1846 		struct binder_object last_object;
1847 		struct binder_buffer_object *last_bbo;
1848 		size_t object_size = binder_get_object(proc, NULL, b,
1849 						       last_obj_offset,
1850 						       &last_object);
1851 		if (object_size != sizeof(*last_bbo))
1852 			return false;
1853 
1854 		last_bbo = &last_object.bbo;
1855 		/*
1856 		 * Safe to retrieve the parent of last_obj, since it
1857 		 * was already previously verified by the driver.
1858 		 */
1859 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1860 			return false;
1861 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1862 		buffer_offset = objects_start_offset +
1863 			sizeof(binder_size_t) * last_bbo->parent;
1864 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1865 						  &last_obj_offset,
1866 						  b, buffer_offset,
1867 						  sizeof(last_obj_offset)))
1868 			return false;
1869 	}
1870 	return (fixup_offset >= last_min_offset);
1871 }
1872 
1873 /**
1874  * struct binder_task_work_cb - for deferred close
1875  *
1876  * @twork:                callback_head for task work
1877  * @fd:                   fd to close
1878  *
1879  * Structure to pass task work to be handled after
1880  * returning from binder_ioctl() via task_work_add().
1881  */
1882 struct binder_task_work_cb {
1883 	struct callback_head twork;
1884 	struct file *file;
1885 };
1886 
1887 /**
1888  * binder_do_fd_close() - release the file pinned for a deferred close
1889  * @twork:	callback head for task work
1890  *
1891  * It is not safe to call ksys_close() during the binder_ioctl()
1892  * function if there is a chance that binder's own file descriptor
1893  * might be closed. This is to meet the requirements for using
1894  * fdget() (see comments for __fget_light()). Therefore use
1895  * task_work_add() to schedule the close operation once we have
1896  * returned from binder_ioctl(). This function is the callback
1897  * for that mechanism and drops the pinned reference to the
1898  * given file with fput().
1899  */
1900 static void binder_do_fd_close(struct callback_head *twork)
1901 {
1902 	struct binder_task_work_cb *twcb = container_of(twork,
1903 			struct binder_task_work_cb, twork);
1904 
1905 	fput(twcb->file);
1906 	kfree(twcb);
1907 }
1908 
1909 /**
1910  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1911  * @fd:		file-descriptor to close
1912  *
1913  * See comments in binder_do_fd_close(). This function closes @fd and
1914  * schedules the final fput() on its file for after binder_ioctl() returns.
1915  */
1916 static void binder_deferred_fd_close(int fd)
1917 {
1918 	struct binder_task_work_cb *twcb;
1919 
1920 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1921 	if (!twcb)
1922 		return;
1923 	init_task_work(&twcb->twork, binder_do_fd_close);
1924 	twcb->file = file_close_fd(fd);
1925 	if (twcb->file) {
1926 		// pin it until binder_do_fd_close(); see comments there
1927 		get_file(twcb->file);
1928 		filp_close(twcb->file, current->files);
1929 		task_work_add(current, &twcb->twork, TWA_RESUME);
1930 	} else {
1931 		kfree(twcb);
1932 	}
1933 }
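
/*
 * Usage sketch for the deferred-close pair above, assuming we are in
 * process context inside binder_ioctl() (a requirement inherited from
 * task_work_add()):
 *
 *	binder_deferred_fd_close(fd);
 *	// fd is removed from the table and flushed immediately; the
 *	// pinned struct file is only fput() from binder_do_fd_close()
 *	// once this task returns to user space.
 */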
1934 
1935 static void binder_transaction_buffer_release(struct binder_proc *proc,
1936 					      struct binder_thread *thread,
1937 					      struct binder_buffer *buffer,
1938 					      binder_size_t off_end_offset,
1939 					      bool is_failure)
1940 {
1941 	int debug_id = buffer->debug_id;
1942 	binder_size_t off_start_offset, buffer_offset;
1943 
1944 	binder_debug(BINDER_DEBUG_TRANSACTION,
1945 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1946 		     proc->pid, buffer->debug_id,
1947 		     buffer->data_size, buffer->offsets_size,
1948 		     (unsigned long long)off_end_offset);
1949 
1950 	if (buffer->target_node)
1951 		binder_dec_node(buffer->target_node, 1, 0);
1952 
1953 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1954 
1955 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1956 	     buffer_offset += sizeof(binder_size_t)) {
1957 		struct binder_object_header *hdr;
1958 		size_t object_size = 0;
1959 		struct binder_object object;
1960 		binder_size_t object_offset;
1961 
1962 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1963 						   buffer, buffer_offset,
1964 						   sizeof(object_offset)))
1965 			object_size = binder_get_object(proc, NULL, buffer,
1966 							object_offset, &object);
1967 		if (object_size == 0) {
1968 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1969 			       debug_id, (u64)object_offset, buffer->data_size);
1970 			continue;
1971 		}
1972 		hdr = &object.hdr;
1973 		switch (hdr->type) {
1974 		case BINDER_TYPE_BINDER:
1975 		case BINDER_TYPE_WEAK_BINDER: {
1976 			struct flat_binder_object *fp;
1977 			struct binder_node *node;
1978 
1979 			fp = to_flat_binder_object(hdr);
1980 			node = binder_get_node(proc, fp->binder);
1981 			if (node == NULL) {
1982 				pr_err("transaction release %d bad node %016llx\n",
1983 				       debug_id, (u64)fp->binder);
1984 				break;
1985 			}
1986 			binder_debug(BINDER_DEBUG_TRANSACTION,
1987 				     "        node %d u%016llx\n",
1988 				     node->debug_id, (u64)node->ptr);
1989 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1990 					0);
1991 			binder_put_node(node);
1992 		} break;
1993 		case BINDER_TYPE_HANDLE:
1994 		case BINDER_TYPE_WEAK_HANDLE: {
1995 			struct flat_binder_object *fp;
1996 			struct binder_ref_data rdata;
1997 			int ret;
1998 
1999 			fp = to_flat_binder_object(hdr);
2000 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2001 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2002 
2003 			if (ret) {
2004 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2005 				 debug_id, fp->handle, ret);
2006 				break;
2007 			}
2008 			binder_debug(BINDER_DEBUG_TRANSACTION,
2009 				     "        ref %d desc %d\n",
2010 				     rdata.debug_id, rdata.desc);
2011 		} break;
2012 
2013 		case BINDER_TYPE_FD: {
2014 			/*
2015 			 * No need to close the file here since user-space
2016 			 * closes it for successfully delivered
2017 			 * transactions. For transactions that weren't
2018 			 * delivered, the new fd was never allocated so
2019 			 * there is no need to close and the fput on the
2020 			 * file is done when the transaction is torn
2021 			 * down.
2022 			 */
2023 		} break;
2024 		case BINDER_TYPE_PTR:
2025 			/*
2026 			 * Nothing to do here, this will get cleaned up when the
2027 			 * transaction buffer gets freed
2028 			 */
2029 			break;
2030 		case BINDER_TYPE_FDA: {
2031 			struct binder_fd_array_object *fda;
2032 			struct binder_buffer_object *parent;
2033 			struct binder_object ptr_object;
2034 			binder_size_t fda_offset;
2035 			size_t fd_index;
2036 			binder_size_t fd_buf_size;
2037 			binder_size_t num_valid;
2038 
2039 			if (is_failure) {
2040 				/*
2041 				 * The fd fixups have not been applied so no
2042 				 * fds need to be closed.
2043 				 */
2044 				continue;
2045 			}
2046 
2047 			num_valid = (buffer_offset - off_start_offset) /
2048 						sizeof(binder_size_t);
2049 			fda = to_binder_fd_array_object(hdr);
2050 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2051 						     fda->parent,
2052 						     off_start_offset,
2053 						     NULL,
2054 						     num_valid);
2055 			if (!parent) {
2056 				pr_err("transaction release %d bad parent offset\n",
2057 				       debug_id);
2058 				continue;
2059 			}
2060 			fd_buf_size = sizeof(u32) * fda->num_fds;
2061 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2062 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2063 				       debug_id, (u64)fda->num_fds);
2064 				continue;
2065 			}
2066 			if (fd_buf_size > parent->length ||
2067 			    fda->parent_offset > parent->length - fd_buf_size) {
2068 				/* No space for all file descriptors here. */
2069 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2070 				       debug_id, (u64)fda->num_fds);
2071 				continue;
2072 			}
2073 			/*
2074 			 * the source data for binder_buffer_object is visible
2075 			 * to user-space and the @buffer element is the user
2076 			 * pointer to the buffer_object containing the fd_array.
2077 			 * Convert the address to an offset relative to
2078 			 * the base of the transaction buffer.
2079 			 */
2080 			fda_offset = parent->buffer - buffer->user_data +
2081 				fda->parent_offset;
2082 			for (fd_index = 0; fd_index < fda->num_fds;
2083 			     fd_index++) {
2084 				u32 fd;
2085 				int err;
2086 				binder_size_t offset = fda_offset +
2087 					fd_index * sizeof(fd);
2088 
2089 				err = binder_alloc_copy_from_buffer(
2090 						&proc->alloc, &fd, buffer,
2091 						offset, sizeof(fd));
2092 				WARN_ON(err);
2093 				if (!err) {
2094 					binder_deferred_fd_close(fd);
2095 					/*
2096 					 * Need to make sure the thread goes
2097 					 * back to userspace to complete the
2098 					 * deferred close
2099 					 */
2100 					if (thread)
2101 						thread->looper_need_return = true;
2102 				}
2103 			}
2104 		} break;
2105 		default:
2106 			pr_err("transaction release %d bad object type %x\n",
2107 				debug_id, hdr->type);
2108 			break;
2109 		}
2110 	}
2111 }
2112 
2113 /* Clean up all the objects in the buffer */
2114 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2115 						struct binder_thread *thread,
2116 						struct binder_buffer *buffer,
2117 						bool is_failure)
2118 {
2119 	binder_size_t off_end_offset;
2120 
2121 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2122 	off_end_offset += buffer->offsets_size;
2123 
2124 	binder_transaction_buffer_release(proc, thread, buffer,
2125 					  off_end_offset, is_failure);
2126 }
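
/*
 * The offset arithmetic above follows the transaction buffer layout
 * (a sketch; each section starts pointer-aligned):
 *
 *	[0, data_size)                      transaction data
 *	[off_start_offset, off_end_offset)  offset array (one binder_size_t
 *	                                    per object)
 *	[sg start, ...)                     sg buffers, optional secctx last
 *
 * so off_end_offset is the first byte past the offset array and bounds
 * the object loop in binder_transaction_buffer_release().
 */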
2127 
2128 static int binder_translate_binder(struct flat_binder_object *fp,
2129 				   struct binder_transaction *t,
2130 				   struct binder_thread *thread)
2131 {
2132 	struct binder_node *node;
2133 	struct binder_proc *proc = thread->proc;
2134 	struct binder_proc *target_proc = t->to_proc;
2135 	struct binder_ref_data rdata;
2136 	int ret = 0;
2137 
2138 	node = binder_get_node(proc, fp->binder);
2139 	if (!node) {
2140 		node = binder_new_node(proc, fp);
2141 		if (!node)
2142 			return -ENOMEM;
2143 	}
2144 	if (fp->cookie != node->cookie) {
2145 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2146 				  proc->pid, thread->pid, (u64)fp->binder,
2147 				  node->debug_id, (u64)fp->cookie,
2148 				  (u64)node->cookie);
2149 		ret = -EINVAL;
2150 		goto done;
2151 	}
2152 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2153 		ret = -EPERM;
2154 		goto done;
2155 	}
2156 
2157 	ret = binder_inc_ref_for_node(target_proc, node,
2158 			fp->hdr.type == BINDER_TYPE_BINDER,
2159 			&thread->todo, &rdata);
2160 	if (ret)
2161 		goto done;
2162 
2163 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2164 		fp->hdr.type = BINDER_TYPE_HANDLE;
2165 	else
2166 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2167 	fp->binder = 0;
2168 	fp->handle = rdata.desc;
2169 	fp->cookie = 0;
2170 
2171 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2172 	binder_debug(BINDER_DEBUG_TRANSACTION,
2173 		     "        node %d u%016llx -> ref %d desc %d\n",
2174 		     node->debug_id, (u64)node->ptr,
2175 		     rdata.debug_id, rdata.desc);
2176 done:
2177 	binder_put_node(node);
2178 	return ret;
2179 }
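
/*
 * Example of the rewrite done by binder_translate_binder(), with made-up
 * sender values. The sender transmits a strong node reference:
 *
 *	fp->hdr.type == BINDER_TYPE_BINDER
 *	fp->binder   == 0x7fa0dead0000	// sender's node address
 *	fp->cookie   == 0x7fa0beef0000
 *
 * and the object written back into the target buffer reads:
 *
 *	fp->hdr.type == BINDER_TYPE_HANDLE
 *	fp->handle   == rdata.desc	// ref descriptor in the target proc
 *	fp->binder   == 0, fp->cookie == 0
 */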
2180 
2181 static int binder_translate_handle(struct flat_binder_object *fp,
2182 				   struct binder_transaction *t,
2183 				   struct binder_thread *thread)
2184 {
2185 	struct binder_proc *proc = thread->proc;
2186 	struct binder_proc *target_proc = t->to_proc;
2187 	struct binder_node *node;
2188 	struct binder_ref_data src_rdata;
2189 	int ret = 0;
2190 
2191 	node = binder_get_node_from_ref(proc, fp->handle,
2192 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2193 	if (!node) {
2194 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2195 				  proc->pid, thread->pid, fp->handle);
2196 		return -EINVAL;
2197 	}
2198 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2199 		ret = -EPERM;
2200 		goto done;
2201 	}
2202 
2203 	binder_node_lock(node);
2204 	if (node->proc == target_proc) {
2205 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2206 			fp->hdr.type = BINDER_TYPE_BINDER;
2207 		else
2208 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2209 		fp->binder = node->ptr;
2210 		fp->cookie = node->cookie;
2211 		if (node->proc)
2212 			binder_inner_proc_lock(node->proc);
2213 		else
2214 			__acquire(&node->proc->inner_lock);
2215 		binder_inc_node_nilocked(node,
2216 					 fp->hdr.type == BINDER_TYPE_BINDER,
2217 					 0, NULL);
2218 		if (node->proc)
2219 			binder_inner_proc_unlock(node->proc);
2220 		else
2221 			__release(&node->proc->inner_lock);
2222 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2223 		binder_debug(BINDER_DEBUG_TRANSACTION,
2224 			     "        ref %d desc %d -> node %d u%016llx\n",
2225 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2226 			     (u64)node->ptr);
2227 		binder_node_unlock(node);
2228 	} else {
2229 		struct binder_ref_data dest_rdata;
2230 
2231 		binder_node_unlock(node);
2232 		ret = binder_inc_ref_for_node(target_proc, node,
2233 				fp->hdr.type == BINDER_TYPE_HANDLE,
2234 				NULL, &dest_rdata);
2235 		if (ret)
2236 			goto done;
2237 
2238 		fp->binder = 0;
2239 		fp->handle = dest_rdata.desc;
2240 		fp->cookie = 0;
2241 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2242 						    &dest_rdata);
2243 		binder_debug(BINDER_DEBUG_TRANSACTION,
2244 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2245 			     src_rdata.debug_id, src_rdata.desc,
2246 			     dest_rdata.debug_id, dest_rdata.desc,
2247 			     node->debug_id);
2248 	}
2249 done:
2250 	binder_put_node(node);
2251 	return ret;
2252 }
2253 
2254 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2255 			       struct binder_transaction *t,
2256 			       struct binder_thread *thread,
2257 			       struct binder_transaction *in_reply_to)
2258 {
2259 	struct binder_proc *proc = thread->proc;
2260 	struct binder_proc *target_proc = t->to_proc;
2261 	struct binder_txn_fd_fixup *fixup;
2262 	struct file *file;
2263 	int ret = 0;
2264 	bool target_allows_fd;
2265 
2266 	if (in_reply_to)
2267 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2268 	else
2269 		target_allows_fd = t->buffer->target_node->accept_fds;
2270 	if (!target_allows_fd) {
2271 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2272 				  proc->pid, thread->pid,
2273 				  in_reply_to ? "reply" : "transaction",
2274 				  fd);
2275 		ret = -EPERM;
2276 		goto err_fd_not_accepted;
2277 	}
2278 
2279 	file = fget(fd);
2280 	if (!file) {
2281 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2282 				  proc->pid, thread->pid, fd);
2283 		ret = -EBADF;
2284 		goto err_fget;
2285 	}
2286 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2287 	if (ret < 0) {
2288 		ret = -EPERM;
2289 		goto err_security;
2290 	}
2291 
2292 	/*
2293 	 * Add fixup record for this transaction. The allocation
2294 	 * of the fd in the target needs to be done from a
2295 	 * target thread.
2296 	 */
2297 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2298 	if (!fixup) {
2299 		ret = -ENOMEM;
2300 		goto err_alloc;
2301 	}
2302 	fixup->file = file;
2303 	fixup->offset = fd_offset;
2304 	fixup->target_fd = -1;
2305 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2306 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2307 
2308 	return ret;
2309 
2310 err_alloc:
2311 err_security:
2312 	fput(file);
2313 err_fget:
2314 err_fd_not_accepted:
2315 	return ret;
2316 }
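
/*
 * Lifetime sketch for the fd fixup queued above (the target-side step
 * refers to binder_apply_fd_fixups(), which runs later in this file):
 *
 *	sender ioctl:	fget(fd), fixup->target_fd = -1, record queued
 *			on t->fd_fixups
 *	target read:	a new fd is allocated in target context and
 *			written into the buffer at fixup->offset
 *	error path:	the pinned file is fput() when the transaction
 *			is torn down
 */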
2317 
2318 /**
2319  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2320  * @offset:	offset in target buffer to fixup
2321  * @skip_size:	bytes to skip in copy (fixup will be written later)
2322  * @fixup_data:	data to write at fixup offset
2323  * @node:	list node
2324  *
2325  * This is used for the pointer fixup list (pf) which is created and consumed
2326  * during binder_transaction() and is only accessed locally. No
2327  * locking is necessary.
2328  *
2329  * The list is ordered by @offset.
2330  */
2331 struct binder_ptr_fixup {
2332 	binder_size_t offset;
2333 	size_t skip_size;
2334 	binder_uintptr_t fixup_data;
2335 	struct list_head node;
2336 };
2337 
2338 /**
2339  * struct binder_sg_copy - scatter-gather data to be copied
2340  * @offset:		offset in target buffer
2341  * @sender_uaddr:	user address in source buffer
2342  * @length:		bytes to copy
2343  * @node:		list node
2344  *
2345  * This is used for the sg copy list (sgc) which is created and consumed
2346  * during binder_transaction() and is only accessed locally. No
2347  * locking is necessary.
2348  *
2349  * The list is ordered by @offset.
2350  */
2351 struct binder_sg_copy {
2352 	binder_size_t offset;
2353 	const void __user *sender_uaddr;
2354 	size_t length;
2355 	struct list_head node;
2356 };
2357 
2358 /**
2359  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2360  * @alloc:	binder_alloc associated with @buffer
2361  * @buffer:	binder buffer in target process
2362  * @sgc_head:	list_head of scatter-gather copy list
2363  * @pf_head:	list_head of pointer fixup list
2364  *
2365  * Processes all elements of @sgc_head, applying fixups from @pf_head
2366  * and copying the scatter-gather data from the source process' user
2367  * buffer to the target's buffer. It is expected that the list creation
2368  * and processing all occurs during binder_transaction() so these lists
2369  * are only accessed in local context.
2370  *
2371  * Return: 0=success, else -errno
2372  */
2373 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2374 					 struct binder_buffer *buffer,
2375 					 struct list_head *sgc_head,
2376 					 struct list_head *pf_head)
2377 {
2378 	int ret = 0;
2379 	struct binder_sg_copy *sgc, *tmpsgc;
2380 	struct binder_ptr_fixup *tmppf;
2381 	struct binder_ptr_fixup *pf =
2382 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2383 					 node);
2384 
2385 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2386 		size_t bytes_copied = 0;
2387 
2388 		while (bytes_copied < sgc->length) {
2389 			size_t copy_size;
2390 			size_t bytes_left = sgc->length - bytes_copied;
2391 			size_t offset = sgc->offset + bytes_copied;
2392 
2393 			/*
2394 			 * We copy up to the fixup (pointed to by pf)
2395 			 */
2396 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2397 				       : bytes_left;
2398 			if (!ret && copy_size)
2399 				ret = binder_alloc_copy_user_to_buffer(
2400 						alloc, buffer,
2401 						offset,
2402 						sgc->sender_uaddr + bytes_copied,
2403 						copy_size);
2404 			bytes_copied += copy_size;
2405 			if (copy_size != bytes_left) {
2406 				BUG_ON(!pf);
2407 				/* we stopped at a fixup offset */
2408 				if (pf->skip_size) {
2409 					/*
2410 					 * we are just skipping. This is for
2411 					 * BINDER_TYPE_FDA where the translated
2412 					 * fds will be fixed up when we get
2413 					 * to target context.
2414 					 */
2415 					bytes_copied += pf->skip_size;
2416 				} else {
2417 					/* apply the fixup indicated by pf */
2418 					if (!ret)
2419 						ret = binder_alloc_copy_to_buffer(
2420 							alloc, buffer,
2421 							pf->offset,
2422 							&pf->fixup_data,
2423 							sizeof(pf->fixup_data));
2424 					bytes_copied += sizeof(pf->fixup_data);
2425 				}
2426 				list_del(&pf->node);
2427 				kfree(pf);
2428 				pf = list_first_entry_or_null(pf_head,
2429 						struct binder_ptr_fixup, node);
2430 			}
2431 		}
2432 		list_del(&sgc->node);
2433 		kfree(sgc);
2434 	}
2435 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2436 		BUG_ON(pf->skip_size == 0);
2437 		list_del(&pf->node);
2438 		kfree(pf);
2439 	}
2440 	BUG_ON(!list_empty(sgc_head));
2441 
2442 	return ret > 0 ? -EINVAL : ret;
2443 }
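
/*
 * Worked example of the interleaving above, assuming one sg block of
 * 32 bytes at target offset 0 and one pointer fixup at offset 16 with
 * skip_size == 0 and 8-byte pointers:
 *
 *	copy  [0, 16)  from sgc->sender_uaddr	(stops at pf->offset)
 *	write [16, 24) from pf->fixup_data	(the translated pointer)
 *	copy  [24, 32) from sender_uaddr + 24	(the remainder)
 *
 * The sender's untranslated pointer value is never copied into the
 * target buffer.
 */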
2444 
2445 /**
2446  * binder_cleanup_deferred_txn_lists() - free specified lists
2447  * @sgc_head:	list_head of scatter-gather copy list
2448  * @pf_head:	list_head of pointer fixup list
2449  *
2450  * Called to clean up @sgc_head and @pf_head if there is an
2451  * error.
2452  */
2453 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2454 					      struct list_head *pf_head)
2455 {
2456 	struct binder_sg_copy *sgc, *tmpsgc;
2457 	struct binder_ptr_fixup *pf, *tmppf;
2458 
2459 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2460 		list_del(&sgc->node);
2461 		kfree(sgc);
2462 	}
2463 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2464 		list_del(&pf->node);
2465 		kfree(pf);
2466 	}
2467 }
2468 
2469 /**
2470  * binder_defer_copy() - queue a scatter-gather buffer for copy
2471  * @sgc_head:		list_head of scatter-gather copy list
2472  * @offset:		binder buffer offset in target process
2473  * @sender_uaddr:	user address in source process
2474  * @length:		bytes to copy
2475  *
2476  * Specify a scatter-gather block to be copied. The actual copy must
2477  * be deferred until all the needed fixups are identified and queued.
2478  * Then the copy and fixups are done together so un-translated values
2479  * from the source are never visible in the target buffer.
2480  *
2481  * We are guaranteed that repeated calls to this function will have
2482  * monotonically increasing @offset values so the list will naturally
2483  * be ordered.
2484  *
2485  * Return: 0=success, else -errno
2486  */
2487 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2488 			     const void __user *sender_uaddr, size_t length)
2489 {
2490 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2491 
2492 	if (!bc)
2493 		return -ENOMEM;
2494 
2495 	bc->offset = offset;
2496 	bc->sender_uaddr = sender_uaddr;
2497 	bc->length = length;
2498 	INIT_LIST_HEAD(&bc->node);
2499 
2500 	/*
2501 	 * We are guaranteed that the deferred copies are in-order
2502 	 * so just add to the tail.
2503 	 */
2504 	list_add_tail(&bc->node, sgc_head);
2505 
2506 	return 0;
2507 }
2508 
2509 /**
2510  * binder_add_fixup() - queue a fixup to be applied to sg copy
2511  * @pf_head:	list_head of binder ptr fixup list
2512  * @offset:	binder buffer offset in target process
2513  * @fixup:	bytes to be copied for fixup
2514  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2515  *
2516  * Add the specified fixup to a list ordered by @offset. When copying
2517  * the scatter-gather buffers, the fixup will be copied instead of
2518  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2519  * will be applied later (in target process context), so we just skip
2520  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2521  * value in @fixup.
2522  *
2523  * This function is called *mostly* in @offset order, but there are
2524  * exceptions. Since out-of-order inserts are relatively uncommon,
2525  * we insert the new element by searching backward from the tail of
2526  * the list.
2527  *
2528  * Return: 0=success, else -errno
2529  */
2530 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2531 			    binder_uintptr_t fixup, size_t skip_size)
2532 {
2533 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2534 	struct binder_ptr_fixup *tmppf;
2535 
2536 	if (!pf)
2537 		return -ENOMEM;
2538 
2539 	pf->offset = offset;
2540 	pf->fixup_data = fixup;
2541 	pf->skip_size = skip_size;
2542 	INIT_LIST_HEAD(&pf->node);
2543 
2544 	/* Fixups are *mostly* added in-order, but there are some
2545 	 * exceptions. Look backwards through list for insertion point.
2546 	 */
2547 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2548 		if (tmppf->offset < pf->offset) {
2549 			list_add(&pf->node, &tmppf->node);
2550 			return 0;
2551 		}
2552 	}
2553 	/*
2554 	 * if we get here, then the new offset is the lowest so
2555 	 * insert at the head
2556 	 */
2557 	list_add(&pf->node, pf_head);
2558 	return 0;
2559 }
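
/*
 * Example of the backward search above: with fixups already queued at
 * offsets {8, 24, 40}, binder_add_fixup(pf_head, 32, ...) walks from
 * the tail (40, then 24), inserts after 24, and leaves the list
 * ordered as {8, 24, 32, 40}.
 */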
2560 
2561 static int binder_translate_fd_array(struct list_head *pf_head,
2562 				     struct binder_fd_array_object *fda,
2563 				     const void __user *sender_ubuffer,
2564 				     struct binder_buffer_object *parent,
2565 				     struct binder_buffer_object *sender_uparent,
2566 				     struct binder_transaction *t,
2567 				     struct binder_thread *thread,
2568 				     struct binder_transaction *in_reply_to)
2569 {
2570 	binder_size_t fdi, fd_buf_size;
2571 	binder_size_t fda_offset;
2572 	const void __user *sender_ufda_base;
2573 	struct binder_proc *proc = thread->proc;
2574 	int ret;
2575 
2576 	if (fda->num_fds == 0)
2577 		return 0;
2578 
2579 	fd_buf_size = sizeof(u32) * fda->num_fds;
2580 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2581 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2582 				  proc->pid, thread->pid, (u64)fda->num_fds);
2583 		return -EINVAL;
2584 	}
2585 	if (fd_buf_size > parent->length ||
2586 	    fda->parent_offset > parent->length - fd_buf_size) {
2587 		/* No space for all file descriptors here. */
2588 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2589 				  proc->pid, thread->pid, (u64)fda->num_fds);
2590 		return -EINVAL;
2591 	}
2592 	/*
2593 	 * the source data for binder_buffer_object is visible
2594 	 * to user-space and the @buffer element is the user
2595 	 * pointer to the buffer_object containing the fd_array.
2596 	 * Convert the address to an offset relative to
2597 	 * the base of the transaction buffer.
2598 	 */
2599 	fda_offset = parent->buffer - t->buffer->user_data +
2600 		fda->parent_offset;
2601 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2602 				fda->parent_offset;
2603 
2604 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2605 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2606 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2607 				  proc->pid, thread->pid);
2608 		return -EINVAL;
2609 	}
2610 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2611 	if (ret)
2612 		return ret;
2613 
2614 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2615 		u32 fd;
2616 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2617 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2618 
2619 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2620 		if (!ret)
2621 			ret = binder_translate_fd(fd, offset, t, thread,
2622 						  in_reply_to);
2623 		if (ret)
2624 			return ret > 0 ? -EINVAL : ret;
2625 	}
2626 	return 0;
2627 }
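
/*
 * Sketch of the fda_offset math above with made-up numbers: if the
 * parent buffer object lands at t->buffer->user_data + 0x100 and
 * fda->parent_offset is 0x20, then
 *
 *	fda_offset = (user_data + 0x100) - user_data + 0x20 = 0x120
 *
 * so fd number fdi is read from the sender at sender_ufda_base +
 * fdi * sizeof(u32) and translated into the target buffer at
 * fda_offset + fdi * sizeof(u32).
 */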
2628 
2629 static int binder_fixup_parent(struct list_head *pf_head,
2630 			       struct binder_transaction *t,
2631 			       struct binder_thread *thread,
2632 			       struct binder_buffer_object *bp,
2633 			       binder_size_t off_start_offset,
2634 			       binder_size_t num_valid,
2635 			       binder_size_t last_fixup_obj_off,
2636 			       binder_size_t last_fixup_min_off)
2637 {
2638 	struct binder_buffer_object *parent;
2639 	struct binder_buffer *b = t->buffer;
2640 	struct binder_proc *proc = thread->proc;
2641 	struct binder_proc *target_proc = t->to_proc;
2642 	struct binder_object object;
2643 	binder_size_t buffer_offset;
2644 	binder_size_t parent_offset;
2645 
2646 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2647 		return 0;
2648 
2649 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2650 				     off_start_offset, &parent_offset,
2651 				     num_valid);
2652 	if (!parent) {
2653 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2654 				  proc->pid, thread->pid);
2655 		return -EINVAL;
2656 	}
2657 
2658 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2659 				   parent_offset, bp->parent_offset,
2660 				   last_fixup_obj_off,
2661 				   last_fixup_min_off)) {
2662 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2663 				  proc->pid, thread->pid);
2664 		return -EINVAL;
2665 	}
2666 
2667 	if (parent->length < sizeof(binder_uintptr_t) ||
2668 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2669 		/* No space for a pointer here! */
2670 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2671 				  proc->pid, thread->pid);
2672 		return -EINVAL;
2673 	}
2674 
2675 	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2676 
2677 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2678 }
2679 
2680 /**
2681  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2682  * @t1: the pending async txn in the frozen process
2683  * @t2: the new async txn to supersede the outdated pending one
2684  *
2685  * Return:  true if t2 can supersede t1
2686  *          false if t2 cannot supersede t1
2687  */
2688 static bool binder_can_update_transaction(struct binder_transaction *t1,
2689 					  struct binder_transaction *t2)
2690 {
2691 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2692 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2693 		return false;
2694 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2695 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2696 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2697 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2698 		return true;
2699 	return false;
2700 }
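
/*
 * Illustrative userspace use of the supersede rule above (the handle
 * and code names are hypothetical): a service sending periodic one-way
 * status updates tags them so only the newest survives while the
 * target is frozen:
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle = status_handle,
 *		.code = STATUS_UPDATE,		// identical each time
 *		.flags = TF_ONE_WAY | TF_UPDATE_TXN,
 *	};
 *
 * Two such transactions only match here if they also agree on the
 * target node and the sending pid recorded in the buffer.
 */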
2701 
2702 /**
2703  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2704  * @t:		 new async transaction
2705  * @target_list: list to search for an outdated transaction
2706  *
2707  * Return: the outdated transaction if found
2708  *         NULL if no outdated transaction can be found
2709  *
2710  * Requires the proc->inner_lock to be held.
2711  */
2712 static struct binder_transaction *
2713 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2714 					 struct list_head *target_list)
2715 {
2716 	struct binder_work *w;
2717 
2718 	list_for_each_entry(w, target_list, entry) {
2719 		struct binder_transaction *t_queued;
2720 
2721 		if (w->type != BINDER_WORK_TRANSACTION)
2722 			continue;
2723 		t_queued = container_of(w, struct binder_transaction, work);
2724 		if (binder_can_update_transaction(t_queued, t))
2725 			return t_queued;
2726 	}
2727 	return NULL;
2728 }
2729 
2730 /**
2731  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2732  * @t:		transaction to send
2733  * @proc:	process to send the transaction to
2734  * @thread:	thread in @proc to send the transaction to (may be NULL)
2735  *
2736  * This function queues a transaction to the specified process. It will try
2737  * to find a thread in the target process to handle the transaction and
2738  * wake it up. If no thread is found, the work is queued to the proc
2739  * waitqueue.
2740  *
2741  * If the @thread parameter is not NULL, the transaction is always queued
2742  * to the waitlist of that specific thread.
2743  *
2744  * Return:	0 if the transaction was successfully queued
2745  *		BR_DEAD_REPLY if the target process or thread is dead
2746  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2747  *			the sync transaction was rejected
2748  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2749  *		and the async transaction was successfully queued
2750  */
2751 static int binder_proc_transaction(struct binder_transaction *t,
2752 				    struct binder_proc *proc,
2753 				    struct binder_thread *thread)
2754 {
2755 	struct binder_node *node = t->buffer->target_node;
2756 	bool oneway = !!(t->flags & TF_ONE_WAY);
2757 	bool pending_async = false;
2758 	struct binder_transaction *t_outdated = NULL;
2759 	bool frozen = false;
2760 
2761 	BUG_ON(!node);
2762 	binder_node_lock(node);
2763 	if (oneway) {
2764 		BUG_ON(thread);
2765 		if (node->has_async_transaction)
2766 			pending_async = true;
2767 		else
2768 			node->has_async_transaction = true;
2769 	}
2770 
2771 	binder_inner_proc_lock(proc);
2772 	if (proc->is_frozen) {
2773 		frozen = true;
2774 		proc->sync_recv |= !oneway;
2775 		proc->async_recv |= oneway;
2776 	}
2777 
2778 	if ((frozen && !oneway) || proc->is_dead ||
2779 			(thread && thread->is_dead)) {
2780 		binder_inner_proc_unlock(proc);
2781 		binder_node_unlock(node);
2782 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2783 	}
2784 
2785 	if (!thread && !pending_async)
2786 		thread = binder_select_thread_ilocked(proc);
2787 
2788 	if (thread) {
2789 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2790 	} else if (!pending_async) {
2791 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2792 	} else {
2793 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2794 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2795 									      &node->async_todo);
2796 			if (t_outdated) {
2797 				binder_debug(BINDER_DEBUG_TRANSACTION,
2798 					     "txn %d supersedes %d\n",
2799 					     t->debug_id, t_outdated->debug_id);
2800 				list_del_init(&t_outdated->work.entry);
2801 				proc->outstanding_txns--;
2802 			}
2803 		}
2804 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2805 	}
2806 
2807 	if (!pending_async)
2808 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2809 
2810 	proc->outstanding_txns++;
2811 	binder_inner_proc_unlock(proc);
2812 	binder_node_unlock(node);
2813 
2814 	/*
2815 	 * To reduce potential contention, free the outdated transaction and
2816 	 * buffer after releasing the locks.
2817 	 */
2818 	if (t_outdated) {
2819 		struct binder_buffer *buffer = t_outdated->buffer;
2820 
2821 		t_outdated->buffer = NULL;
2822 		buffer->transaction = NULL;
2823 		trace_binder_transaction_update_buffer_release(buffer);
2824 		binder_release_entire_buffer(proc, NULL, buffer, false);
2825 		binder_alloc_free_buf(&proc->alloc, buffer);
2826 		kfree(t_outdated);
2827 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2828 	}
2829 
2830 	if (oneway && frozen)
2831 		return BR_TRANSACTION_PENDING_FROZEN;
2832 
2833 	return 0;
2834 }
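
/*
 * Sketch of how a caller maps the return codes above (this mirrors, in
 * simplified form, the handling in binder_transaction()):
 *
 *	ret = binder_proc_transaction(t, target_proc, NULL);
 *	switch (ret) {
 *	case 0:					// queued, target woken
 *		break;
 *	case BR_TRANSACTION_PENDING_FROZEN:	// async queued, proc frozen
 *		// report "pending" rather than failure
 *		break;
 *	default:				// BR_DEAD_REPLY/BR_FROZEN_REPLY
 *		// fail the transaction back to the sender
 *		break;
 *	}
 */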
2835 
2836 /**
2837  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2838  * @node:         struct binder_node for which to get refs
2839  * @procp:        returns @node->proc if valid
2840  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2841  *
2842  * User-space normally keeps the node alive when creating a transaction
2843  * since it has a reference to the target. The local strong ref keeps it
2844  * alive if the sending process dies before the target process processes
2845  * the transaction. If the source process is malicious or has a reference
2846  * counting bug, relying on the local strong ref can fail.
2847  *
2848  * Since user-space can cause the local strong ref to go away, we also take
2849  * a tmpref on the node to ensure it survives while we are constructing
2850  * the transaction. We also need a tmpref on the proc while we are
2851  * constructing the transaction, so we take that here as well.
2852  *
2853  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2854  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2855  * target proc has died, @error is set to BR_DEAD_REPLY.
2856  */
2857 static struct binder_node *binder_get_node_refs_for_txn(
2858 		struct binder_node *node,
2859 		struct binder_proc **procp,
2860 		uint32_t *error)
2861 {
2862 	struct binder_node *target_node = NULL;
2863 
2864 	binder_node_inner_lock(node);
2865 	if (node->proc) {
2866 		target_node = node;
2867 		binder_inc_node_nilocked(node, 1, 0, NULL);
2868 		binder_inc_node_tmpref_ilocked(node);
2869 		node->proc->tmp_ref++;
2870 		*procp = node->proc;
2871 	} else
2872 		*error = BR_DEAD_REPLY;
2873 	binder_node_inner_unlock(node);
2874 
2875 	return target_node;
2876 }
2877 
2878 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2879 				      uint32_t command, int32_t param)
2880 {
2881 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2882 
2883 	if (!from) {
2884 		/* annotation for sparse */
2885 		__release(&from->proc->inner_lock);
2886 		return;
2887 	}
2888 
2889 	/* don't override existing errors */
2890 	if (from->ee.command == BR_OK)
2891 		binder_set_extended_error(&from->ee, id, command, param);
2892 	binder_inner_proc_unlock(from->proc);
2893 	binder_thread_dec_tmpref(from);
2894 }
2895 
2896 static void binder_transaction(struct binder_proc *proc,
2897 			       struct binder_thread *thread,
2898 			       struct binder_transaction_data *tr, int reply,
2899 			       binder_size_t extra_buffers_size)
2900 {
2901 	int ret;
2902 	struct binder_transaction *t;
2903 	struct binder_work *w;
2904 	struct binder_work *tcomplete;
2905 	binder_size_t buffer_offset = 0;
2906 	binder_size_t off_start_offset, off_end_offset;
2907 	binder_size_t off_min;
2908 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2909 	binder_size_t user_offset = 0;
2910 	struct binder_proc *target_proc = NULL;
2911 	struct binder_thread *target_thread = NULL;
2912 	struct binder_node *target_node = NULL;
2913 	struct binder_transaction *in_reply_to = NULL;
2914 	struct binder_transaction_log_entry *e;
2915 	uint32_t return_error = 0;
2916 	uint32_t return_error_param = 0;
2917 	uint32_t return_error_line = 0;
2918 	binder_size_t last_fixup_obj_off = 0;
2919 	binder_size_t last_fixup_min_off = 0;
2920 	struct binder_context *context = proc->context;
2921 	int t_debug_id = atomic_inc_return(&binder_last_id);
2922 	ktime_t t_start_time = ktime_get();
2923 	char *secctx = NULL;
2924 	u32 secctx_sz = 0;
2925 	struct list_head sgc_head;
2926 	struct list_head pf_head;
2927 	const void __user *user_buffer = (const void __user *)
2928 				(uintptr_t)tr->data.ptr.buffer;
2929 	INIT_LIST_HEAD(&sgc_head);
2930 	INIT_LIST_HEAD(&pf_head);
2931 
2932 	e = binder_transaction_log_add(&binder_transaction_log);
2933 	e->debug_id = t_debug_id;
2934 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2935 	e->from_proc = proc->pid;
2936 	e->from_thread = thread->pid;
2937 	e->target_handle = tr->target.handle;
2938 	e->data_size = tr->data_size;
2939 	e->offsets_size = tr->offsets_size;
2940 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2941 
2942 	binder_inner_proc_lock(proc);
2943 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2944 	binder_inner_proc_unlock(proc);
2945 
2946 	if (reply) {
2947 		binder_inner_proc_lock(proc);
2948 		in_reply_to = thread->transaction_stack;
2949 		if (in_reply_to == NULL) {
2950 			binder_inner_proc_unlock(proc);
2951 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2952 					  proc->pid, thread->pid);
2953 			return_error = BR_FAILED_REPLY;
2954 			return_error_param = -EPROTO;
2955 			return_error_line = __LINE__;
2956 			goto err_empty_call_stack;
2957 		}
2958 		if (in_reply_to->to_thread != thread) {
2959 			spin_lock(&in_reply_to->lock);
2960 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2961 				proc->pid, thread->pid, in_reply_to->debug_id,
2962 				in_reply_to->to_proc ?
2963 				in_reply_to->to_proc->pid : 0,
2964 				in_reply_to->to_thread ?
2965 				in_reply_to->to_thread->pid : 0);
2966 			spin_unlock(&in_reply_to->lock);
2967 			binder_inner_proc_unlock(proc);
2968 			return_error = BR_FAILED_REPLY;
2969 			return_error_param = -EPROTO;
2970 			return_error_line = __LINE__;
2971 			in_reply_to = NULL;
2972 			goto err_bad_call_stack;
2973 		}
2974 		thread->transaction_stack = in_reply_to->to_parent;
2975 		binder_inner_proc_unlock(proc);
2976 		binder_set_nice(in_reply_to->saved_priority);
2977 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2978 		if (target_thread == NULL) {
2979 			/* annotation for sparse */
2980 			__release(&target_thread->proc->inner_lock);
2981 			binder_txn_error("%d:%d reply target not found\n",
2982 				thread->pid, proc->pid);
2983 			return_error = BR_DEAD_REPLY;
2984 			return_error_line = __LINE__;
2985 			goto err_dead_binder;
2986 		}
2987 		if (target_thread->transaction_stack != in_reply_to) {
2988 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2989 				proc->pid, thread->pid,
2990 				target_thread->transaction_stack ?
2991 				target_thread->transaction_stack->debug_id : 0,
2992 				in_reply_to->debug_id);
2993 			binder_inner_proc_unlock(target_thread->proc);
2994 			return_error = BR_FAILED_REPLY;
2995 			return_error_param = -EPROTO;
2996 			return_error_line = __LINE__;
2997 			in_reply_to = NULL;
2998 			target_thread = NULL;
2999 			goto err_dead_binder;
3000 		}
3001 		target_proc = target_thread->proc;
3002 		target_proc->tmp_ref++;
3003 		binder_inner_proc_unlock(target_thread->proc);
3004 	} else {
3005 		if (tr->target.handle) {
3006 			struct binder_ref *ref;
3007 
3008 			/*
3009 			 * There must already be a strong ref
3010 			 * on this node. If so, do a strong
3011 			 * increment on the node to ensure it
3012 			 * stays alive until the transaction is
3013 			 * done.
3014 			 */
3015 			binder_proc_lock(proc);
3016 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3017 						     true);
3018 			if (ref) {
3019 				target_node = binder_get_node_refs_for_txn(
3020 						ref->node, &target_proc,
3021 						&return_error);
3022 			} else {
3023 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3024 						  proc->pid, thread->pid, tr->target.handle);
3025 				return_error = BR_FAILED_REPLY;
3026 			}
3027 			binder_proc_unlock(proc);
3028 		} else {
3029 			mutex_lock(&context->context_mgr_node_lock);
3030 			target_node = context->binder_context_mgr_node;
3031 			if (target_node)
3032 				target_node = binder_get_node_refs_for_txn(
3033 						target_node, &target_proc,
3034 						&return_error);
3035 			else
3036 				return_error = BR_DEAD_REPLY;
3037 			mutex_unlock(&context->context_mgr_node_lock);
3038 			if (target_node && target_proc->pid == proc->pid) {
3039 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3040 						  proc->pid, thread->pid);
3041 				return_error = BR_FAILED_REPLY;
3042 				return_error_param = -EINVAL;
3043 				return_error_line = __LINE__;
3044 				goto err_invalid_target_handle;
3045 			}
3046 		}
3047 		if (!target_node) {
3048 			binder_txn_error("%d:%d cannot find target node\n",
3049 				thread->pid, proc->pid);
3050 			/*
3051 			 * return_error is set above
3052 			 */
3053 			return_error_param = -EINVAL;
3054 			return_error_line = __LINE__;
3055 			goto err_dead_binder;
3056 		}
3057 		e->to_node = target_node->debug_id;
3058 		if (WARN_ON(proc == target_proc)) {
3059 			binder_txn_error("%d:%d self transactions not allowed\n",
3060 				thread->pid, proc->pid);
3061 			return_error = BR_FAILED_REPLY;
3062 			return_error_param = -EINVAL;
3063 			return_error_line = __LINE__;
3064 			goto err_invalid_target_handle;
3065 		}
3066 		if (security_binder_transaction(proc->cred,
3067 						target_proc->cred) < 0) {
3068 			binder_txn_error("%d:%d transaction credentials failed\n",
3069 				thread->pid, proc->pid);
3070 			return_error = BR_FAILED_REPLY;
3071 			return_error_param = -EPERM;
3072 			return_error_line = __LINE__;
3073 			goto err_invalid_target_handle;
3074 		}
3075 		binder_inner_proc_lock(proc);
3076 
3077 		w = list_first_entry_or_null(&thread->todo,
3078 					     struct binder_work, entry);
3079 		if (!(tr->flags & TF_ONE_WAY) && w &&
3080 		    w->type == BINDER_WORK_TRANSACTION) {
3081 			/*
3082 			 * Do not allow new outgoing transaction from a
3083 			 * thread that has a transaction at the head of
3084 			 * its todo list. Only need to check the head
3085 			 * because binder_select_thread_ilocked picks a
3086 			 * thread from proc->waiting_threads to enqueue
3087 			 * the transaction, and nothing is queued to the
3088 			 * todo list while the thread is on waiting_threads.
3089 			 */
3090 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3091 					  proc->pid, thread->pid);
3092 			binder_inner_proc_unlock(proc);
3093 			return_error = BR_FAILED_REPLY;
3094 			return_error_param = -EPROTO;
3095 			return_error_line = __LINE__;
3096 			goto err_bad_todo_list;
3097 		}
3098 
3099 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3100 			struct binder_transaction *tmp;
3101 
3102 			tmp = thread->transaction_stack;
3103 			if (tmp->to_thread != thread) {
3104 				spin_lock(&tmp->lock);
3105 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3106 					proc->pid, thread->pid, tmp->debug_id,
3107 					tmp->to_proc ? tmp->to_proc->pid : 0,
3108 					tmp->to_thread ?
3109 					tmp->to_thread->pid : 0);
3110 				spin_unlock(&tmp->lock);
3111 				binder_inner_proc_unlock(proc);
3112 				return_error = BR_FAILED_REPLY;
3113 				return_error_param = -EPROTO;
3114 				return_error_line = __LINE__;
3115 				goto err_bad_call_stack;
3116 			}
3117 			while (tmp) {
3118 				struct binder_thread *from;
3119 
3120 				spin_lock(&tmp->lock);
3121 				from = tmp->from;
3122 				if (from && from->proc == target_proc) {
3123 					atomic_inc(&from->tmp_ref);
3124 					target_thread = from;
3125 					spin_unlock(&tmp->lock);
3126 					break;
3127 				}
3128 				spin_unlock(&tmp->lock);
3129 				tmp = tmp->from_parent;
3130 			}
3131 		}
3132 		binder_inner_proc_unlock(proc);
3133 	}
3134 	if (target_thread)
3135 		e->to_thread = target_thread->pid;
3136 	e->to_proc = target_proc->pid;
3137 
3138 	/* TODO: reuse incoming transaction for reply */
3139 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3140 	if (t == NULL) {
3141 		binder_txn_error("%d:%d cannot allocate transaction\n",
3142 			thread->pid, proc->pid);
3143 		return_error = BR_FAILED_REPLY;
3144 		return_error_param = -ENOMEM;
3145 		return_error_line = __LINE__;
3146 		goto err_alloc_t_failed;
3147 	}
3148 	INIT_LIST_HEAD(&t->fd_fixups);
3149 	binder_stats_created(BINDER_STAT_TRANSACTION);
3150 	spin_lock_init(&t->lock);
3151 
3152 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3153 	if (tcomplete == NULL) {
3154 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3155 			thread->pid, proc->pid);
3156 		return_error = BR_FAILED_REPLY;
3157 		return_error_param = -ENOMEM;
3158 		return_error_line = __LINE__;
3159 		goto err_alloc_tcomplete_failed;
3160 	}
3161 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3162 
3163 	t->debug_id = t_debug_id;
3164 	t->start_time = t_start_time;
3165 
3166 	if (reply)
3167 		binder_debug(BINDER_DEBUG_TRANSACTION,
3168 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3169 			     proc->pid, thread->pid, t->debug_id,
3170 			     target_proc->pid, target_thread->pid,
3171 			     (u64)tr->data.ptr.buffer,
3172 			     (u64)tr->data.ptr.offsets,
3173 			     (u64)tr->data_size, (u64)tr->offsets_size,
3174 			     (u64)extra_buffers_size);
3175 	else
3176 		binder_debug(BINDER_DEBUG_TRANSACTION,
3177 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3178 			     proc->pid, thread->pid, t->debug_id,
3179 			     target_proc->pid, target_node->debug_id,
3180 			     (u64)tr->data.ptr.buffer,
3181 			     (u64)tr->data.ptr.offsets,
3182 			     (u64)tr->data_size, (u64)tr->offsets_size,
3183 			     (u64)extra_buffers_size);
3184 
3185 	if (!reply && !(tr->flags & TF_ONE_WAY))
3186 		t->from = thread;
3187 	else
3188 		t->from = NULL;
3189 	t->from_pid = proc->pid;
3190 	t->from_tid = thread->pid;
3191 	t->sender_euid = task_euid(proc->tsk);
3192 	t->to_proc = target_proc;
3193 	t->to_thread = target_thread;
3194 	t->code = tr->code;
3195 	t->flags = tr->flags;
3196 	t->priority = task_nice(current);
3197 
3198 	if (target_node && target_node->txn_security_ctx) {
3199 		u32 secid;
3200 		size_t added_size;
3201 
3202 		security_cred_getsecid(proc->cred, &secid);
3203 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3204 		if (ret) {
3205 			binder_txn_error("%d:%d failed to get security context\n",
3206 				thread->pid, proc->pid);
3207 			return_error = BR_FAILED_REPLY;
3208 			return_error_param = ret;
3209 			return_error_line = __LINE__;
3210 			goto err_get_secctx_failed;
3211 		}
3212 		added_size = ALIGN(secctx_sz, sizeof(u64));
3213 		extra_buffers_size += added_size;
3214 		if (extra_buffers_size < added_size) {
3215 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3216 				thread->pid, proc->pid);
3217 			return_error = BR_FAILED_REPLY;
3218 			return_error_param = -EINVAL;
3219 			return_error_line = __LINE__;
3220 			goto err_bad_extra_size;
3221 		}
3222 	}
3223 
3224 	trace_binder_transaction(reply, t, target_node);
3225 
3226 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3227 		tr->offsets_size, extra_buffers_size,
3228 		!reply && (t->flags & TF_ONE_WAY));
3229 	if (IS_ERR(t->buffer)) {
3230 		char *s;
3231 
3232 		ret = PTR_ERR(t->buffer);
3233 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3234 			: (ret == -ENOSPC) ? ": no space left"
3235 			: (ret == -ENOMEM) ? ": memory allocation failed"
3236 			: "";
3237 		binder_txn_error("cannot allocate buffer%s", s);
3238 
3239 		return_error_param = PTR_ERR(t->buffer);
3240 		return_error = return_error_param == -ESRCH ?
3241 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3242 		return_error_line = __LINE__;
3243 		t->buffer = NULL;
3244 		goto err_binder_alloc_buf_failed;
3245 	}
3246 	if (secctx) {
3247 		int err;
3248 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3249 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3250 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3251 				    ALIGN(secctx_sz, sizeof(u64));
3252 
3253 		t->security_ctx = t->buffer->user_data + buf_offset;
3254 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3255 						  t->buffer, buf_offset,
3256 						  secctx, secctx_sz);
3257 		if (err) {
3258 			t->security_ctx = 0;
3259 			WARN_ON(1);
3260 		}
3261 		security_release_secctx(secctx, secctx_sz);
3262 		secctx = NULL;
3263 	}
3264 	t->buffer->debug_id = t->debug_id;
3265 	t->buffer->transaction = t;
3266 	t->buffer->target_node = target_node;
3267 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3268 	trace_binder_transaction_alloc_buf(t->buffer);
3269 
3270 	if (binder_alloc_copy_user_to_buffer(
3271 				&target_proc->alloc,
3272 				t->buffer,
3273 				ALIGN(tr->data_size, sizeof(void *)),
3274 				(const void __user *)
3275 					(uintptr_t)tr->data.ptr.offsets,
3276 				tr->offsets_size)) {
3277 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3278 				proc->pid, thread->pid);
3279 		return_error = BR_FAILED_REPLY;
3280 		return_error_param = -EFAULT;
3281 		return_error_line = __LINE__;
3282 		goto err_copy_data_failed;
3283 	}
3284 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3285 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3286 				proc->pid, thread->pid, (u64)tr->offsets_size);
3287 		return_error = BR_FAILED_REPLY;
3288 		return_error_param = -EINVAL;
3289 		return_error_line = __LINE__;
3290 		goto err_bad_offset;
3291 	}
3292 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3293 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3294 				  proc->pid, thread->pid,
3295 				  (u64)extra_buffers_size);
3296 		return_error = BR_FAILED_REPLY;
3297 		return_error_param = -EINVAL;
3298 		return_error_line = __LINE__;
3299 		goto err_bad_offset;
3300 	}
3301 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3302 	buffer_offset = off_start_offset;
3303 	off_end_offset = off_start_offset + tr->offsets_size;
3304 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3305 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3306 		ALIGN(secctx_sz, sizeof(u64));
3307 	off_min = 0;
3308 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3309 	     buffer_offset += sizeof(binder_size_t)) {
3310 		struct binder_object_header *hdr;
3311 		size_t object_size;
3312 		struct binder_object object;
3313 		binder_size_t object_offset;
3314 		binder_size_t copy_size;
3315 
3316 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3317 						  &object_offset,
3318 						  t->buffer,
3319 						  buffer_offset,
3320 						  sizeof(object_offset))) {
3321 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3322 				proc->pid, thread->pid);
3323 			return_error = BR_FAILED_REPLY;
3324 			return_error_param = -EINVAL;
3325 			return_error_line = __LINE__;
3326 			goto err_bad_offset;
3327 		}
3328 
3329 		/*
3330 		 * Copy the source user buffer up to the next object
3331 		 * that will be processed.
3332 		 */
3333 		copy_size = object_offset - user_offset;
3334 		if (copy_size && (user_offset > object_offset ||
3335 				binder_alloc_copy_user_to_buffer(
3336 					&target_proc->alloc,
3337 					t->buffer, user_offset,
3338 					user_buffer + user_offset,
3339 					copy_size))) {
3340 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3341 					proc->pid, thread->pid);
3342 			return_error = BR_FAILED_REPLY;
3343 			return_error_param = -EFAULT;
3344 			return_error_line = __LINE__;
3345 			goto err_copy_data_failed;
3346 		}
3347 		object_size = binder_get_object(target_proc, user_buffer,
3348 				t->buffer, object_offset, &object);
3349 		if (object_size == 0 || object_offset < off_min) {
3350 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3351 					  proc->pid, thread->pid,
3352 					  (u64)object_offset,
3353 					  (u64)off_min,
3354 					  (u64)t->buffer->data_size);
3355 			return_error = BR_FAILED_REPLY;
3356 			return_error_param = -EINVAL;
3357 			return_error_line = __LINE__;
3358 			goto err_bad_offset;
3359 		}
3360 		/*
3361 		 * Advance the source offset to the next buffer
3362 		 * fragment to be copied.
3363 		 */
3364 		user_offset = object_offset + object_size;
3365 
3366 		hdr = &object.hdr;
3367 		off_min = object_offset + object_size;
3368 		switch (hdr->type) {
3369 		case BINDER_TYPE_BINDER:
3370 		case BINDER_TYPE_WEAK_BINDER: {
3371 			struct flat_binder_object *fp;
3372 
3373 			fp = to_flat_binder_object(hdr);
3374 			ret = binder_translate_binder(fp, t, thread);
3375 
3376 			if (ret < 0 ||
3377 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3378 							t->buffer,
3379 							object_offset,
3380 							fp, sizeof(*fp))) {
3381 				binder_txn_error("%d:%d translate binder failed\n",
3382 					proc->pid, thread->pid);
3383 				return_error = BR_FAILED_REPLY;
3384 				return_error_param = ret;
3385 				return_error_line = __LINE__;
3386 				goto err_translate_failed;
3387 			}
3388 		} break;
3389 		case BINDER_TYPE_HANDLE:
3390 		case BINDER_TYPE_WEAK_HANDLE: {
3391 			struct flat_binder_object *fp;
3392 
3393 			fp = to_flat_binder_object(hdr);
3394 			ret = binder_translate_handle(fp, t, thread);
3395 			if (ret < 0 ||
3396 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3397 							t->buffer,
3398 							object_offset,
3399 							fp, sizeof(*fp))) {
3400 				binder_txn_error("%d:%d translate handle failed\n",
3401 					proc->pid, thread->pid);
3402 				return_error = BR_FAILED_REPLY;
3403 				return_error_param = ret;
3404 				return_error_line = __LINE__;
3405 				goto err_translate_failed;
3406 			}
3407 		} break;
3408 
3409 		case BINDER_TYPE_FD: {
3410 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3411 			binder_size_t fd_offset = object_offset +
3412 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3413 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3414 						      thread, in_reply_to);
3415 
3416 			fp->pad_binder = 0;
3417 			if (ret < 0 ||
3418 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3419 							t->buffer,
3420 							object_offset,
3421 							fp, sizeof(*fp))) {
3422 				binder_txn_error("%d:%d translate fd failed\n",
3423 					proc->pid, thread->pid);
3424 				return_error = BR_FAILED_REPLY;
3425 				return_error_param = ret;
3426 				return_error_line = __LINE__;
3427 				goto err_translate_failed;
3428 			}
3429 		} break;
3430 		case BINDER_TYPE_FDA: {
3431 			struct binder_object ptr_object;
3432 			binder_size_t parent_offset;
3433 			struct binder_object user_object;
3434 			size_t user_parent_size;
3435 			struct binder_fd_array_object *fda =
3436 				to_binder_fd_array_object(hdr);
3437 			size_t num_valid = (buffer_offset - off_start_offset) /
3438 						sizeof(binder_size_t);
3439 			struct binder_buffer_object *parent =
3440 				binder_validate_ptr(target_proc, t->buffer,
3441 						    &ptr_object, fda->parent,
3442 						    off_start_offset,
3443 						    &parent_offset,
3444 						    num_valid);
3445 			if (!parent) {
3446 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3447 						  proc->pid, thread->pid);
3448 				return_error = BR_FAILED_REPLY;
3449 				return_error_param = -EINVAL;
3450 				return_error_line = __LINE__;
3451 				goto err_bad_parent;
3452 			}
3453 			if (!binder_validate_fixup(target_proc, t->buffer,
3454 						   off_start_offset,
3455 						   parent_offset,
3456 						   fda->parent_offset,
3457 						   last_fixup_obj_off,
3458 						   last_fixup_min_off)) {
3459 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3460 						  proc->pid, thread->pid);
3461 				return_error = BR_FAILED_REPLY;
3462 				return_error_param = -EINVAL;
3463 				return_error_line = __LINE__;
3464 				goto err_bad_parent;
3465 			}
3466 			/*
3467 			 * We need to read the user version of the parent
3468 			 * object to get the original user offset
3469 			 */
3470 			user_parent_size =
3471 				binder_get_object(proc, user_buffer, t->buffer,
3472 						  parent_offset, &user_object);
3473 			if (user_parent_size != sizeof(user_object.bbo)) {
3474 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3475 						  proc->pid, thread->pid,
3476 						  user_parent_size,
3477 						  sizeof(user_object.bbo));
3478 				return_error = BR_FAILED_REPLY;
3479 				return_error_param = -EINVAL;
3480 				return_error_line = __LINE__;
3481 				goto err_bad_parent;
3482 			}
3483 			ret = binder_translate_fd_array(&pf_head, fda,
3484 							user_buffer, parent,
3485 							&user_object.bbo, t,
3486 							thread, in_reply_to);
3487 			if (!ret)
3488 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3489 								  t->buffer,
3490 								  object_offset,
3491 								  fda, sizeof(*fda));
3492 			if (ret) {
3493 				binder_txn_error("%d:%d translate fd array failed\n",
3494 					proc->pid, thread->pid);
3495 				return_error = BR_FAILED_REPLY;
3496 				return_error_param = ret > 0 ? -EINVAL : ret;
3497 				return_error_line = __LINE__;
3498 				goto err_translate_failed;
3499 			}
3500 			last_fixup_obj_off = parent_offset;
3501 			last_fixup_min_off =
3502 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3503 		} break;
3504 		case BINDER_TYPE_PTR: {
3505 			struct binder_buffer_object *bp =
3506 				to_binder_buffer_object(hdr);
3507 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3508 			size_t num_valid;
3509 
3510 			if (bp->length > buf_left) {
3511 				binder_user_error("%d:%d got transaction with too large buffer\n",
3512 						  proc->pid, thread->pid);
3513 				return_error = BR_FAILED_REPLY;
3514 				return_error_param = -EINVAL;
3515 				return_error_line = __LINE__;
3516 				goto err_bad_offset;
3517 			}
3518 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3519 				(const void __user *)(uintptr_t)bp->buffer,
3520 				bp->length);
3521 			if (ret) {
3522 				binder_txn_error("%d:%d deferred copy failed\n",
3523 					proc->pid, thread->pid);
3524 				return_error = BR_FAILED_REPLY;
3525 				return_error_param = ret;
3526 				return_error_line = __LINE__;
3527 				goto err_translate_failed;
3528 			}
3529 			/* Fixup buffer pointer to target proc address space */
3530 			bp->buffer = t->buffer->user_data + sg_buf_offset;
3531 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3532 
3533 			num_valid = (buffer_offset - off_start_offset) /
3534 					sizeof(binder_size_t);
3535 			ret = binder_fixup_parent(&pf_head, t,
3536 						  thread, bp,
3537 						  off_start_offset,
3538 						  num_valid,
3539 						  last_fixup_obj_off,
3540 						  last_fixup_min_off);
3541 			if (ret < 0 ||
3542 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3543 							t->buffer,
3544 							object_offset,
3545 							bp, sizeof(*bp))) {
3546 				binder_txn_error("%d:%d failed to fixup parent\n",
3547 					proc->pid, thread->pid);
3548 				return_error = BR_FAILED_REPLY;
3549 				return_error_param = ret;
3550 				return_error_line = __LINE__;
3551 				goto err_translate_failed;
3552 			}
3553 			last_fixup_obj_off = object_offset;
3554 			last_fixup_min_off = 0;
3555 		} break;
3556 		default:
3557 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3558 				proc->pid, thread->pid, hdr->type);
3559 			return_error = BR_FAILED_REPLY;
3560 			return_error_param = -EINVAL;
3561 			return_error_line = __LINE__;
3562 			goto err_bad_object_type;
3563 		}
3564 	}
3565 	/* Done processing objects, copy the rest of the buffer */
3566 	if (binder_alloc_copy_user_to_buffer(
3567 				&target_proc->alloc,
3568 				t->buffer, user_offset,
3569 				user_buffer + user_offset,
3570 				tr->data_size - user_offset)) {
3571 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3572 				proc->pid, thread->pid);
3573 		return_error = BR_FAILED_REPLY;
3574 		return_error_param = -EFAULT;
3575 		return_error_line = __LINE__;
3576 		goto err_copy_data_failed;
3577 	}
3578 
3579 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3580 					    &sgc_head, &pf_head);
3581 	if (ret) {
3582 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3583 				  proc->pid, thread->pid);
3584 		return_error = BR_FAILED_REPLY;
3585 		return_error_param = ret;
3586 		return_error_line = __LINE__;
3587 		goto err_copy_data_failed;
3588 	}
3589 	if (t->buffer->oneway_spam_suspect)
3590 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3591 	else
3592 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3593 	t->work.type = BINDER_WORK_TRANSACTION;
3594 
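	/*
	 * Deliver the transaction: a reply wakes the waiting caller
	 * directly, a synchronous call is pushed onto this thread's
	 * transaction stack before dispatch, and a oneway call is
	 * handed to the target proc (or queued on the node if an
	 * async transaction is already in flight there).
	 */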
3595 	if (reply) {
3596 		binder_enqueue_thread_work(thread, tcomplete);
3597 		binder_inner_proc_lock(target_proc);
3598 		if (target_thread->is_dead) {
3599 			return_error = BR_DEAD_REPLY;
3600 			binder_inner_proc_unlock(target_proc);
3601 			goto err_dead_proc_or_thread;
3602 		}
3603 		BUG_ON(t->buffer->async_transaction != 0);
3604 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3605 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3606 		target_proc->outstanding_txns++;
3607 		binder_inner_proc_unlock(target_proc);
3608 		wake_up_interruptible_sync(&target_thread->wait);
3609 		binder_free_transaction(in_reply_to);
3610 	} else if (!(t->flags & TF_ONE_WAY)) {
3611 		BUG_ON(t->buffer->async_transaction != 0);
3612 		binder_inner_proc_lock(proc);
3613 		/*
3614 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3615 		 * userspace immediately; this allows the target process to
3616 		 * immediately start processing this transaction, reducing
3617 		 * latency. We will then return the TRANSACTION_COMPLETE when
3618 		 * the target replies (or there is an error).
3619 		 */
3620 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3621 		t->need_reply = 1;
3622 		t->from_parent = thread->transaction_stack;
3623 		thread->transaction_stack = t;
3624 		binder_inner_proc_unlock(proc);
3625 		return_error = binder_proc_transaction(t,
3626 				target_proc, target_thread);
3627 		if (return_error) {
3628 			binder_inner_proc_lock(proc);
3629 			binder_pop_transaction_ilocked(thread, t);
3630 			binder_inner_proc_unlock(proc);
3631 			goto err_dead_proc_or_thread;
3632 		}
3633 	} else {
3634 		BUG_ON(target_node == NULL);
3635 		BUG_ON(t->buffer->async_transaction != 1);
3636 		return_error = binder_proc_transaction(t, target_proc, NULL);
3637 		/*
3638 		 * Let the caller know when async transaction reaches a frozen
3639 		 * process and is put in a pending queue, waiting for the target
3640 		 * process to be unfrozen.
3641 		 */
3642 		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3643 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3644 		binder_enqueue_thread_work(thread, tcomplete);
3645 		if (return_error &&
3646 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3647 			goto err_dead_proc_or_thread;
3648 	}
3649 	if (target_thread)
3650 		binder_thread_dec_tmpref(target_thread);
3651 	binder_proc_dec_tmpref(target_proc);
3652 	if (target_node)
3653 		binder_dec_node_tmpref(target_node);
3654 	/*
3655 	 * write barrier to synchronize with initialization
3656 	 * of log entry
3657 	 */
3658 	smp_wmb();
3659 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3660 	return;
3661 
3662 err_dead_proc_or_thread:
3663 	binder_txn_error("%d:%d dead process or thread\n",
3664 		proc->pid, thread->pid);
3665 	return_error_line = __LINE__;
3666 	binder_dequeue_work(proc, tcomplete);
3667 err_translate_failed:
3668 err_bad_object_type:
3669 err_bad_offset:
3670 err_bad_parent:
3671 err_copy_data_failed:
3672 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3673 	binder_free_txn_fixups(t);
3674 	trace_binder_transaction_failed_buffer_release(t->buffer);
3675 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3676 					  buffer_offset, true);
3677 	if (target_node)
3678 		binder_dec_node_tmpref(target_node);
3679 	target_node = NULL;
3680 	t->buffer->transaction = NULL;
3681 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3682 err_binder_alloc_buf_failed:
3683 err_bad_extra_size:
3684 	if (secctx)
3685 		security_release_secctx(secctx, secctx_sz);
3686 err_get_secctx_failed:
3687 	kfree(tcomplete);
3688 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3689 err_alloc_tcomplete_failed:
3690 	if (trace_binder_txn_latency_free_enabled())
3691 		binder_txn_latency_free(t);
3692 	kfree(t);
3693 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3694 err_alloc_t_failed:
3695 err_bad_todo_list:
3696 err_bad_call_stack:
3697 err_empty_call_stack:
3698 err_dead_binder:
3699 err_invalid_target_handle:
3700 	if (target_node) {
3701 		binder_dec_node(target_node, 1, 0);
3702 		binder_dec_node_tmpref(target_node);
3703 	}
3704 
3705 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3706 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3707 		     proc->pid, thread->pid, reply ? "reply" :
3708 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3709 		     target_proc ? target_proc->pid : 0,
3710 		     target_thread ? target_thread->pid : 0,
3711 		     t_debug_id, return_error, return_error_param,
3712 		     (u64)tr->data_size, (u64)tr->offsets_size,
3713 		     return_error_line);
3714 
3715 	if (target_thread)
3716 		binder_thread_dec_tmpref(target_thread);
3717 	if (target_proc)
3718 		binder_proc_dec_tmpref(target_proc);
3719 
3720 	{
3721 		struct binder_transaction_log_entry *fe;
3722 
3723 		e->return_error = return_error;
3724 		e->return_error_param = return_error_param;
3725 		e->return_error_line = return_error_line;
3726 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3727 		*fe = *e;
3728 		/*
3729 		 * write barrier to synchronize with initialization
3730 		 * of log entry
3731 		 */
3732 		smp_wmb();
3733 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3734 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3735 	}
3736 
3737 	BUG_ON(thread->return_error.cmd != BR_OK);
3738 	if (in_reply_to) {
3739 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3740 				return_error, return_error_param);
3741 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3742 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3743 		binder_send_failed_reply(in_reply_to, return_error);
3744 	} else {
3745 		binder_inner_proc_lock(proc);
3746 		binder_set_extended_error(&thread->ee, t_debug_id,
3747 				return_error, return_error_param);
3748 		binder_inner_proc_unlock(proc);
3749 		thread->return_error.cmd = return_error;
3750 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3751 	}
3752 }
3753 
3754 /**
3755  * binder_free_buf() - free the specified buffer
3756  * @proc:	binder proc that owns buffer
3757  * @thread:	binder thread performing the buffer release
3758  * @buffer:	buffer to be freed
3759  * @is_failure:	true if the buffer is freed as part of a failed transaction
3760  *
3761  * If the buffer belongs to an async transaction, enqueue the next
3762  * async transaction from the node. In all cases, clean up the
3763  * buffer and free it.
3764  */
3765 static void
3766 binder_free_buf(struct binder_proc *proc,
3767 		struct binder_thread *thread,
3768 		struct binder_buffer *buffer, bool is_failure)
3769 {
3770 	binder_inner_proc_lock(proc);
3771 	if (buffer->transaction) {
3772 		buffer->transaction->buffer = NULL;
3773 		buffer->transaction = NULL;
3774 	}
3775 	binder_inner_proc_unlock(proc);
3776 	if (buffer->async_transaction && buffer->target_node) {
3777 		struct binder_node *buf_node;
3778 		struct binder_work *w;
3779 
3780 		buf_node = buffer->target_node;
3781 		binder_node_inner_lock(buf_node);
3782 		BUG_ON(!buf_node->has_async_transaction);
3783 		BUG_ON(buf_node->proc != proc);
3784 		w = binder_dequeue_work_head_ilocked(
3785 				&buf_node->async_todo);
3786 		if (!w) {
3787 			buf_node->has_async_transaction = false;
3788 		} else {
3789 			binder_enqueue_work_ilocked(
3790 					w, &proc->todo);
3791 			binder_wakeup_proc_ilocked(proc);
3792 		}
3793 		binder_node_inner_unlock(buf_node);
3794 	}
3795 	trace_binder_transaction_buffer_release(buffer);
3796 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
3797 	binder_alloc_free_buf(&proc->alloc, buffer);
3798 }
3799 
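/**
 * binder_thread_write() - consume BC_* commands from a userspace buffer
 * @proc:	binder proc owning this command stream
 * @thread:	binder thread issuing the commands
 * @binder_buffer: user address of the command buffer
 * @size:	total size of the command buffer in bytes
 * @consumed:	offset of the first unprocessed byte; advanced as each
 *		command is handled
 *
 * Each command is a 32-bit BC_* code optionally followed by a
 * command-specific payload. Processing stops when the buffer is
 * exhausted or a pending return error must be reported first.
 *
 * Illustrative userspace call (a sketch only; binder_fd is assumed to
 * be an open binder descriptor and error handling is omitted):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * Return: 0 on success, negative errno on failure.
 */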
3800 static int binder_thread_write(struct binder_proc *proc,
3801 			struct binder_thread *thread,
3802 			binder_uintptr_t binder_buffer, size_t size,
3803 			binder_size_t *consumed)
3804 {
3805 	uint32_t cmd;
3806 	struct binder_context *context = proc->context;
3807 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3808 	void __user *ptr = buffer + *consumed;
3809 	void __user *end = buffer + size;
3810 
3811 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3812 		int ret;
3813 
3814 		if (get_user(cmd, (uint32_t __user *)ptr))
3815 			return -EFAULT;
3816 		ptr += sizeof(uint32_t);
3817 		trace_binder_command(cmd);
3818 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3819 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3820 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3821 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3822 		}
3823 		switch (cmd) {
3824 		case BC_INCREFS:
3825 		case BC_ACQUIRE:
3826 		case BC_RELEASE:
3827 		case BC_DECREFS: {
3828 			uint32_t target;
3829 			const char *debug_string;
3830 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3831 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3832 			struct binder_ref_data rdata;
3833 
3834 			if (get_user(target, (uint32_t __user *)ptr))
3835 				return -EFAULT;
3836 
3837 			ptr += sizeof(uint32_t);
3838 			ret = -1;
3839 			if (increment && !target) {
3840 				struct binder_node *ctx_mgr_node;
3841 
3842 				mutex_lock(&context->context_mgr_node_lock);
3843 				ctx_mgr_node = context->binder_context_mgr_node;
3844 				if (ctx_mgr_node) {
3845 					if (ctx_mgr_node->proc == proc) {
3846 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3847 								  proc->pid, thread->pid);
3848 						mutex_unlock(&context->context_mgr_node_lock);
3849 						return -EINVAL;
3850 					}
3851 					ret = binder_inc_ref_for_node(
3852 							proc, ctx_mgr_node,
3853 							strong, NULL, &rdata);
3854 				}
3855 				mutex_unlock(&context->context_mgr_node_lock);
3856 			}
3857 			if (ret)
3858 				ret = binder_update_ref_for_handle(
3859 						proc, target, increment, strong,
3860 						&rdata);
3861 			if (!ret && rdata.desc != target) {
3862 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3863 					proc->pid, thread->pid,
3864 					target, rdata.desc);
3865 			}
3866 			switch (cmd) {
3867 			case BC_INCREFS:
3868 				debug_string = "IncRefs";
3869 				break;
3870 			case BC_ACQUIRE:
3871 				debug_string = "Acquire";
3872 				break;
3873 			case BC_RELEASE:
3874 				debug_string = "Release";
3875 				break;
3876 			case BC_DECREFS:
3877 			default:
3878 				debug_string = "DecRefs";
3879 				break;
3880 			}
3881 			if (ret) {
3882 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3883 					proc->pid, thread->pid, debug_string,
3884 					strong, target, ret);
3885 				break;
3886 			}
3887 			binder_debug(BINDER_DEBUG_USER_REFS,
3888 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3889 				     proc->pid, thread->pid, debug_string,
3890 				     rdata.debug_id, rdata.desc, rdata.strong,
3891 				     rdata.weak);
3892 			break;
3893 		}
3894 		case BC_INCREFS_DONE:
3895 		case BC_ACQUIRE_DONE: {
3896 			binder_uintptr_t node_ptr;
3897 			binder_uintptr_t cookie;
3898 			struct binder_node *node;
3899 			bool free_node;
3900 
3901 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3902 				return -EFAULT;
3903 			ptr += sizeof(binder_uintptr_t);
3904 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3905 				return -EFAULT;
3906 			ptr += sizeof(binder_uintptr_t);
3907 			node = binder_get_node(proc, node_ptr);
3908 			if (node == NULL) {
3909 				binder_user_error("%d:%d %s u%016llx no match\n",
3910 					proc->pid, thread->pid,
3911 					cmd == BC_INCREFS_DONE ?
3912 					"BC_INCREFS_DONE" :
3913 					"BC_ACQUIRE_DONE",
3914 					(u64)node_ptr);
3915 				break;
3916 			}
3917 			if (cookie != node->cookie) {
3918 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3919 					proc->pid, thread->pid,
3920 					cmd == BC_INCREFS_DONE ?
3921 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3922 					(u64)node_ptr, node->debug_id,
3923 					(u64)cookie, (u64)node->cookie);
3924 				binder_put_node(node);
3925 				break;
3926 			}
3927 			binder_node_inner_lock(node);
3928 			if (cmd == BC_ACQUIRE_DONE) {
3929 				if (node->pending_strong_ref == 0) {
3930 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3931 						proc->pid, thread->pid,
3932 						node->debug_id);
3933 					binder_node_inner_unlock(node);
3934 					binder_put_node(node);
3935 					break;
3936 				}
3937 				node->pending_strong_ref = 0;
3938 			} else {
3939 				if (node->pending_weak_ref == 0) {
3940 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3941 						proc->pid, thread->pid,
3942 						node->debug_id);
3943 					binder_node_inner_unlock(node);
3944 					binder_put_node(node);
3945 					break;
3946 				}
3947 				node->pending_weak_ref = 0;
3948 			}
3949 			free_node = binder_dec_node_nilocked(node,
3950 					cmd == BC_ACQUIRE_DONE, 0);
3951 			WARN_ON(free_node);
3952 			binder_debug(BINDER_DEBUG_USER_REFS,
3953 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3954 				     proc->pid, thread->pid,
3955 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3956 				     node->debug_id, node->local_strong_refs,
3957 				     node->local_weak_refs, node->tmp_refs);
3958 			binder_node_inner_unlock(node);
3959 			binder_put_node(node);
3960 			break;
3961 		}
3962 		case BC_ATTEMPT_ACQUIRE:
3963 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3964 			return -EINVAL;
3965 		case BC_ACQUIRE_RESULT:
3966 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3967 			return -EINVAL;
3968 
3969 		case BC_FREE_BUFFER: {
3970 			binder_uintptr_t data_ptr;
3971 			struct binder_buffer *buffer;
3972 
3973 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3974 				return -EFAULT;
3975 			ptr += sizeof(binder_uintptr_t);
3976 
3977 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3978 							      data_ptr);
3979 			if (IS_ERR_OR_NULL(buffer)) {
3980 				if (PTR_ERR(buffer) == -EPERM) {
3981 					binder_user_error(
3982 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3983 						proc->pid, thread->pid,
3984 						(u64)data_ptr);
3985 				} else {
3986 					binder_user_error(
3987 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3988 						proc->pid, thread->pid,
3989 						(u64)data_ptr);
3990 				}
3991 				break;
3992 			}
3993 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3994 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3995 				     proc->pid, thread->pid, (u64)data_ptr,
3996 				     buffer->debug_id,
3997 				     buffer->transaction ? "active" : "finished");
3998 			binder_free_buf(proc, thread, buffer, false);
3999 			break;
4000 		}
4001 
4002 		case BC_TRANSACTION_SG:
4003 		case BC_REPLY_SG: {
4004 			struct binder_transaction_data_sg tr;
4005 
4006 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4007 				return -EFAULT;
4008 			ptr += sizeof(tr);
4009 			binder_transaction(proc, thread, &tr.transaction_data,
4010 					   cmd == BC_REPLY_SG, tr.buffers_size);
4011 			break;
4012 		}
4013 		case BC_TRANSACTION:
4014 		case BC_REPLY: {
4015 			struct binder_transaction_data tr;
4016 
4017 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4018 				return -EFAULT;
4019 			ptr += sizeof(tr);
4020 			binder_transaction(proc, thread, &tr,
4021 					   cmd == BC_REPLY, 0);
4022 			break;
4023 		}
4024 
4025 		case BC_REGISTER_LOOPER:
4026 			binder_debug(BINDER_DEBUG_THREADS,
4027 				     "%d:%d BC_REGISTER_LOOPER\n",
4028 				     proc->pid, thread->pid);
4029 			binder_inner_proc_lock(proc);
4030 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4031 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4032 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4033 					proc->pid, thread->pid);
4034 			} else if (proc->requested_threads == 0) {
4035 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4036 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4037 					proc->pid, thread->pid);
4038 			} else {
4039 				proc->requested_threads--;
4040 				proc->requested_threads_started++;
4041 			}
4042 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4043 			binder_inner_proc_unlock(proc);
4044 			break;
4045 		case BC_ENTER_LOOPER:
4046 			binder_debug(BINDER_DEBUG_THREADS,
4047 				     "%d:%d BC_ENTER_LOOPER\n",
4048 				     proc->pid, thread->pid);
4049 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4050 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4051 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4052 					proc->pid, thread->pid);
4053 			}
4054 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4055 			break;
4056 		case BC_EXIT_LOOPER:
4057 			binder_debug(BINDER_DEBUG_THREADS,
4058 				     "%d:%d BC_EXIT_LOOPER\n",
4059 				     proc->pid, thread->pid);
4060 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4061 			break;
4062 
4063 		case BC_REQUEST_DEATH_NOTIFICATION:
4064 		case BC_CLEAR_DEATH_NOTIFICATION: {
4065 			uint32_t target;
4066 			binder_uintptr_t cookie;
4067 			struct binder_ref *ref;
4068 			struct binder_ref_death *death = NULL;
4069 
4070 			if (get_user(target, (uint32_t __user *)ptr))
4071 				return -EFAULT;
4072 			ptr += sizeof(uint32_t);
4073 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4074 				return -EFAULT;
4075 			ptr += sizeof(binder_uintptr_t);
4076 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4077 				/*
4078 				 * Allocate memory for death notification
4079 				 * before taking lock
4080 				 */
4081 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4082 				if (death == NULL) {
4083 					WARN_ON(thread->return_error.cmd !=
4084 						BR_OK);
4085 					thread->return_error.cmd = BR_ERROR;
4086 					binder_enqueue_thread_work(
4087 						thread,
4088 						&thread->return_error.work);
4089 					binder_debug(
4090 						BINDER_DEBUG_FAILED_TRANSACTION,
4091 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4092 						proc->pid, thread->pid);
4093 					break;
4094 				}
4095 			}
4096 			binder_proc_lock(proc);
4097 			ref = binder_get_ref_olocked(proc, target, false);
4098 			if (ref == NULL) {
4099 				binder_user_error("%d:%d %s invalid ref %d\n",
4100 					proc->pid, thread->pid,
4101 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4102 					"BC_REQUEST_DEATH_NOTIFICATION" :
4103 					"BC_CLEAR_DEATH_NOTIFICATION",
4104 					target);
4105 				binder_proc_unlock(proc);
4106 				kfree(death);
4107 				break;
4108 			}
4109 
4110 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4111 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4112 				     proc->pid, thread->pid,
4113 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4114 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4115 				     "BC_CLEAR_DEATH_NOTIFICATION",
4116 				     (u64)cookie, ref->data.debug_id,
4117 				     ref->data.desc, ref->data.strong,
4118 				     ref->data.weak, ref->node->debug_id);
4119 
4120 			binder_node_lock(ref->node);
4121 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4122 				if (ref->death) {
4123 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4124 						proc->pid, thread->pid);
4125 					binder_node_unlock(ref->node);
4126 					binder_proc_unlock(proc);
4127 					kfree(death);
4128 					break;
4129 				}
4130 				binder_stats_created(BINDER_STAT_DEATH);
4131 				INIT_LIST_HEAD(&death->work.entry);
4132 				death->cookie = cookie;
4133 				ref->death = death;
4134 				if (ref->node->proc == NULL) {
4135 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4136 
4137 					binder_inner_proc_lock(proc);
4138 					binder_enqueue_work_ilocked(
4139 						&ref->death->work, &proc->todo);
4140 					binder_wakeup_proc_ilocked(proc);
4141 					binder_inner_proc_unlock(proc);
4142 				}
4143 			} else {
4144 				if (ref->death == NULL) {
4145 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4146 						proc->pid, thread->pid);
4147 					binder_node_unlock(ref->node);
4148 					binder_proc_unlock(proc);
4149 					break;
4150 				}
4151 				death = ref->death;
4152 				if (death->cookie != cookie) {
4153 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4154 						proc->pid, thread->pid,
4155 						(u64)death->cookie,
4156 						(u64)cookie);
4157 					binder_node_unlock(ref->node);
4158 					binder_proc_unlock(proc);
4159 					break;
4160 				}
4161 				ref->death = NULL;
4162 				binder_inner_proc_lock(proc);
4163 				if (list_empty(&death->work.entry)) {
4164 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4165 					if (thread->looper &
4166 					    (BINDER_LOOPER_STATE_REGISTERED |
4167 					     BINDER_LOOPER_STATE_ENTERED))
4168 						binder_enqueue_thread_work_ilocked(
4169 								thread,
4170 								&death->work);
4171 					else {
4172 						binder_enqueue_work_ilocked(
4173 								&death->work,
4174 								&proc->todo);
4175 						binder_wakeup_proc_ilocked(
4176 								proc);
4177 					}
4178 				} else {
4179 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4180 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4181 				}
4182 				binder_inner_proc_unlock(proc);
4183 			}
4184 			binder_node_unlock(ref->node);
4185 			binder_proc_unlock(proc);
4186 		} break;
4187 		case BC_DEAD_BINDER_DONE: {
4188 			struct binder_work *w;
4189 			binder_uintptr_t cookie;
4190 			struct binder_ref_death *death = NULL;
4191 
4192 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4193 				return -EFAULT;
4194 
4195 			ptr += sizeof(cookie);
4196 			binder_inner_proc_lock(proc);
4197 			list_for_each_entry(w, &proc->delivered_death,
4198 					    entry) {
4199 				struct binder_ref_death *tmp_death =
4200 					container_of(w,
4201 						     struct binder_ref_death,
4202 						     work);
4203 
4204 				if (tmp_death->cookie == cookie) {
4205 					death = tmp_death;
4206 					break;
4207 				}
4208 			}
4209 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4210 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4211 				     proc->pid, thread->pid, (u64)cookie,
4212 				     death);
4213 			if (death == NULL) {
4214 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4215 					proc->pid, thread->pid, (u64)cookie);
4216 				binder_inner_proc_unlock(proc);
4217 				break;
4218 			}
4219 			binder_dequeue_work_ilocked(&death->work);
4220 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4221 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4222 				if (thread->looper &
4223 					(BINDER_LOOPER_STATE_REGISTERED |
4224 					 BINDER_LOOPER_STATE_ENTERED))
4225 					binder_enqueue_thread_work_ilocked(
4226 						thread, &death->work);
4227 				else {
4228 					binder_enqueue_work_ilocked(
4229 							&death->work,
4230 							&proc->todo);
4231 					binder_wakeup_proc_ilocked(proc);
4232 				}
4233 			}
4234 			binder_inner_proc_unlock(proc);
4235 		} break;
4236 
4237 		default:
4238 			pr_err("%d:%d unknown command %u\n",
4239 			       proc->pid, thread->pid, cmd);
4240 			return -EINVAL;
4241 		}
4242 		*consumed = ptr - buffer;
4243 	}
4244 	return 0;
4245 }
4246 
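/* Account a BR_* return code in the global, per-proc and per-thread stats. */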
4247 static void binder_stat_br(struct binder_proc *proc,
4248 			   struct binder_thread *thread, uint32_t cmd)
4249 {
4250 	trace_binder_return(cmd);
4251 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4252 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4253 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4254 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4255 	}
4256 }
4257 
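/**
 * binder_put_node_cmd() - write a node refcount command to userspace
 * @proc:	binder proc owning the node
 * @thread:	binder thread the command is delivered to
 * @ptrp:	in/out pointer into the userspace read buffer
 * @node_ptr:	userspace address associated with the node
 * @node_cookie: userspace cookie associated with the node
 * @node_debug_id: node debug id, used only for logging
 * @cmd:	one of BR_INCREFS, BR_ACQUIRE, BR_RELEASE, BR_DECREFS
 * @cmd_name:	human-readable command name for debug logging
 *
 * Return: 0 on success, -EFAULT if the user buffer cannot be written.
 */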
4258 static int binder_put_node_cmd(struct binder_proc *proc,
4259 			       struct binder_thread *thread,
4260 			       void __user **ptrp,
4261 			       binder_uintptr_t node_ptr,
4262 			       binder_uintptr_t node_cookie,
4263 			       int node_debug_id,
4264 			       uint32_t cmd, const char *cmd_name)
4265 {
4266 	void __user *ptr = *ptrp;
4267 
4268 	if (put_user(cmd, (uint32_t __user *)ptr))
4269 		return -EFAULT;
4270 	ptr += sizeof(uint32_t);
4271 
4272 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4273 		return -EFAULT;
4274 	ptr += sizeof(binder_uintptr_t);
4275 
4276 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4277 		return -EFAULT;
4278 	ptr += sizeof(binder_uintptr_t);
4279 
4280 	binder_stat_br(proc, thread, cmd);
4281 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4282 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4283 		     (u64)node_ptr, (u64)node_cookie);
4284 
4285 	*ptrp = ptr;
4286 	return 0;
4287 }
4288 
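/*
 * Sleep until this thread has work to do or a signal arrives. When
 * do_proc_work is true the thread also parks itself on
 * proc->waiting_threads so it can be handed process-wide work.
 */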
4289 static int binder_wait_for_work(struct binder_thread *thread,
4290 				bool do_proc_work)
4291 {
4292 	DEFINE_WAIT(wait);
4293 	struct binder_proc *proc = thread->proc;
4294 	int ret = 0;
4295 
4296 	binder_inner_proc_lock(proc);
4297 	for (;;) {
4298 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE | TASK_FREEZABLE);
4299 		if (binder_has_work_ilocked(thread, do_proc_work))
4300 			break;
4301 		if (do_proc_work)
4302 			list_add(&thread->waiting_thread_node,
4303 				 &proc->waiting_threads);
4304 		binder_inner_proc_unlock(proc);
4305 		schedule();
4306 		binder_inner_proc_lock(proc);
4307 		list_del_init(&thread->waiting_thread_node);
4308 		if (signal_pending(current)) {
4309 			ret = -EINTR;
4310 			break;
4311 		}
4312 	}
4313 	finish_wait(&thread->wait, &wait);
4314 	binder_inner_proc_unlock(proc);
4315 
4316 	return ret;
4317 }
4318 
4319 /**
4320  * binder_apply_fd_fixups() - finish fd translation
4321  * @proc:	binder_proc associated with @t->buffer
4322  * @t:		binder transaction with list of fd fixups
4323  *
4324  * Now that we are in the context of the transaction target
4325  * process, we can allocate and install fds. Process the
4326  * list of fds to translate and fixup the buffer with the
4327  * new fds first and only then install the files.
4328  *
4329  * If we fail to allocate an fd, skip the install and release
4330  * any fds that have already been allocated.
4331  */
4332 static int binder_apply_fd_fixups(struct binder_proc *proc,
4333 				  struct binder_transaction *t)
4334 {
4335 	struct binder_txn_fd_fixup *fixup, *tmp;
4336 	int ret = 0;
4337 
4338 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4339 		int fd = get_unused_fd_flags(O_CLOEXEC);
4340 
4341 		if (fd < 0) {
4342 			binder_debug(BINDER_DEBUG_TRANSACTION,
4343 				     "failed fd fixup txn %d fd %d\n",
4344 				     t->debug_id, fd);
4345 			ret = -ENOMEM;
4346 			goto err;
4347 		}
4348 		binder_debug(BINDER_DEBUG_TRANSACTION,
4349 			     "fd fixup txn %d fd %d\n",
4350 			     t->debug_id, fd);
4351 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4352 		fixup->target_fd = fd;
4353 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4354 						fixup->offset, &fd,
4355 						sizeof(u32))) {
4356 			ret = -EINVAL;
4357 			goto err;
4358 		}
4359 	}
4360 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4361 		fd_install(fixup->target_fd, fixup->file);
4362 		list_del(&fixup->fixup_entry);
4363 		kfree(fixup);
4364 	}
4365 
4366 	return ret;
4367 
4368 err:
4369 	binder_free_txn_fixups(t);
4370 	return ret;
4371 }
4372 
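/**
 * binder_thread_read() - fill a userspace buffer with BR_* work items
 * @proc:	binder proc for this thread
 * @thread:	binder thread draining its todo list
 * @binder_buffer: user address of the read buffer
 * @size:	size of the read buffer in bytes
 * @consumed:	bytes already written to the buffer; updated on return
 * @non_block:	if set, return -EAGAIN rather than sleeping when idle
 *
 * Blocks (unless @non_block) until work is available on the thread's
 * todo list or, for threads available for process work, the proc's
 * todo list. Transactions are copied out as BR_TRANSACTION[_SEC_CTX]
 * or BR_REPLY followed by the transaction data.
 *
 * Illustrative userspace read (a sketch only; binder_fd is assumed to
 * be an open binder descriptor and error handling is omitted):
 *
 *	char rbuf[256];
 *	struct binder_write_read bwr = {
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * Return: 0 on success, negative errno on failure.
 */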
4373 static int binder_thread_read(struct binder_proc *proc,
4374 			      struct binder_thread *thread,
4375 			      binder_uintptr_t binder_buffer, size_t size,
4376 			      binder_size_t *consumed, int non_block)
4377 {
4378 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4379 	void __user *ptr = buffer + *consumed;
4380 	void __user *end = buffer + size;
4381 
4382 	int ret = 0;
4383 	int wait_for_proc_work;
4384 
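	/*
	 * The first word of every fresh read buffer is BR_NOOP, so
	 * userspace always sees at least one well-formed return code.
	 */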
4385 	if (*consumed == 0) {
4386 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4387 			return -EFAULT;
4388 		ptr += sizeof(uint32_t);
4389 	}
4390 
4391 retry:
4392 	binder_inner_proc_lock(proc);
4393 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4394 	binder_inner_proc_unlock(proc);
4395 
4396 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4397 
4398 	trace_binder_wait_for_work(wait_for_proc_work,
4399 				   !!thread->transaction_stack,
4400 				   !binder_worklist_empty(proc, &thread->todo));
4401 	if (wait_for_proc_work) {
4402 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4403 					BINDER_LOOPER_STATE_ENTERED))) {
4404 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4405 				proc->pid, thread->pid, thread->looper);
4406 			wait_event_interruptible(binder_user_error_wait,
4407 						 binder_stop_on_user_error < 2);
4408 		}
4409 		binder_set_nice(proc->default_priority);
4410 	}
4411 
4412 	if (non_block) {
4413 		if (!binder_has_work(thread, wait_for_proc_work))
4414 			ret = -EAGAIN;
4415 	} else {
4416 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4417 	}
4418 
4419 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4420 
4421 	if (ret)
4422 		return ret;
4423 
4424 	while (1) {
4425 		uint32_t cmd;
4426 		struct binder_transaction_data_secctx tr;
4427 		struct binder_transaction_data *trd = &tr.transaction_data;
4428 		struct binder_work *w = NULL;
4429 		struct list_head *list = NULL;
4430 		struct binder_transaction *t = NULL;
4431 		struct binder_thread *t_from;
4432 		size_t trsize = sizeof(*trd);
4433 
4434 		binder_inner_proc_lock(proc);
4435 		if (!binder_worklist_empty_ilocked(&thread->todo))
4436 			list = &thread->todo;
4437 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4438 			   wait_for_proc_work)
4439 			list = &proc->todo;
4440 		else {
4441 			binder_inner_proc_unlock(proc);
4442 
4443 			/* no data added beyond the initial BR_NOOP */
4444 			if (ptr - buffer == sizeof(uint32_t) && !thread->looper_need_return)
4445 				goto retry;
4446 			break;
4447 		}
4448 
4449 		if (end - ptr < sizeof(tr) + sizeof(uint32_t)) {
4450 			binder_inner_proc_unlock(proc);
4451 			break;
4452 		}
4453 		w = binder_dequeue_work_head_ilocked(list);
4454 		if (binder_worklist_empty_ilocked(&thread->todo))
4455 			thread->process_todo = false;
4456 
4457 		switch (w->type) {
4458 		case BINDER_WORK_TRANSACTION: {
4459 			binder_inner_proc_unlock(proc);
4460 			t = container_of(w, struct binder_transaction, work);
4461 		} break;
4462 		case BINDER_WORK_RETURN_ERROR: {
4463 			struct binder_error *e = container_of(
4464 					w, struct binder_error, work);
4465 
4466 			WARN_ON(e->cmd == BR_OK);
4467 			binder_inner_proc_unlock(proc);
4468 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4469 				return -EFAULT;
4470 			cmd = e->cmd;
4471 			e->cmd = BR_OK;
4472 			ptr += sizeof(uint32_t);
4473 
4474 			binder_stat_br(proc, thread, cmd);
4475 		} break;
4476 		case BINDER_WORK_TRANSACTION_COMPLETE:
4477 		case BINDER_WORK_TRANSACTION_PENDING:
4478 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4479 			if (proc->oneway_spam_detection_enabled &&
4480 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4481 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4482 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4483 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4484 			else
4485 				cmd = BR_TRANSACTION_COMPLETE;
4486 			binder_inner_proc_unlock(proc);
4487 			kfree(w);
4488 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4489 			if (put_user(cmd, (uint32_t __user *)ptr))
4490 				return -EFAULT;
4491 			ptr += sizeof(uint32_t);
4492 
4493 			binder_stat_br(proc, thread, cmd);
4494 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4495 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4496 				     proc->pid, thread->pid);
4497 		} break;
4498 		case BINDER_WORK_NODE: {
4499 			struct binder_node *node = container_of(w, struct binder_node, work);
4500 			int strong, weak;
4501 			binder_uintptr_t node_ptr = node->ptr;
4502 			binder_uintptr_t node_cookie = node->cookie;
4503 			int node_debug_id = node->debug_id;
4504 			int has_weak_ref;
4505 			int has_strong_ref;
4506 			void __user *orig_ptr = ptr;
4507 
4508 			BUG_ON(proc != node->proc);
4509 			strong = node->internal_strong_refs ||
4510 					node->local_strong_refs;
4511 			weak = !hlist_empty(&node->refs) ||
4512 					node->local_weak_refs ||
4513 					node->tmp_refs || strong;
4514 			has_strong_ref = node->has_strong_ref;
4515 			has_weak_ref = node->has_weak_ref;
4516 
4517 			if (weak && !has_weak_ref) {
4518 				node->has_weak_ref = 1;
4519 				node->pending_weak_ref = 1;
4520 				node->local_weak_refs++;
4521 			}
4522 			if (strong && !has_strong_ref) {
4523 				node->has_strong_ref = 1;
4524 				node->pending_strong_ref = 1;
4525 				node->local_strong_refs++;
4526 			}
4527 			if (!strong && has_strong_ref)
4528 				node->has_strong_ref = 0;
4529 			if (!weak && has_weak_ref)
4530 				node->has_weak_ref = 0;
4531 			if (!weak && !strong) {
4532 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4533 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4534 					     proc->pid, thread->pid,
4535 					     node_debug_id,
4536 					     (u64)node_ptr,
4537 					     (u64)node_cookie);
4538 				rb_erase(&node->rb_node, &proc->nodes);
4539 				binder_inner_proc_unlock(proc);
4540 				binder_node_lock(node);
4541 				/*
4542 				 * Acquire the node lock before freeing the
4543 				 * node to serialize with other threads that
4544 				 * may have been holding the node lock while
4545 				 * decrementing this node (avoids race where
4546 				 * this thread frees while the other thread
4547 				 * is unlocking the node after the final
4548 				 * decrement)
4549 				 */
4550 				binder_node_unlock(node);
4551 				binder_free_node(node);
4552 			} else
4553 				binder_inner_proc_unlock(proc);
4554 
4555 			if (weak && !has_weak_ref)
4556 				ret = binder_put_node_cmd(
4557 						proc, thread, &ptr, node_ptr,
4558 						node_cookie, node_debug_id,
4559 						BR_INCREFS, "BR_INCREFS");
4560 			if (!ret && strong && !has_strong_ref)
4561 				ret = binder_put_node_cmd(
4562 						proc, thread, &ptr, node_ptr,
4563 						node_cookie, node_debug_id,
4564 						BR_ACQUIRE, "BR_ACQUIRE");
4565 			if (!ret && !strong && has_strong_ref)
4566 				ret = binder_put_node_cmd(
4567 						proc, thread, &ptr, node_ptr,
4568 						node_cookie, node_debug_id,
4569 						BR_RELEASE, "BR_RELEASE");
4570 			if (!ret && !weak && has_weak_ref)
4571 				ret = binder_put_node_cmd(
4572 						proc, thread, &ptr, node_ptr,
4573 						node_cookie, node_debug_id,
4574 						BR_DECREFS, "BR_DECREFS");
4575 			if (orig_ptr == ptr)
4576 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4577 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4578 					     proc->pid, thread->pid,
4579 					     node_debug_id,
4580 					     (u64)node_ptr,
4581 					     (u64)node_cookie);
4582 			if (ret)
4583 				return ret;
4584 		} break;
4585 		case BINDER_WORK_DEAD_BINDER:
4586 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4587 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4588 			struct binder_ref_death *death;
4589 			uint32_t cmd;
4590 			binder_uintptr_t cookie;
4591 
4592 			death = container_of(w, struct binder_ref_death, work);
4593 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4594 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4595 			else
4596 				cmd = BR_DEAD_BINDER;
4597 			cookie = death->cookie;
4598 
4599 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4600 				     "%d:%d %s %016llx\n",
4601 				      proc->pid, thread->pid,
4602 				      cmd == BR_DEAD_BINDER ?
4603 				      "BR_DEAD_BINDER" :
4604 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4605 				      (u64)cookie);
4606 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4607 				binder_inner_proc_unlock(proc);
4608 				kfree(death);
4609 				binder_stats_deleted(BINDER_STAT_DEATH);
4610 			} else {
4611 				binder_enqueue_work_ilocked(
4612 						w, &proc->delivered_death);
4613 				binder_inner_proc_unlock(proc);
4614 			}
4615 			if (put_user(cmd, (uint32_t __user *)ptr))
4616 				return -EFAULT;
4617 			ptr += sizeof(uint32_t);
4618 			if (put_user(cookie,
4619 				     (binder_uintptr_t __user *)ptr))
4620 				return -EFAULT;
4621 			ptr += sizeof(binder_uintptr_t);
4622 			binder_stat_br(proc, thread, cmd);
4623 			if (cmd == BR_DEAD_BINDER)
4624 				goto done; /* DEAD_BINDER notifications can cause transactions */
4625 		} break;
4626 		default:
4627 			binder_inner_proc_unlock(proc);
4628 			pr_err("%d:%d: bad work type %d\n",
4629 			       proc->pid, thread->pid, w->type);
4630 			break;
4631 		}
4632 
4633 		if (!t)
4634 			continue;
4635 
4636 		BUG_ON(t->buffer == NULL);
4637 		if (t->buffer->target_node) {
4638 			struct binder_node *target_node = t->buffer->target_node;
4639 
4640 			trd->target.ptr = target_node->ptr;
4641 			trd->cookie =  target_node->cookie;
4642 			t->saved_priority = task_nice(current);
4643 			if (t->priority < target_node->min_priority &&
4644 			    !(t->flags & TF_ONE_WAY))
4645 				binder_set_nice(t->priority);
4646 			else if (!(t->flags & TF_ONE_WAY) ||
4647 				 t->saved_priority > target_node->min_priority)
4648 				binder_set_nice(target_node->min_priority);
4649 			cmd = BR_TRANSACTION;
4650 		} else {
4651 			trd->target.ptr = 0;
4652 			trd->cookie = 0;
4653 			cmd = BR_REPLY;
4654 		}
4655 		trd->code = t->code;
4656 		trd->flags = t->flags;
4657 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4658 
4659 		t_from = binder_get_txn_from(t);
4660 		if (t_from) {
4661 			struct task_struct *sender = t_from->proc->tsk;
4662 
4663 			trd->sender_pid =
4664 				task_tgid_nr_ns(sender,
4665 						task_active_pid_ns(current));
4666 		} else {
4667 			trd->sender_pid = 0;
4668 		}
4669 
4670 		ret = binder_apply_fd_fixups(proc, t);
4671 		if (ret) {
4672 			struct binder_buffer *buffer = t->buffer;
4673 			bool oneway = !!(t->flags & TF_ONE_WAY);
4674 			int tid = t->debug_id;
4675 
4676 			if (t_from)
4677 				binder_thread_dec_tmpref(t_from);
4678 			buffer->transaction = NULL;
4679 			binder_cleanup_transaction(t, "fd fixups failed",
4680 						   BR_FAILED_REPLY);
4681 			binder_free_buf(proc, thread, buffer, true);
4682 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4683 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4684 				     proc->pid, thread->pid,
4685 				     oneway ? "async " :
4686 					(cmd == BR_REPLY ? "reply " : ""),
4687 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4688 			if (cmd == BR_REPLY) {
4689 				cmd = BR_FAILED_REPLY;
4690 				if (put_user(cmd, (uint32_t __user *)ptr))
4691 					return -EFAULT;
4692 				ptr += sizeof(uint32_t);
4693 				binder_stat_br(proc, thread, cmd);
4694 				break;
4695 			}
4696 			continue;
4697 		}
4698 		trd->data_size = t->buffer->data_size;
4699 		trd->offsets_size = t->buffer->offsets_size;
4700 		trd->data.ptr.buffer = t->buffer->user_data;
4701 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4702 					ALIGN(t->buffer->data_size,
4703 					    sizeof(void *));
4704 
4705 		tr.secctx = t->security_ctx;
4706 		if (t->security_ctx) {
4707 			cmd = BR_TRANSACTION_SEC_CTX;
4708 			trsize = sizeof(tr);
4709 		}
4710 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4711 			if (t_from)
4712 				binder_thread_dec_tmpref(t_from);
4713 
4714 			binder_cleanup_transaction(t, "put_user failed",
4715 						   BR_FAILED_REPLY);
4716 
4717 			return -EFAULT;
4718 		}
4719 		ptr += sizeof(uint32_t);
4720 		if (copy_to_user(ptr, &tr, trsize)) {
4721 			if (t_from)
4722 				binder_thread_dec_tmpref(t_from);
4723 
4724 			binder_cleanup_transaction(t, "copy_to_user failed",
4725 						   BR_FAILED_REPLY);
4726 
4727 			return -EFAULT;
4728 		}
4729 		ptr += trsize;
4730 
4731 		trace_binder_transaction_received(t);
4732 		binder_stat_br(proc, thread, cmd);
4733 		binder_debug(BINDER_DEBUG_TRANSACTION,
4734 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4735 			     proc->pid, thread->pid,
4736 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4737 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4738 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4739 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4740 			     t_from ? t_from->pid : 0, cmd,
4741 			     t->buffer->data_size, t->buffer->offsets_size,
4742 			     (u64)trd->data.ptr.buffer,
4743 			     (u64)trd->data.ptr.offsets);
4744 
4745 		if (t_from)
4746 			binder_thread_dec_tmpref(t_from);
4747 		t->buffer->allow_user_free = 1;
4748 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4749 			binder_inner_proc_lock(thread->proc);
4750 			t->to_parent = thread->transaction_stack;
4751 			t->to_thread = thread;
4752 			thread->transaction_stack = t;
4753 			binder_inner_proc_unlock(thread->proc);
4754 		} else {
4755 			binder_free_transaction(t);
4756 		}
4757 		break;
4758 	}
4759 
4760 done:
4761 
4762 	*consumed = ptr - buffer;
4763 	binder_inner_proc_lock(proc);
4764 	if (proc->requested_threads == 0 &&
4765 	    list_empty(&thread->proc->waiting_threads) &&
4766 	    proc->requested_threads_started < proc->max_threads &&
4767 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4768 	     BINDER_LOOPER_STATE_ENTERED))
4769 	    /* user-space fails to spawn a new thread if we leave this out */) {
4770 		proc->requested_threads++;
4771 		binder_inner_proc_unlock(proc);
4772 		binder_debug(BINDER_DEBUG_THREADS,
4773 			     "%d:%d BR_SPAWN_LOOPER\n",
4774 			     proc->pid, thread->pid);
4775 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4776 			return -EFAULT;
4777 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4778 	} else
4779 		binder_inner_proc_unlock(proc);
4780 	return 0;
4781 }
4782 
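/*
 * Drain and discard all work queued on @list. Used at thread and
 * process teardown so undelivered transactions are failed and their
 * resources reclaimed.
 */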
4783 static void binder_release_work(struct binder_proc *proc,
4784 				struct list_head *list)
4785 {
4786 	struct binder_work *w;
4787 	enum binder_work_type wtype;
4788 
4789 	while (1) {
4790 		binder_inner_proc_lock(proc);
4791 		w = binder_dequeue_work_head_ilocked(list);
4792 		wtype = w ? w->type : 0;
4793 		binder_inner_proc_unlock(proc);
4794 		if (!w)
4795 			return;
4796 
4797 		switch (wtype) {
4798 		case BINDER_WORK_TRANSACTION: {
4799 			struct binder_transaction *t;
4800 
4801 			t = container_of(w, struct binder_transaction, work);
4802 
4803 			binder_cleanup_transaction(t, "process died.",
4804 						   BR_DEAD_REPLY);
4805 		} break;
4806 		case BINDER_WORK_RETURN_ERROR: {
4807 			struct binder_error *e = container_of(
4808 					w, struct binder_error, work);
4809 
4810 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4811 				"undelivered TRANSACTION_ERROR: %u\n",
4812 				e->cmd);
4813 		} break;
4814 		case BINDER_WORK_TRANSACTION_PENDING:
4815 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4816 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4817 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4818 				"undelivered TRANSACTION_COMPLETE\n");
4819 			kfree(w);
4820 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4821 		} break;
4822 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4823 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4824 			struct binder_ref_death *death;
4825 
4826 			death = container_of(w, struct binder_ref_death, work);
4827 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4828 				"undelivered death notification, %016llx\n",
4829 				(u64)death->cookie);
4830 			kfree(death);
4831 			binder_stats_deleted(BINDER_STAT_DEATH);
4832 		} break;
4833 		case BINDER_WORK_NODE:
4834 			break;
4835 		default:
4836 			pr_err("unexpected work type, %d, not freed\n",
4837 			       wtype);
4838 			break;
4839 		}
4840 	}
4842 }
4843 
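/*
 * Look up the binder_thread for current in proc->threads, an rb-tree
 * keyed by pid. If no entry exists and @new_thread is non-NULL, the
 * caller-provided thread is initialized and inserted instead.
 * The caller must hold proc->inner_lock.
 */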
4844 static struct binder_thread *binder_get_thread_ilocked(
4845 		struct binder_proc *proc, struct binder_thread *new_thread)
4846 {
4847 	struct binder_thread *thread = NULL;
4848 	struct rb_node *parent = NULL;
4849 	struct rb_node **p = &proc->threads.rb_node;
4850 
4851 	while (*p) {
4852 		parent = *p;
4853 		thread = rb_entry(parent, struct binder_thread, rb_node);
4854 
4855 		if (current->pid < thread->pid)
4856 			p = &(*p)->rb_left;
4857 		else if (current->pid > thread->pid)
4858 			p = &(*p)->rb_right;
4859 		else
4860 			return thread;
4861 	}
4862 	if (!new_thread)
4863 		return NULL;
4864 	thread = new_thread;
4865 	binder_stats_created(BINDER_STAT_THREAD);
4866 	thread->proc = proc;
4867 	thread->pid = current->pid;
4868 	atomic_set(&thread->tmp_ref, 0);
4869 	init_waitqueue_head(&thread->wait);
4870 	INIT_LIST_HEAD(&thread->todo);
4871 	rb_link_node(&thread->rb_node, parent, p);
4872 	rb_insert_color(&thread->rb_node, &proc->threads);
4873 	thread->looper_need_return = true;
4874 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4875 	thread->return_error.cmd = BR_OK;
4876 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4877 	thread->reply_error.cmd = BR_OK;
4878 	thread->ee.command = BR_OK;
4879 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4880 	return thread;
4881 }
4882 
4883 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4884 {
4885 	struct binder_thread *thread;
4886 	struct binder_thread *new_thread;
4887 
4888 	binder_inner_proc_lock(proc);
4889 	thread = binder_get_thread_ilocked(proc, NULL);
4890 	binder_inner_proc_unlock(proc);
4891 	if (!thread) {
4892 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4893 		if (new_thread == NULL)
4894 			return NULL;
4895 		binder_inner_proc_lock(proc);
4896 		thread = binder_get_thread_ilocked(proc, new_thread);
4897 		binder_inner_proc_unlock(proc);
4898 		if (thread != new_thread)
4899 			kfree(new_thread);
4900 	}
4901 	return thread;
4902 }
4903 
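/* Final teardown of a binder_proc once its last temporary ref is dropped. */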
4904 static void binder_free_proc(struct binder_proc *proc)
4905 {
4906 	struct binder_device *device;
4907 
4908 	BUG_ON(!list_empty(&proc->todo));
4909 	BUG_ON(!list_empty(&proc->delivered_death));
4910 	if (proc->outstanding_txns)
4911 		pr_warn("%s: Unexpected outstanding_txns %d\n",
4912 			__func__, proc->outstanding_txns);
4913 	device = container_of(proc->context, struct binder_device, context);
4914 	if (refcount_dec_and_test(&device->ref)) {
4915 		kfree(proc->context->name);
4916 		kfree(device);
4917 	}
4918 	binder_alloc_deferred_release(&proc->alloc);
4919 	put_task_struct(proc->tsk);
4920 	put_cred(proc->cred);
4921 	binder_stats_deleted(BINDER_STAT_PROC);
4922 	kfree(proc);
4923 }
4924 
4925 static void binder_free_thread(struct binder_thread *thread)
4926 {
4927 	BUG_ON(!list_empty(&thread->todo));
4928 	binder_stats_deleted(BINDER_STAT_THREAD);
4929 	binder_proc_dec_tmpref(thread->proc);
4930 	kfree(thread);
4931 }
4932 
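/*
 * Remove @thread from its proc and unwind its transaction stack:
 * senders still waiting on a reply from this thread receive
 * BR_DEAD_REPLY, and in-flight transactions are detached so later
 * cleanup cannot dereference the dead thread.
 */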
4933 static int binder_thread_release(struct binder_proc *proc,
4934 				 struct binder_thread *thread)
4935 {
4936 	struct binder_transaction *t;
4937 	struct binder_transaction *send_reply = NULL;
4938 	int active_transactions = 0;
4939 	struct binder_transaction *last_t = NULL;
4940 
4941 	binder_inner_proc_lock(thread->proc);
4942 	/*
4943 	 * take a ref on the proc so it survives
4944 	 * after we remove this thread from proc->threads.
4945 	 * The corresponding decrement happens when we
4946 	 * actually free the thread in binder_free_thread().
4947 	 */
4948 	proc->tmp_ref++;
4949 	/*
4950 	 * take a ref on this thread to ensure it
4951 	 * survives while we are releasing it
4952 	 */
4953 	atomic_inc(&thread->tmp_ref);
4954 	rb_erase(&thread->rb_node, &proc->threads);
4955 	t = thread->transaction_stack;
4956 	if (t) {
4957 		spin_lock(&t->lock);
4958 		if (t->to_thread == thread)
4959 			send_reply = t;
4960 	} else {
4961 		__acquire(&t->lock);
4962 	}
4963 	thread->is_dead = true;
4964 
4965 	while (t) {
4966 		last_t = t;
4967 		active_transactions++;
4968 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4969 			     "release %d:%d transaction %d %s, still active\n",
4970 			     proc->pid, thread->pid,
4971 			     t->debug_id,
4972 			     (t->to_thread == thread) ? "in" : "out");
4973 
4974 		if (t->to_thread == thread) {
4975 			thread->proc->outstanding_txns--;
4976 			t->to_proc = NULL;
4977 			t->to_thread = NULL;
4978 			if (t->buffer) {
4979 				t->buffer->transaction = NULL;
4980 				t->buffer = NULL;
4981 			}
4982 			t = t->to_parent;
4983 		} else if (t->from == thread) {
4984 			t->from = NULL;
4985 			t = t->from_parent;
4986 		} else
4987 			BUG();
4988 		spin_unlock(&last_t->lock);
4989 		if (t)
4990 			spin_lock(&t->lock);
4991 		else
4992 			__acquire(&t->lock);
4993 	}
4994 	/* annotation for sparse, lock not acquired in last iteration above */
4995 	__release(&t->lock);
4996 
4997 	/*
4998 	 * If this thread used poll, make sure we remove the waitqueue from any
4999 	 * poll data structures holding it.
5000 	 */
5001 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5002 		wake_up_pollfree(&thread->wait);
5003 
5004 	binder_inner_proc_unlock(thread->proc);
5005 
5006 	/*
5007 	 * This is needed to avoid races between wake_up_pollfree() above and
5008 	 * someone else removing the last entry from the queue for other reasons
5009 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5010 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5011 	 * we can be sure they're done after we call synchronize_rcu().
5012 	 */
5013 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5014 		synchronize_rcu();
5015 
5016 	if (send_reply)
5017 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5018 	binder_release_work(proc, &thread->todo);
5019 	binder_thread_dec_tmpref(thread);
5020 	return active_transactions;
5021 }
5022 
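/*
 * Poll support for binder threads. Returns EPOLLERR when a thread
 * cannot be created. Marks the calling thread with
 * BINDER_LOOPER_STATE_POLL so that binder_thread_release() knows it
 * must wake_up_pollfree() the waitqueue, then reports EPOLLIN when
 * the thread has work pending.
 */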
5023 static __poll_t binder_poll(struct file *filp,
5024 				struct poll_table_struct *wait)
5025 {
5026 	struct binder_proc *proc = filp->private_data;
5027 	struct binder_thread *thread = NULL;
5028 	bool wait_for_proc_work;
5029 
5030 	thread = binder_get_thread(proc);
5031 	if (!thread)
5032 		return EPOLLERR;
5033 
5034 	binder_inner_proc_lock(thread->proc);
5035 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5036 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5037 
5038 	binder_inner_proc_unlock(thread->proc);
5039 
5040 	poll_wait(filp, &thread->wait, wait);
5041 
5042 	if (binder_has_work(thread, wait_for_proc_work))
5043 		return EPOLLIN;
5044 
5045 	return 0;
5046 }
5047 
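/*
 * Handle the BINDER_WRITE_READ ioctl. The write buffer (commands from
 * userspace) is processed first, then the read buffer (work destined
 * for userspace) is filled, honoring O_NONBLOCK on the read side. The
 * struct, including the consumed counts, is copied back even on error
 * so userspace can tell how far processing got.
 *
 * Userspace view (illustrative sketch only, not part of the driver;
 * binder_fd, out_buf, out_len and in_buf are the caller's own): a
 * looper thread typically drives this with something like
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_buf,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */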
5048 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5049 				struct binder_thread *thread)
5050 {
5051 	int ret = 0;
5052 	struct binder_proc *proc = filp->private_data;
5053 	void __user *ubuf = (void __user *)arg;
5054 	struct binder_write_read bwr;
5055 
5056 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5057 		ret = -EFAULT;
5058 		goto out;
5059 	}
5060 	binder_debug(BINDER_DEBUG_READ_WRITE,
5061 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5062 		     proc->pid, thread->pid,
5063 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5064 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5065 
5066 	if (bwr.write_size > 0) {
5067 		ret = binder_thread_write(proc, thread,
5068 					  bwr.write_buffer,
5069 					  bwr.write_size,
5070 					  &bwr.write_consumed);
5071 		trace_binder_write_done(ret);
5072 		if (ret < 0) {
5073 			bwr.read_consumed = 0;
5074 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5075 				ret = -EFAULT;
5076 			goto out;
5077 		}
5078 	}
5079 	if (bwr.read_size > 0) {
5080 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5081 					 bwr.read_size,
5082 					 &bwr.read_consumed,
5083 					 filp->f_flags & O_NONBLOCK);
5084 		trace_binder_read_done(ret);
5085 		binder_inner_proc_lock(proc);
5086 		if (!binder_worklist_empty_ilocked(&proc->todo))
5087 			binder_wakeup_proc_ilocked(proc);
5088 		binder_inner_proc_unlock(proc);
5089 		if (ret < 0) {
5090 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5091 				ret = -EFAULT;
5092 			goto out;
5093 		}
5094 	}
5095 	binder_debug(BINDER_DEBUG_READ_WRITE,
5096 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5097 		     proc->pid, thread->pid,
5098 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5099 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5100 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5101 		ret = -EFAULT;
5102 		goto out;
5103 	}
5104 out:
5105 	return ret;
5106 }
5107 
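/*
 * Register the context manager node for this binder context. fbo is
 * NULL for the legacy BINDER_SET_CONTEXT_MGR path. Only one context
 * manager may exist per context (-EBUSY otherwise), the security hook
 * must approve the caller, and if a manager uid was already
 * established, the caller's euid must match it.
 */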
5108 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5109 				    struct flat_binder_object *fbo)
5110 {
5111 	int ret = 0;
5112 	struct binder_proc *proc = filp->private_data;
5113 	struct binder_context *context = proc->context;
5114 	struct binder_node *new_node;
5115 	kuid_t curr_euid = current_euid();
5116 
5117 	mutex_lock(&context->context_mgr_node_lock);
5118 	if (context->binder_context_mgr_node) {
5119 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5120 		ret = -EBUSY;
5121 		goto out;
5122 	}
5123 	ret = security_binder_set_context_mgr(proc->cred);
5124 	if (ret < 0)
5125 		goto out;
5126 	if (uid_valid(context->binder_context_mgr_uid)) {
5127 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5128 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5129 			       from_kuid(&init_user_ns, curr_euid),
5130 			       from_kuid(&init_user_ns,
5131 					 context->binder_context_mgr_uid));
5132 			ret = -EPERM;
5133 			goto out;
5134 		}
5135 	} else {
5136 		context->binder_context_mgr_uid = curr_euid;
5137 	}
5138 	new_node = binder_new_node(proc, fbo);
5139 	if (!new_node) {
5140 		ret = -ENOMEM;
5141 		goto out;
5142 	}
5143 	binder_node_lock(new_node);
5144 	new_node->local_weak_refs++;
5145 	new_node->local_strong_refs++;
5146 	new_node->has_strong_ref = 1;
5147 	new_node->has_weak_ref = 1;
5148 	context->binder_context_mgr_node = new_node;
5149 	binder_node_unlock(new_node);
5150 	binder_put_node(new_node);
5151 out:
5152 	mutex_unlock(&context->context_mgr_node_lock);
5153 	return ret;
5154 }
5155 
5156 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5157 		struct binder_node_info_for_ref *info)
5158 {
5159 	struct binder_node *node;
5160 	struct binder_context *context = proc->context;
5161 	__u32 handle = info->handle;
5162 
5163 	if (info->strong_count || info->weak_count || info->reserved1 ||
5164 	    info->reserved2 || info->reserved3) {
5165 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5166 				  proc->pid);
5167 		return -EINVAL;
5168 	}
5169 
5170 	/* This ioctl may only be used by the context manager */
5171 	mutex_lock(&context->context_mgr_node_lock);
5172 	if (!context->binder_context_mgr_node ||
5173 		context->binder_context_mgr_node->proc != proc) {
5174 		mutex_unlock(&context->context_mgr_node_lock);
5175 		return -EPERM;
5176 	}
5177 	mutex_unlock(&context->context_mgr_node_lock);
5178 
5179 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5180 	if (!node)
5181 		return -EINVAL;
5182 
5183 	info->strong_count = node->local_strong_refs +
5184 		node->internal_strong_refs;
5185 	info->weak_count = node->local_weak_refs;
5186 
5187 	binder_put_node(node);
5188 
5189 	return 0;
5190 }
5191 
5192 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5193 				struct binder_node_debug_info *info)
5194 {
5195 	struct rb_node *n;
5196 	binder_uintptr_t ptr = info->ptr;
5197 
5198 	memset(info, 0, sizeof(*info));
5199 
5200 	binder_inner_proc_lock(proc);
5201 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5202 		struct binder_node *node = rb_entry(n, struct binder_node,
5203 						    rb_node);
5204 		if (node->ptr > ptr) {
5205 			info->ptr = node->ptr;
5206 			info->cookie = node->cookie;
5207 			info->has_strong_ref = node->has_strong_ref;
5208 			info->has_weak_ref = node->has_weak_ref;
5209 			break;
5210 		}
5211 	}
5212 	binder_inner_proc_unlock(proc);
5213 
5214 	return 0;
5215 }
5216 
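/*
 * Return true if the process still has transactions in flight, either
 * counted in outstanding_txns or sitting on some thread's transaction
 * stack. Requires proc->inner_lock.
 */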
5217 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5218 {
5219 	struct rb_node *n;
5220 	struct binder_thread *thread;
5221 
5222 	if (proc->outstanding_txns > 0)
5223 		return true;
5224 
5225 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5226 		thread = rb_entry(n, struct binder_thread, rb_node);
5227 		if (thread->transaction_stack)
5228 			return true;
5229 	}
5230 	return false;
5231 }
5232 
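/*
 * Freeze or unfreeze one binder_proc. Unfreezing just clears the
 * frozen/recv flags. Freezing sets is_frozen first so that new
 * transactions are refused, optionally waits up to timeout_ms for
 * outstanding transactions to drain, and rolls is_frozen back on
 * failure (e.g. -EAGAIN when transactions are still pending).
 *
 * Illustrative userspace sketch (not part of the driver; target_pid
 * and binder_fd are the caller's own):
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *	ioctl(binder_fd, BINDER_FREEZE, &info);
 */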
5233 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5234 			       struct binder_proc *target_proc)
5235 {
5236 	int ret = 0;
5237 
5238 	if (!info->enable) {
5239 		binder_inner_proc_lock(target_proc);
5240 		target_proc->sync_recv = false;
5241 		target_proc->async_recv = false;
5242 		target_proc->is_frozen = false;
5243 		binder_inner_proc_unlock(target_proc);
5244 		return 0;
5245 	}
5246 
5247 	/*
5248 	 * Freezing the target. Prevent new transactions by
5249 	 * setting the frozen state. If a timeout is specified,
5250 	 * wait for transactions to drain.
5251 	 */
5252 	binder_inner_proc_lock(target_proc);
5253 	target_proc->sync_recv = false;
5254 	target_proc->async_recv = false;
5255 	target_proc->is_frozen = true;
5256 	binder_inner_proc_unlock(target_proc);
5257 
5258 	if (info->timeout_ms > 0)
5259 		ret = wait_event_interruptible_timeout(
5260 			target_proc->freeze_wait,
5261 			(!target_proc->outstanding_txns),
5262 			msecs_to_jiffies(info->timeout_ms));
5263 
5264 	/* Check pending transactions that wait for reply */
5265 	if (ret >= 0) {
5266 		binder_inner_proc_lock(target_proc);
5267 		if (binder_txns_pending_ilocked(target_proc))
5268 			ret = -EAGAIN;
5269 		binder_inner_proc_unlock(target_proc);
5270 	}
5271 
5272 	if (ret < 0) {
5273 		binder_inner_proc_lock(target_proc);
5274 		target_proc->is_frozen = false;
5275 		binder_inner_proc_unlock(target_proc);
5276 	}
5277 
5278 	return ret;
5279 }
5280 
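/*
 * Report frozen-state status for a pid, aggregated over every
 * binder_proc with that pid (one per context): bit 0 of sync_recv
 * reports a sync transaction received while frozen, bit 1 reports
 * transactions still pending, and async_recv reports an async
 * transaction received while frozen. Returns -EINVAL if the pid has
 * no binder_proc.
 */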
5281 static int binder_ioctl_get_freezer_info(
5282 				struct binder_frozen_status_info *info)
5283 {
5284 	struct binder_proc *target_proc;
5285 	bool found = false;
5286 	__u32 txns_pending;
5287 
5288 	info->sync_recv = 0;
5289 	info->async_recv = 0;
5290 
5291 	mutex_lock(&binder_procs_lock);
5292 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5293 		if (target_proc->pid == info->pid) {
5294 			found = true;
5295 			binder_inner_proc_lock(target_proc);
5296 			txns_pending = binder_txns_pending_ilocked(target_proc);
5297 			info->sync_recv |= target_proc->sync_recv |
5298 					(txns_pending << 1);
5299 			info->async_recv |= target_proc->async_recv;
5300 			binder_inner_proc_unlock(target_proc);
5301 		}
5302 	}
5303 	mutex_unlock(&binder_procs_lock);
5304 
5305 	if (!found)
5306 		return -EINVAL;
5307 
5308 	return 0;
5309 }
5310 
5311 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5312 					   void __user *ubuf)
5313 {
5314 	struct binder_extended_error ee;
5315 
5316 	binder_inner_proc_lock(thread->proc);
5317 	ee = thread->ee;
5318 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5319 	binder_inner_proc_unlock(thread->proc);
5320 
5321 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5322 		return -EFAULT;
5323 
5324 	return 0;
5325 }
5326 
5327 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5328 {
5329 	int ret;
5330 	struct binder_proc *proc = filp->private_data;
5331 	struct binder_thread *thread;
5332 	void __user *ubuf = (void __user *)arg;
5333 
5334 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5335 			proc->pid, current->pid, cmd, arg);*/
5336 
5337 	binder_selftest_alloc(&proc->alloc);
5338 
5339 	trace_binder_ioctl(cmd, arg);
5340 
5341 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5342 	if (ret)
5343 		goto err_unlocked;
5344 
5345 	thread = binder_get_thread(proc);
5346 	if (thread == NULL) {
5347 		ret = -ENOMEM;
5348 		goto err;
5349 	}
5350 
5351 	switch (cmd) {
5352 	case BINDER_WRITE_READ:
5353 		ret = binder_ioctl_write_read(filp, arg, thread);
5354 		if (ret)
5355 			goto err;
5356 		break;
5357 	case BINDER_SET_MAX_THREADS: {
5358 		int max_threads;
5359 
5360 		if (copy_from_user(&max_threads, ubuf,
5361 				   sizeof(max_threads))) {
5362 			ret = -EINVAL;
5363 			goto err;
5364 		}
5365 		binder_inner_proc_lock(proc);
5366 		proc->max_threads = max_threads;
5367 		binder_inner_proc_unlock(proc);
5368 		break;
5369 	}
5370 	case BINDER_SET_CONTEXT_MGR_EXT: {
5371 		struct flat_binder_object fbo;
5372 
5373 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5374 			ret = -EINVAL;
5375 			goto err;
5376 		}
5377 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5378 		if (ret)
5379 			goto err;
5380 		break;
5381 	}
5382 	case BINDER_SET_CONTEXT_MGR:
5383 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5384 		if (ret)
5385 			goto err;
5386 		break;
5387 	case BINDER_THREAD_EXIT:
5388 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5389 			     proc->pid, thread->pid);
5390 		binder_thread_release(proc, thread);
5391 		thread = NULL;
5392 		break;
5393 	case BINDER_VERSION: {
5394 		struct binder_version __user *ver = ubuf;
5395 
5396 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5397 			     &ver->protocol_version)) {
5398 			ret = -EINVAL;
5399 			goto err;
5400 		}
5401 		break;
5402 	}
5403 	case BINDER_GET_NODE_INFO_FOR_REF: {
5404 		struct binder_node_info_for_ref info;
5405 
5406 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5407 			ret = -EFAULT;
5408 			goto err;
5409 		}
5410 
5411 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5412 		if (ret < 0)
5413 			goto err;
5414 
5415 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5416 			ret = -EFAULT;
5417 			goto err;
5418 		}
5419 
5420 		break;
5421 	}
5422 	case BINDER_GET_NODE_DEBUG_INFO: {
5423 		struct binder_node_debug_info info;
5424 
5425 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5426 			ret = -EFAULT;
5427 			goto err;
5428 		}
5429 
5430 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5431 		if (ret < 0)
5432 			goto err;
5433 
5434 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5435 			ret = -EFAULT;
5436 			goto err;
5437 		}
5438 		break;
5439 	}
5440 	case BINDER_FREEZE: {
5441 		struct binder_freeze_info info;
5442 		struct binder_proc **target_procs = NULL, *target_proc;
5443 		int target_procs_count = 0, i = 0;
5444 
5445 		ret = 0;
5446 
5447 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5448 			ret = -EFAULT;
5449 			goto err;
5450 		}
5451 
5452 		mutex_lock(&binder_procs_lock);
5453 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5454 			if (target_proc->pid == info.pid)
5455 				target_procs_count++;
5456 		}
5457 
5458 		if (target_procs_count == 0) {
5459 			mutex_unlock(&binder_procs_lock);
5460 			ret = -EINVAL;
5461 			goto err;
5462 		}
5463 
5464 		target_procs = kcalloc(target_procs_count,
5465 				       sizeof(struct binder_proc *),
5466 				       GFP_KERNEL);
5467 
5468 		if (!target_procs) {
5469 			mutex_unlock(&binder_procs_lock);
5470 			ret = -ENOMEM;
5471 			goto err;
5472 		}
5473 
5474 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5475 			if (target_proc->pid != info.pid)
5476 				continue;
5477 
5478 			binder_inner_proc_lock(target_proc);
5479 			target_proc->tmp_ref++;
5480 			binder_inner_proc_unlock(target_proc);
5481 
5482 			target_procs[i++] = target_proc;
5483 		}
5484 		mutex_unlock(&binder_procs_lock);
5485 
5486 		for (i = 0; i < target_procs_count; i++) {
5487 			if (ret >= 0)
5488 				ret = binder_ioctl_freeze(&info,
5489 							  target_procs[i]);
5490 
5491 			binder_proc_dec_tmpref(target_procs[i]);
5492 		}
5493 
5494 		kfree(target_procs);
5495 
5496 		if (ret < 0)
5497 			goto err;
5498 		break;
5499 	}
5500 	case BINDER_GET_FROZEN_INFO: {
5501 		struct binder_frozen_status_info info;
5502 
5503 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5504 			ret = -EFAULT;
5505 			goto err;
5506 		}
5507 
5508 		ret = binder_ioctl_get_freezer_info(&info);
5509 		if (ret < 0)
5510 			goto err;
5511 
5512 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5513 			ret = -EFAULT;
5514 			goto err;
5515 		}
5516 		break;
5517 	}
5518 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5519 		uint32_t enable;
5520 
5521 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5522 			ret = -EFAULT;
5523 			goto err;
5524 		}
5525 		binder_inner_proc_lock(proc);
5526 		proc->oneway_spam_detection_enabled = (bool)enable;
5527 		binder_inner_proc_unlock(proc);
5528 		break;
5529 	}
5530 	case BINDER_GET_EXTENDED_ERROR:
5531 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5532 		if (ret < 0)
5533 			goto err;
5534 		break;
5535 	default:
5536 		ret = -EINVAL;
5537 		goto err;
5538 	}
5539 	ret = 0;
5540 err:
5541 	if (thread)
5542 		thread->looper_need_return = false;
5543 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5544 	if (ret && ret != -EINTR)
5545 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5546 err_unlocked:
5547 	trace_binder_ioctl_done(ret);
5548 	return ret;
5549 }
5550 
5551 static void binder_vma_open(struct vm_area_struct *vma)
5552 {
5553 	struct binder_proc *proc = vma->vm_private_data;
5554 
5555 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5556 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5557 		     proc->pid, vma->vm_start, vma->vm_end,
5558 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5559 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5560 }
5561 
5562 static void binder_vma_close(struct vm_area_struct *vma)
5563 {
5564 	struct binder_proc *proc = vma->vm_private_data;
5565 
5566 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5567 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5568 		     proc->pid, vma->vm_start, vma->vm_end,
5569 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5570 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5571 	binder_alloc_vma_close(&proc->alloc);
5572 }
5573 
5574 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5575 {
5576 	return VM_FAULT_SIGBUS;
5577 }
5578 
5579 static const struct vm_operations_struct binder_vm_ops = {
5580 	.open = binder_vma_open,
5581 	.close = binder_vma_close,
5582 	.fault = binder_vm_fault,
5583 };
5584 
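/*
 * Map the per-process binder buffer area. The mapping must come from
 * the thread group leader that opened the device, and it must not be
 * writable (FORBIDDEN_MMAP_FLAGS): userspace only ever reads
 * transaction data from it. VM_DONTCOPY | VM_MIXEDMAP are forced on
 * and VM_MAYWRITE is cleared before handing the vma to
 * binder_alloc_mmap_handler().
 */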
5585 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5586 {
5587 	struct binder_proc *proc = filp->private_data;
5588 
5589 	if (proc->tsk != current->group_leader)
5590 		return -EINVAL;
5591 
5592 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5593 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5594 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5595 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5596 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5597 
5598 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5599 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5600 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5601 		return -EPERM;
5602 	}
5603 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5604 
5605 	vma->vm_ops = &binder_vm_ops;
5606 	vma->vm_private_data = proc;
5607 
5608 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5609 }
5610 
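/*
 * Set up a binder_proc for the opening process. Stashes the task
 * group leader and the opener's credentials, binds the proc to its
 * binder_device/context, and (for the first open of a given pid)
 * creates the debugfs and binderfs proc entries.
 */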
5611 static int binder_open(struct inode *nodp, struct file *filp)
5612 {
5613 	struct binder_proc *proc, *itr;
5614 	struct binder_device *binder_dev;
5615 	struct binderfs_info *info;
5616 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5617 	bool existing_pid = false;
5618 
5619 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5620 		     current->group_leader->pid, current->pid);
5621 
5622 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5623 	if (proc == NULL)
5624 		return -ENOMEM;
5625 	spin_lock_init(&proc->inner_lock);
5626 	spin_lock_init(&proc->outer_lock);
5627 	get_task_struct(current->group_leader);
5628 	proc->tsk = current->group_leader;
5629 	proc->cred = get_cred(filp->f_cred);
5630 	INIT_LIST_HEAD(&proc->todo);
5631 	init_waitqueue_head(&proc->freeze_wait);
5632 	proc->default_priority = task_nice(current);
5633 	/* binderfs stashes devices in i_private */
5634 	if (is_binderfs_device(nodp)) {
5635 		binder_dev = nodp->i_private;
5636 		info = nodp->i_sb->s_fs_info;
5637 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5638 	} else {
5639 		binder_dev = container_of(filp->private_data,
5640 					  struct binder_device, miscdev);
5641 	}
5642 	refcount_inc(&binder_dev->ref);
5643 	proc->context = &binder_dev->context;
5644 	binder_alloc_init(&proc->alloc);
5645 
5646 	binder_stats_created(BINDER_STAT_PROC);
5647 	proc->pid = current->group_leader->pid;
5648 	INIT_LIST_HEAD(&proc->delivered_death);
5649 	INIT_LIST_HEAD(&proc->waiting_threads);
5650 	filp->private_data = proc;
5651 
5652 	mutex_lock(&binder_procs_lock);
5653 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5654 		if (itr->pid == proc->pid) {
5655 			existing_pid = true;
5656 			break;
5657 		}
5658 	}
5659 	hlist_add_head(&proc->proc_node, &binder_procs);
5660 	mutex_unlock(&binder_procs_lock);
5661 
5662 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5663 		char strbuf[11];
5664 
5665 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5666 		/*
5667 		 * proc debug entries are shared between contexts.
5668 		 * Only create for the first PID to avoid debugfs log spamming.
5669 		 * The printing code will print all contexts for a given PID
5670 		 * anyway, so this is not a problem.
5671 		 */
5672 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5673 			binder_debugfs_dir_entry_proc,
5674 			(void *)(unsigned long)proc->pid,
5675 			&proc_fops);
5676 	}
5677 
5678 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5679 		char strbuf[11];
5680 		struct dentry *binderfs_entry;
5681 
5682 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5683 		/*
5684 		 * Similar to debugfs, the process specific log file is shared
5685 		 * between contexts. Only create for the first PID.
5686 		 * This is OK since, as with debugfs, the log file will
5687 		 * contain information on all contexts of a given PID.
5688 		 */
5689 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5690 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5691 		if (!IS_ERR(binderfs_entry)) {
5692 			proc->binderfs_entry = binderfs_entry;
5693 		} else {
5694 			int error;
5695 
5696 			error = PTR_ERR(binderfs_entry);
5697 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5698 				strbuf, error);
5699 		}
5700 	}
5701 
5702 	return 0;
5703 }
5704 
5705 static int binder_flush(struct file *filp, fl_owner_t id)
5706 {
5707 	struct binder_proc *proc = filp->private_data;
5708 
5709 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5710 
5711 	return 0;
5712 }
5713 
5714 static void binder_deferred_flush(struct binder_proc *proc)
5715 {
5716 	struct rb_node *n;
5717 	int wake_count = 0;
5718 
5719 	binder_inner_proc_lock(proc);
5720 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5721 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5722 
5723 		thread->looper_need_return = true;
5724 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5725 			wake_up_interruptible(&thread->wait);
5726 			wake_count++;
5727 		}
5728 	}
5729 	binder_inner_proc_unlock(proc);
5730 
5731 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5732 		     "binder_flush: %d woke %d threads\n", proc->pid,
5733 		     wake_count);
5734 }
5735 
5736 static int binder_release(struct inode *nodp, struct file *filp)
5737 {
5738 	struct binder_proc *proc = filp->private_data;
5739 
5740 	debugfs_remove(proc->debugfs_entry);
5741 
5742 	if (proc->binderfs_entry) {
5743 		binderfs_remove_file(proc->binderfs_entry);
5744 		proc->binderfs_entry = NULL;
5745 	}
5746 
5747 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5748 
5749 	return 0;
5750 }
5751 
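/*
 * Release one node of a dying process. Frees the node outright when
 * nothing else references it; otherwise moves it onto
 * binder_dead_nodes and queues BINDER_WORK_DEAD_BINDER for every ref
 * that requested a death notification. Returns the updated incoming
 * ref count.
 */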
5752 static int binder_node_release(struct binder_node *node, int refs)
5753 {
5754 	struct binder_ref *ref;
5755 	int death = 0;
5756 	struct binder_proc *proc = node->proc;
5757 
5758 	binder_release_work(proc, &node->async_todo);
5759 
5760 	binder_node_lock(node);
5761 	binder_inner_proc_lock(proc);
5762 	binder_dequeue_work_ilocked(&node->work);
5763 	/*
5764 	 * The caller must have taken a temporary ref on the node.
5765 	 */
5766 	BUG_ON(!node->tmp_refs);
5767 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5768 		binder_inner_proc_unlock(proc);
5769 		binder_node_unlock(node);
5770 		binder_free_node(node);
5771 
5772 		return refs;
5773 	}
5774 
5775 	node->proc = NULL;
5776 	node->local_strong_refs = 0;
5777 	node->local_weak_refs = 0;
5778 	binder_inner_proc_unlock(proc);
5779 
5780 	spin_lock(&binder_dead_nodes_lock);
5781 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5782 	spin_unlock(&binder_dead_nodes_lock);
5783 
5784 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5785 		refs++;
5786 		/*
5787 		 * Need the node lock to synchronize
5788 		 * with new notification requests and the
5789 		 * inner lock to synchronize with queued
5790 		 * death notifications.
5791 		 */
5792 		binder_inner_proc_lock(ref->proc);
5793 		if (!ref->death) {
5794 			binder_inner_proc_unlock(ref->proc);
5795 			continue;
5796 		}
5797 
5798 		death++;
5799 
5800 		BUG_ON(!list_empty(&ref->death->work.entry));
5801 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5802 		binder_enqueue_work_ilocked(&ref->death->work,
5803 					    &ref->proc->todo);
5804 		binder_wakeup_proc_ilocked(ref->proc);
5805 		binder_inner_proc_unlock(ref->proc);
5806 	}
5807 
5808 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5809 		     "node %d now dead, refs %d, death %d\n",
5810 		     node->debug_id, refs, death);
5811 	binder_node_unlock(node);
5812 	binder_put_node(node);
5813 
5814 	return refs;
5815 }
5816 
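/*
 * Tear down a binder_proc after release(). Runs from the deferred
 * workqueue: unlinks the proc, drops the context manager node if this
 * proc owned it, releases every thread, node and ref, flushes the
 * remaining work lists, and finally drops the temporary proc ref
 * taken here, which frees the proc once all other temp refs are gone.
 */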
5817 static void binder_deferred_release(struct binder_proc *proc)
5818 {
5819 	struct binder_context *context = proc->context;
5820 	struct rb_node *n;
5821 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5822 
5823 	mutex_lock(&binder_procs_lock);
5824 	hlist_del(&proc->proc_node);
5825 	mutex_unlock(&binder_procs_lock);
5826 
5827 	mutex_lock(&context->context_mgr_node_lock);
5828 	if (context->binder_context_mgr_node &&
5829 	    context->binder_context_mgr_node->proc == proc) {
5830 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5831 			     "%s: %d context_mgr_node gone\n",
5832 			     __func__, proc->pid);
5833 		context->binder_context_mgr_node = NULL;
5834 	}
5835 	mutex_unlock(&context->context_mgr_node_lock);
5836 	binder_inner_proc_lock(proc);
5837 	/*
5838 	 * Make sure proc stays alive after we
5839 	 * remove all the threads
5840 	 */
5841 	proc->tmp_ref++;
5842 
5843 	proc->is_dead = true;
5844 	proc->is_frozen = false;
5845 	proc->sync_recv = false;
5846 	proc->async_recv = false;
5847 	threads = 0;
5848 	active_transactions = 0;
5849 	while ((n = rb_first(&proc->threads))) {
5850 		struct binder_thread *thread;
5851 
5852 		thread = rb_entry(n, struct binder_thread, rb_node);
5853 		binder_inner_proc_unlock(proc);
5854 		threads++;
5855 		active_transactions += binder_thread_release(proc, thread);
5856 		binder_inner_proc_lock(proc);
5857 	}
5858 
5859 	nodes = 0;
5860 	incoming_refs = 0;
5861 	while ((n = rb_first(&proc->nodes))) {
5862 		struct binder_node *node;
5863 
5864 		node = rb_entry(n, struct binder_node, rb_node);
5865 		nodes++;
5866 		/*
5867 		 * take a temporary ref on the node before
5868 		 * calling binder_node_release() which will either
5869 		 * kfree() the node or call binder_put_node()
5870 		 */
5871 		binder_inc_node_tmpref_ilocked(node);
5872 		rb_erase(&node->rb_node, &proc->nodes);
5873 		binder_inner_proc_unlock(proc);
5874 		incoming_refs = binder_node_release(node, incoming_refs);
5875 		binder_inner_proc_lock(proc);
5876 	}
5877 	binder_inner_proc_unlock(proc);
5878 
5879 	outgoing_refs = 0;
5880 	binder_proc_lock(proc);
5881 	while ((n = rb_first(&proc->refs_by_desc))) {
5882 		struct binder_ref *ref;
5883 
5884 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5885 		outgoing_refs++;
5886 		binder_cleanup_ref_olocked(ref);
5887 		binder_proc_unlock(proc);
5888 		binder_free_ref(ref);
5889 		binder_proc_lock(proc);
5890 	}
5891 	binder_proc_unlock(proc);
5892 
5893 	binder_release_work(proc, &proc->todo);
5894 	binder_release_work(proc, &proc->delivered_death);
5895 
5896 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5897 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5898 		     __func__, proc->pid, threads, nodes, incoming_refs,
5899 		     outgoing_refs, active_transactions);
5900 
5901 	binder_proc_dec_tmpref(proc);
5902 }
5903 
5904 static void binder_deferred_func(struct work_struct *work)
5905 {
5906 	struct binder_proc *proc;
5907 
5908 	int defer;
5909 
5910 	do {
5911 		mutex_lock(&binder_deferred_lock);
5912 		if (!hlist_empty(&binder_deferred_list)) {
5913 			proc = hlist_entry(binder_deferred_list.first,
5914 					struct binder_proc, deferred_work_node);
5915 			hlist_del_init(&proc->deferred_work_node);
5916 			defer = proc->deferred_work;
5917 			proc->deferred_work = 0;
5918 		} else {
5919 			proc = NULL;
5920 			defer = 0;
5921 		}
5922 		mutex_unlock(&binder_deferred_lock);
5923 
5924 		if (defer & BINDER_DEFERRED_FLUSH)
5925 			binder_deferred_flush(proc);
5926 
5927 		if (defer & BINDER_DEFERRED_RELEASE)
5928 			binder_deferred_release(proc); /* frees proc */
5929 	} while (proc);
5930 }
5931 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5932 
5933 static void
5934 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5935 {
5936 	mutex_lock(&binder_deferred_lock);
5937 	proc->deferred_work |= defer;
5938 	if (hlist_unhashed(&proc->deferred_work_node)) {
5939 		hlist_add_head(&proc->deferred_work_node,
5940 				&binder_deferred_list);
5941 		schedule_work(&binder_deferred_work);
5942 	}
5943 	mutex_unlock(&binder_deferred_lock);
5944 }
5945 
5946 static void print_binder_transaction_ilocked(struct seq_file *m,
5947 					     struct binder_proc *proc,
5948 					     const char *prefix,
5949 					     struct binder_transaction *t)
5950 {
5951 	struct binder_proc *to_proc;
5952 	struct binder_buffer *buffer = t->buffer;
5953 	ktime_t current_time = ktime_get();
5954 
5955 	spin_lock(&t->lock);
5956 	to_proc = t->to_proc;
5957 	seq_printf(m,
5958 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
5959 		   prefix, t->debug_id, t,
5960 		   t->from_pid,
5961 		   t->from_tid,
5962 		   to_proc ? to_proc->pid : 0,
5963 		   t->to_thread ? t->to_thread->pid : 0,
5964 		   t->code, t->flags, t->priority, t->need_reply,
5965 		   ktime_ms_delta(current_time, t->start_time));
5966 	spin_unlock(&t->lock);
5967 
5968 	if (proc != to_proc) {
5969 		/*
5970 		 * Can only safely deref buffer if we are holding the
5971 		 * correct proc inner lock for this node
5972 		 */
5973 		seq_puts(m, "\n");
5974 		return;
5975 	}
5976 
5977 	if (buffer == NULL) {
5978 		seq_puts(m, " buffer free\n");
5979 		return;
5980 	}
5981 	if (buffer->target_node)
5982 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5983 	seq_printf(m, " size %zd:%zd offset %lx\n",
5984 		   buffer->data_size, buffer->offsets_size,
5985 		   proc->alloc.buffer - buffer->user_data);
5986 }
5987 
5988 static void print_binder_work_ilocked(struct seq_file *m,
5989 				     struct binder_proc *proc,
5990 				     const char *prefix,
5991 				     const char *transaction_prefix,
5992 				     struct binder_work *w)
5993 {
5994 	struct binder_node *node;
5995 	struct binder_transaction *t;
5996 
5997 	switch (w->type) {
5998 	case BINDER_WORK_TRANSACTION:
5999 		t = container_of(w, struct binder_transaction, work);
6000 		print_binder_transaction_ilocked(
6001 				m, proc, transaction_prefix, t);
6002 		break;
6003 	case BINDER_WORK_RETURN_ERROR: {
6004 		struct binder_error *e = container_of(
6005 				w, struct binder_error, work);
6006 
6007 		seq_printf(m, "%stransaction error: %u\n",
6008 			   prefix, e->cmd);
6009 	} break;
6010 	case BINDER_WORK_TRANSACTION_COMPLETE:
6011 		seq_printf(m, "%stransaction complete\n", prefix);
6012 		break;
6013 	case BINDER_WORK_NODE:
6014 		node = container_of(w, struct binder_node, work);
6015 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6016 			   prefix, node->debug_id,
6017 			   (u64)node->ptr, (u64)node->cookie);
6018 		break;
6019 	case BINDER_WORK_DEAD_BINDER:
6020 		seq_printf(m, "%shas dead binder\n", prefix);
6021 		break;
6022 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6023 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6024 		break;
6025 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6026 		seq_printf(m, "%shas cleared death notification\n", prefix);
6027 		break;
6028 	default:
6029 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6030 		break;
6031 	}
6032 }
6033 
6034 static void print_binder_thread_ilocked(struct seq_file *m,
6035 					struct binder_thread *thread,
6036 					int print_always)
6037 {
6038 	struct binder_transaction *t;
6039 	struct binder_work *w;
6040 	size_t start_pos = m->count;
6041 	size_t header_pos;
6042 
6043 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6044 			thread->pid, thread->looper,
6045 			thread->looper_need_return,
6046 			atomic_read(&thread->tmp_ref));
6047 	header_pos = m->count;
6048 	t = thread->transaction_stack;
6049 	while (t) {
6050 		if (t->from == thread) {
6051 			print_binder_transaction_ilocked(m, thread->proc,
6052 					"    outgoing transaction", t);
6053 			t = t->from_parent;
6054 		} else if (t->to_thread == thread) {
6055 			print_binder_transaction_ilocked(m, thread->proc,
6056 						 "    incoming transaction", t);
6057 			t = t->to_parent;
6058 		} else {
6059 			print_binder_transaction_ilocked(m, thread->proc,
6060 					"    bad transaction", t);
6061 			t = NULL;
6062 		}
6063 	}
6064 	list_for_each_entry(w, &thread->todo, entry) {
6065 		print_binder_work_ilocked(m, thread->proc, "    ",
6066 					  "    pending transaction", w);
6067 	}
6068 	if (!print_always && m->count == header_pos)
6069 		m->count = start_pos;
6070 }
6071 
6072 static void print_binder_node_nilocked(struct seq_file *m,
6073 				       struct binder_node *node)
6074 {
6075 	struct binder_ref *ref;
6076 	struct binder_work *w;
6077 	int count;
6078 
6079 	count = 0;
6080 	hlist_for_each_entry(ref, &node->refs, node_entry)
6081 		count++;
6082 
6083 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6084 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
6085 		   node->has_strong_ref, node->has_weak_ref,
6086 		   node->local_strong_refs, node->local_weak_refs,
6087 		   node->internal_strong_refs, count, node->tmp_refs);
6088 	if (count) {
6089 		seq_puts(m, " proc");
6090 		hlist_for_each_entry(ref, &node->refs, node_entry)
6091 			seq_printf(m, " %d", ref->proc->pid);
6092 	}
6093 	seq_puts(m, "\n");
6094 	if (node->proc) {
6095 		list_for_each_entry(w, &node->async_todo, entry)
6096 			print_binder_work_ilocked(m, node->proc, "    ",
6097 					  "    pending async transaction", w);
6098 	}
6099 }
6100 
6101 static void print_binder_ref_olocked(struct seq_file *m,
6102 				     struct binder_ref *ref)
6103 {
6104 	binder_node_lock(ref->node);
6105 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6106 		   ref->data.debug_id, ref->data.desc,
6107 		   ref->node->proc ? "" : "dead ",
6108 		   ref->node->debug_id, ref->data.strong,
6109 		   ref->data.weak, ref->death);
6110 	binder_node_unlock(ref->node);
6111 }
6112 
6113 static void print_binder_proc(struct seq_file *m,
6114 			      struct binder_proc *proc, int print_all)
6115 {
6116 	struct binder_work *w;
6117 	struct rb_node *n;
6118 	size_t start_pos = m->count;
6119 	size_t header_pos;
6120 	struct binder_node *last_node = NULL;
6121 
6122 	seq_printf(m, "proc %d\n", proc->pid);
6123 	seq_printf(m, "context %s\n", proc->context->name);
6124 	header_pos = m->count;
6125 
6126 	binder_inner_proc_lock(proc);
6127 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6128 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6129 						rb_node), print_all);
6130 
6131 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6132 		struct binder_node *node = rb_entry(n, struct binder_node,
6133 						    rb_node);
6134 		if (!print_all && !node->has_async_transaction)
6135 			continue;
6136 
6137 		/*
6138 		 * take a temporary reference on the node so it
6139 		 * survives and isn't removed from the tree
6140 		 * while we print it.
6141 		 */
6142 		binder_inc_node_tmpref_ilocked(node);
6143 		/* Need to drop inner lock to take node lock */
6144 		binder_inner_proc_unlock(proc);
6145 		if (last_node)
6146 			binder_put_node(last_node);
6147 		binder_node_inner_lock(node);
6148 		print_binder_node_nilocked(m, node);
6149 		binder_node_inner_unlock(node);
6150 		last_node = node;
6151 		binder_inner_proc_lock(proc);
6152 	}
6153 	binder_inner_proc_unlock(proc);
6154 	if (last_node)
6155 		binder_put_node(last_node);
6156 
6157 	if (print_all) {
6158 		binder_proc_lock(proc);
6159 		for (n = rb_first(&proc->refs_by_desc);
6160 		     n != NULL;
6161 		     n = rb_next(n))
6162 			print_binder_ref_olocked(m, rb_entry(n,
6163 							    struct binder_ref,
6164 							    rb_node_desc));
6165 		binder_proc_unlock(proc);
6166 	}
6167 	binder_alloc_print_allocated(m, &proc->alloc);
6168 	binder_inner_proc_lock(proc);
6169 	list_for_each_entry(w, &proc->todo, entry)
6170 		print_binder_work_ilocked(m, proc, "  ",
6171 					  "  pending transaction", w);
6172 	list_for_each_entry(w, &proc->delivered_death, entry) {
6173 		seq_puts(m, "  has delivered dead binder\n");
6174 		break;
6175 	}
6176 	binder_inner_proc_unlock(proc);
6177 	if (!print_all && m->count == header_pos)
6178 		m->count = start_pos;
6179 }
6180 
6181 static const char * const binder_return_strings[] = {
6182 	"BR_ERROR",
6183 	"BR_OK",
6184 	"BR_TRANSACTION",
6185 	"BR_REPLY",
6186 	"BR_ACQUIRE_RESULT",
6187 	"BR_DEAD_REPLY",
6188 	"BR_TRANSACTION_COMPLETE",
6189 	"BR_INCREFS",
6190 	"BR_ACQUIRE",
6191 	"BR_RELEASE",
6192 	"BR_DECREFS",
6193 	"BR_ATTEMPT_ACQUIRE",
6194 	"BR_NOOP",
6195 	"BR_SPAWN_LOOPER",
6196 	"BR_FINISHED",
6197 	"BR_DEAD_BINDER",
6198 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6199 	"BR_FAILED_REPLY",
6200 	"BR_FROZEN_REPLY",
6201 	"BR_ONEWAY_SPAM_SUSPECT",
6202 	"BR_TRANSACTION_PENDING_FROZEN"
6203 };
6204 
6205 static const char * const binder_command_strings[] = {
6206 	"BC_TRANSACTION",
6207 	"BC_REPLY",
6208 	"BC_ACQUIRE_RESULT",
6209 	"BC_FREE_BUFFER",
6210 	"BC_INCREFS",
6211 	"BC_ACQUIRE",
6212 	"BC_RELEASE",
6213 	"BC_DECREFS",
6214 	"BC_INCREFS_DONE",
6215 	"BC_ACQUIRE_DONE",
6216 	"BC_ATTEMPT_ACQUIRE",
6217 	"BC_REGISTER_LOOPER",
6218 	"BC_ENTER_LOOPER",
6219 	"BC_EXIT_LOOPER",
6220 	"BC_REQUEST_DEATH_NOTIFICATION",
6221 	"BC_CLEAR_DEATH_NOTIFICATION",
6222 	"BC_DEAD_BINDER_DONE",
6223 	"BC_TRANSACTION_SG",
6224 	"BC_REPLY_SG",
6225 };
6226 
6227 static const char * const binder_objstat_strings[] = {
6228 	"proc",
6229 	"thread",
6230 	"node",
6231 	"ref",
6232 	"death",
6233 	"transaction",
6234 	"transaction_complete"
6235 };
6236 
6237 static void print_binder_stats(struct seq_file *m, const char *prefix,
6238 			       struct binder_stats *stats)
6239 {
6240 	int i;
6241 
6242 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6243 		     ARRAY_SIZE(binder_command_strings));
6244 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6245 		int temp = atomic_read(&stats->bc[i]);
6246 
6247 		if (temp)
6248 			seq_printf(m, "%s%s: %d\n", prefix,
6249 				   binder_command_strings[i], temp);
6250 	}
6251 
6252 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6253 		     ARRAY_SIZE(binder_return_strings));
6254 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6255 		int temp = atomic_read(&stats->br[i]);
6256 
6257 		if (temp)
6258 			seq_printf(m, "%s%s: %d\n", prefix,
6259 				   binder_return_strings[i], temp);
6260 	}
6261 
6262 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6263 		     ARRAY_SIZE(binder_objstat_strings));
6264 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6265 		     ARRAY_SIZE(stats->obj_deleted));
6266 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6267 		int created = atomic_read(&stats->obj_created[i]);
6268 		int deleted = atomic_read(&stats->obj_deleted[i]);
6269 
6270 		if (created || deleted)
6271 			seq_printf(m, "%s%s: active %d total %d\n",
6272 				prefix,
6273 				binder_objstat_strings[i],
6274 				created - deleted,
6275 				created);
6276 	}
6277 }
6278 
6279 static void print_binder_proc_stats(struct seq_file *m,
6280 				    struct binder_proc *proc)
6281 {
6282 	struct binder_work *w;
6283 	struct binder_thread *thread;
6284 	struct rb_node *n;
6285 	int count, strong, weak, ready_threads;
6286 	size_t free_async_space =
6287 		binder_alloc_get_free_async_space(&proc->alloc);
6288 
6289 	seq_printf(m, "proc %d\n", proc->pid);
6290 	seq_printf(m, "context %s\n", proc->context->name);
6291 	count = 0;
6292 	ready_threads = 0;
6293 	binder_inner_proc_lock(proc);
6294 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6295 		count++;
6296 
6297 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6298 		ready_threads++;
6299 
6300 	seq_printf(m, "  threads: %d\n", count);
6301 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6302 			"  ready threads %d\n"
6303 			"  free async space %zd\n", proc->requested_threads,
6304 			proc->requested_threads_started, proc->max_threads,
6305 			ready_threads,
6306 			free_async_space);
6307 	count = 0;
6308 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6309 		count++;
6310 	binder_inner_proc_unlock(proc);
6311 	seq_printf(m, "  nodes: %d\n", count);
6312 	count = 0;
6313 	strong = 0;
6314 	weak = 0;
6315 	binder_proc_lock(proc);
6316 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6317 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6318 						  rb_node_desc);
6319 		count++;
6320 		strong += ref->data.strong;
6321 		weak += ref->data.weak;
6322 	}
6323 	binder_proc_unlock(proc);
6324 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6325 
6326 	count = binder_alloc_get_allocated_count(&proc->alloc);
6327 	seq_printf(m, "  buffers: %d\n", count);
6328 
6329 	binder_alloc_print_pages(m, &proc->alloc);
6330 
6331 	count = 0;
6332 	binder_inner_proc_lock(proc);
6333 	list_for_each_entry(w, &proc->todo, entry) {
6334 		if (w->type == BINDER_WORK_TRANSACTION)
6335 			count++;
6336 	}
6337 	binder_inner_proc_unlock(proc);
6338 	seq_printf(m, "  pending transactions: %d\n", count);
6339 
6340 	print_binder_stats(m, "  ", &proc->stats);
6341 }
6342 
6343 static int state_show(struct seq_file *m, void *unused)
6344 {
6345 	struct binder_proc *proc;
6346 	struct binder_node *node;
6347 	struct binder_node *last_node = NULL;
6348 
6349 	seq_puts(m, "binder state:\n");
6350 
6351 	spin_lock(&binder_dead_nodes_lock);
6352 	if (!hlist_empty(&binder_dead_nodes))
6353 		seq_puts(m, "dead nodes:\n");
6354 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6355 		/*
6356 		 * take a temporary reference on the node so it
6357 		 * survives and isn't removed from the list
6358 		 * while we print it.
6359 		 */
6360 		node->tmp_refs++;
6361 		spin_unlock(&binder_dead_nodes_lock);
6362 		if (last_node)
6363 			binder_put_node(last_node);
6364 		binder_node_lock(node);
6365 		print_binder_node_nilocked(m, node);
6366 		binder_node_unlock(node);
6367 		last_node = node;
6368 		spin_lock(&binder_dead_nodes_lock);
6369 	}
6370 	spin_unlock(&binder_dead_nodes_lock);
6371 	if (last_node)
6372 		binder_put_node(last_node);
6373 
6374 	mutex_lock(&binder_procs_lock);
6375 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6376 		print_binder_proc(m, proc, 1);
6377 	mutex_unlock(&binder_procs_lock);
6378 
6379 	return 0;
6380 }
6381 
6382 static int stats_show(struct seq_file *m, void *unused)
6383 {
6384 	struct binder_proc *proc;
6385 
6386 	seq_puts(m, "binder stats:\n");
6387 
6388 	print_binder_stats(m, "", &binder_stats);
6389 
6390 	mutex_lock(&binder_procs_lock);
6391 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6392 		print_binder_proc_stats(m, proc);
6393 	mutex_unlock(&binder_procs_lock);
6394 
6395 	return 0;
6396 }
6397 
6398 static int transactions_show(struct seq_file *m, void *unused)
6399 {
6400 	struct binder_proc *proc;
6401 
6402 	seq_puts(m, "binder transactions:\n");
6403 	mutex_lock(&binder_procs_lock);
6404 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6405 		print_binder_proc(m, proc, 0);
6406 	mutex_unlock(&binder_procs_lock);
6407 
6408 	return 0;
6409 }
6410 
6411 static int proc_show(struct seq_file *m, void *unused)
6412 {
6413 	struct binder_proc *itr;
6414 	int pid = (unsigned long)m->private;
6415 
6416 	mutex_lock(&binder_procs_lock);
6417 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6418 		if (itr->pid == pid) {
6419 			seq_puts(m, "binder proc state:\n");
6420 			print_binder_proc(m, itr, 1);
6421 		}
6422 	}
6423 	mutex_unlock(&binder_procs_lock);
6424 
6425 	return 0;
6426 }
6427 
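/*
 * Print one log slot, detecting torn reads. The writer bumps
 * debug_id_done only after filling the entry, so the entry is printed
 * between two reads of debug_id_done (with read barriers in between);
 * if the two reads disagree, or the first read is 0, the line is
 * flagged " (incomplete)".
 */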
6428 static void print_binder_transaction_log_entry(struct seq_file *m,
6429 					struct binder_transaction_log_entry *e)
6430 {
6431 	int debug_id = READ_ONCE(e->debug_id_done);
6432 	/*
6433 	 * read barrier to guarantee debug_id_done read before
6434 	 * we print the log values
6435 	 */
6436 	smp_rmb();
6437 	seq_printf(m,
6438 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6439 		   e->debug_id, (e->call_type == 2) ? "reply" :
6440 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6441 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6442 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6443 		   e->return_error, e->return_error_param,
6444 		   e->return_error_line);
6445 	/*
6446 	 * read-barrier to guarantee read of debug_id_done after
6447 	 * done printing the fields of the entry
6448 	 */
6449 	smp_rmb();
6450 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6451 			"\n" : " (incomplete)\n");
6452 }
6453 
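/*
 * Dump the transaction log ring buffer. log->cur only ever
 * increments, so cur + 1 is the number of entries ever written. Once
 * the ring has wrapped (or log->full is set), printing starts at the
 * oldest slot, (cur + 1) % ARRAY_SIZE(log->entry), and walks all
 * ARRAY_SIZE entries; otherwise it prints slots 0..cur.
 */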
6454 static int transaction_log_show(struct seq_file *m, void *unused)
6455 {
6456 	struct binder_transaction_log *log = m->private;
6457 	unsigned int log_cur = atomic_read(&log->cur);
6458 	unsigned int count;
6459 	unsigned int cur;
6460 	int i;
6461 
6462 	count = log_cur + 1;
6463 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6464 		0 : count % ARRAY_SIZE(log->entry);
6465 	if (count > ARRAY_SIZE(log->entry) || log->full)
6466 		count = ARRAY_SIZE(log->entry);
6467 	for (i = 0; i < count; i++) {
6468 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6469 
6470 		print_binder_transaction_log_entry(m, &log->entry[index]);
6471 	}
6472 	return 0;
6473 }
6474 
6475 const struct file_operations binder_fops = {
6476 	.owner = THIS_MODULE,
6477 	.poll = binder_poll,
6478 	.unlocked_ioctl = binder_ioctl,
6479 	.compat_ioctl = compat_ptr_ioctl,
6480 	.mmap = binder_mmap,
6481 	.open = binder_open,
6482 	.flush = binder_flush,
6483 	.release = binder_release,
6484 };
6485 
6486 DEFINE_SHOW_ATTRIBUTE(state);
6487 DEFINE_SHOW_ATTRIBUTE(stats);
6488 DEFINE_SHOW_ATTRIBUTE(transactions);
6489 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6490 
6491 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6492 	{
6493 		.name = "state",
6494 		.mode = 0444,
6495 		.fops = &state_fops,
6496 		.data = NULL,
6497 	},
6498 	{
6499 		.name = "stats",
6500 		.mode = 0444,
6501 		.fops = &stats_fops,
6502 		.data = NULL,
6503 	},
6504 	{
6505 		.name = "transactions",
6506 		.mode = 0444,
6507 		.fops = &transactions_fops,
6508 		.data = NULL,
6509 	},
6510 	{
6511 		.name = "transaction_log",
6512 		.mode = 0444,
6513 		.fops = &transaction_log_fops,
6514 		.data = &binder_transaction_log,
6515 	},
6516 	{
6517 		.name = "failed_transaction_log",
6518 		.mode = 0444,
6519 		.fops = &transaction_log_fops,
6520 		.data = &binder_transaction_log_failed,
6521 	},
6522 	{} /* terminator */
6523 };
6524 
6525 static int __init init_binder_device(const char *name)
6526 {
6527 	int ret;
6528 	struct binder_device *binder_device;
6529 
6530 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6531 	if (!binder_device)
6532 		return -ENOMEM;
6533 
6534 	binder_device->miscdev.fops = &binder_fops;
6535 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6536 	binder_device->miscdev.name = name;
6537 
6538 	refcount_set(&binder_device->ref, 1);
6539 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6540 	binder_device->context.name = name;
6541 	mutex_init(&binder_device->context.context_mgr_node_lock);
6542 
6543 	ret = misc_register(&binder_device->miscdev);
6544 	if (ret < 0) {
6545 		kfree(binder_device);
6546 		return ret;
6547 	}
6548 
6549 	hlist_add_head(&binder_device->hlist, &binder_devices);
6550 
6551 	return ret;
6552 }
6553 
6554 static int __init binder_init(void)
6555 {
6556 	int ret;
6557 	char *device_name, *device_tmp;
6558 	struct binder_device *device;
6559 	struct hlist_node *tmp;
6560 	char *device_names = NULL;
6561 	const struct binder_debugfs_entry *db_entry;
6562 
6563 	ret = binder_alloc_shrinker_init();
6564 	if (ret)
6565 		return ret;
6566 
6567 	atomic_set(&binder_transaction_log.cur, ~0U);
6568 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6569 
6570 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6571 
6572 	binder_for_each_debugfs_entry(db_entry)
6573 		debugfs_create_file(db_entry->name,
6574 					db_entry->mode,
6575 					binder_debugfs_dir_entry_root,
6576 					db_entry->data,
6577 					db_entry->fops);
6578 
6579 	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6580 						binder_debugfs_dir_entry_root);
6581 
6582 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6583 	    strcmp(binder_devices_param, "") != 0) {
6584 		/*
6585 		 * Copy the module_parameter string, because we don't want to
6586 		 * tokenize it in-place.
6587 		 */
6588 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6589 		if (!device_names) {
6590 			ret = -ENOMEM;
6591 			goto err_alloc_device_names_failed;
6592 		}
6593 
6594 		device_tmp = device_names;
6595 		while ((device_name = strsep(&device_tmp, ","))) {
6596 			ret = init_binder_device(device_name);
6597 			if (ret)
6598 				goto err_init_binder_device_failed;
6599 		}
6600 	}
6601 
6602 	ret = init_binderfs();
6603 	if (ret)
6604 		goto err_init_binder_device_failed;
6605 
6606 	return ret;
6607 
6608 err_init_binder_device_failed:
6609 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6610 		misc_deregister(&device->miscdev);
6611 		hlist_del(&device->hlist);
6612 		kfree(device);
6613 	}
6614 
6615 	kfree(device_names);
6616 
6617 err_alloc_device_names_failed:
6618 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6619 	binder_alloc_shrinker_exit();
6620 
6621 	return ret;
6622 }
6623 
6624 device_initcall(binder_init);
6625 
6626 #define CREATE_TRACE_POINTS
6627 #include "binder_trace.h"
6628 
6629 MODULE_LICENSE("GPL v2");
6630