xref: /linux/drivers/android/binder.c (revision 4a6e2325afc980920b48d5337a5fd3d1649b0aff)
// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
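
/*
 * Example (hypothetical sketch, not a path taken verbatim by the driver):
 * code that needed a ref, its node and the proc's work lists all at once
 * would nest the helpers in the order above:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */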

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
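
/*
 * Usage sketch (hypothetical values; thread->ee is the per-thread
 * struct binder_extended_error): a failing transaction path would
 * record its error as
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */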

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
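
/*
 * Writer-side sketch (hypothetical caller): grab a slot, fill it in, and
 * only publish debug_id_done once the entry is complete, so a reader can
 * detect a slot being reused mid-print:
 *
 *	struct binder_transaction_log_entry *e;
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->debug_id = t_debug_id;
 *	...
 *	smp_wmb();
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 */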

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			   struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such a
	 * thread risks waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
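
/*
 * Pairing sketch: binder_get_node() and friends return with an implicit
 * tmp_ref held, so a typical lookup is bracketed as
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...		(node cannot be freed in this window)
 *		binder_put_node(node);
 *	}
 */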

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}

	if (ref->freeze) {
		binder_dequeue_work(ref->proc, &ref->freeze->work);
		binder_stats_deleted(BINDER_STAT_FREEZE);
	}

	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Cleaning up the new reference
		 * with strong==0 while a tmp_ref still pins the node
		 * will not decrement the node. The new_ref gets
		 * kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * The atomic protects the counter since it can be incremented
	 * without holding the inner lock (see binder_get_txn_from());
	 * the thread is only freed once it is dead and the count is zero.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
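
/*
 * Pairing sketch: a caller that only needs the "from" thread briefly
 * brackets the use with the tmp_ref, e.g.
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...		(thread pinned by tmp_ref)
 *		binder_thread_dec_tmpref(from);
 *	}
 */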

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
1836 
1837 /**
1838  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1839  * @proc:	binder_proc owning the buffer
1840  * @b:		binder_buffer containing the object
1841  * @object:	struct binder_object to read into
1842  * @index:	index in offset array at which the binder_buffer_object is
1843  *		located
1844  * @start_offset: points to the start of the offset array
1845  * @object_offsetp: offset of @object read from @b
1846  * @num_valid:	the number of valid offsets in the offset array
1847  *
1848  * Return:	If @index is within the valid range of the offset array
1849  *		described by @start_offset and @num_valid, and if there's a valid
1850  *		binder_buffer_object at the offset found in index @index
1851  *		of the offset array, that object is returned. Otherwise,
1852  *		%NULL is returned.
1853  *		Note that the offset found in index @index itself is not
1854  *		verified; this function assumes that @num_valid elements
1855  *		from @start were previously verified to have valid offsets.
1856  *		If @object_offsetp is non-NULL, then the offset within
1857  *		@b is written to it.
1858  */
1859 static struct binder_buffer_object *binder_validate_ptr(
1860 						struct binder_proc *proc,
1861 						struct binder_buffer *b,
1862 						struct binder_object *object,
1863 						binder_size_t index,
1864 						binder_size_t start_offset,
1865 						binder_size_t *object_offsetp,
1866 						binder_size_t num_valid)
1867 {
1868 	size_t object_size;
1869 	binder_size_t object_offset;
1870 	unsigned long buffer_offset;
1871 
1872 	if (index >= num_valid)
1873 		return NULL;
1874 
1875 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1876 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1877 					  b, buffer_offset,
1878 					  sizeof(object_offset)))
1879 		return NULL;
1880 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1881 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1882 		return NULL;
1883 	if (object_offsetp)
1884 		*object_offsetp = object_offset;
1885 
1886 	return &object->bbo;
1887 }
1888 
1889 /**
1890  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1891  * @proc:		binder_proc owning the buffer
1892  * @b:			transaction buffer
1893  * @objects_start_offset: offset to start of objects buffer
1894  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1895  * @fixup_offset:	start offset in @buffer to fix up
1896  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1897  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1898  *
1899  * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
1900  *			allowed.
1901  *
1902  * For safety reasons, we only allow fixups inside a buffer to happen
1903  * at increasing offsets; additionally, we only allow fixup on the last
1904  * buffer object that was verified, or one of its parents.
1905  *
1906  * Example of what is allowed:
1907  *
1908  * A
1909  *   B (parent = A, offset = 0)
1910  *   C (parent = A, offset = 16)
1911  *     D (parent = C, offset = 0)
1912  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1913  *
1914  * Examples of what is not allowed:
1915  *
1916  * Decreasing offsets within the same parent:
1917  * A
1918  *   C (parent = A, offset = 16)
1919  *   B (parent = A, offset = 0) // decreasing offset within A
1920  *
1921  * Referring to a parent that wasn't the last object or any of its parents:
1922  * A
1923  *   B (parent = A, offset = 0)
1924  *   C (parent = A, offset = 16)
1926  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1927  */
1928 static bool binder_validate_fixup(struct binder_proc *proc,
1929 				  struct binder_buffer *b,
1930 				  binder_size_t objects_start_offset,
1931 				  binder_size_t buffer_obj_offset,
1932 				  binder_size_t fixup_offset,
1933 				  binder_size_t last_obj_offset,
1934 				  binder_size_t last_min_offset)
1935 {
1936 	if (!last_obj_offset) {
1937 		/* No buffer object has been validated yet; nothing to fix up */
1938 		return false;
1939 	}
1940 
1941 	while (last_obj_offset != buffer_obj_offset) {
1942 		unsigned long buffer_offset;
1943 		struct binder_object last_object;
1944 		struct binder_buffer_object *last_bbo;
1945 		size_t object_size = binder_get_object(proc, NULL, b,
1946 						       last_obj_offset,
1947 						       &last_object);
1948 		if (object_size != sizeof(*last_bbo))
1949 			return false;
1950 
1951 		last_bbo = &last_object.bbo;
1952 		/*
1953 		 * Safe to retrieve the parent of last_obj, since it
1954 		 * was already previously verified by the driver.
1955 		 */
1956 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1957 			return false;
1958 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1959 		buffer_offset = objects_start_offset +
1960 			sizeof(binder_size_t) * last_bbo->parent;
1961 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1962 						  &last_obj_offset,
1963 						  b, buffer_offset,
1964 						  sizeof(last_obj_offset)))
1965 			return false;
1966 	}
1967 	return (fixup_offset >= last_min_offset);
1968 }
1969 
1970 /**
1971  * struct binder_task_work_cb - for deferred close
1972  *
1973  * @twork:                callback_head for task work
1974  * @file:                 file to close
1975  *
1976  * Structure to pass task work to be handled after
1977  * returning from binder_ioctl() via task_work_add().
1978  */
1979 struct binder_task_work_cb {
1980 	struct callback_head twork;
1981 	struct file *file;
1982 };
1983 
1984 /**
1985  * binder_do_fd_close() - close a file as deferred task work
1986  * @twork:	callback head for task work
1987  *
1988  * It is not safe to call ksys_close() during the binder_ioctl()
1989  * function if there is a chance that binder's own file descriptor
1990  * might be closed. This is to meet the requirements for using
1991  * fdget() (see comments for __fget_light()). Therefore use
1992  * task_work_add() to schedule the close operation once we have
1993  * returned from binder_ioctl(). This function is the callback
1994  * for that mechanism and drops the pinned reference to the
1995  * file with fput().
1996  */
1997 static void binder_do_fd_close(struct callback_head *twork)
1998 {
1999 	struct binder_task_work_cb *twcb = container_of(twork,
2000 			struct binder_task_work_cb, twork);
2001 
2002 	fput(twcb->file);
2003 	kfree(twcb);
2004 }
2005 
2006 /**
2007  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2008  * @fd:		file-descriptor to close
2009  *
2010  * See comments in binder_do_fd_close(). This function is used to schedule
2011  * a file-descriptor to be closed after returning from binder_ioctl().
2012  */
2013 static void binder_deferred_fd_close(int fd)
2014 {
2015 	struct binder_task_work_cb *twcb;
2016 
2017 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2018 	if (!twcb)
2019 		return;
2020 	init_task_work(&twcb->twork, binder_do_fd_close);
2021 	twcb->file = file_close_fd(fd);
2022 	if (twcb->file) {
2023 		// pin it until binder_do_fd_close(); see comments there
2024 		get_file(twcb->file);
2025 		filp_close(twcb->file, current->files);
2026 		task_work_add(current, &twcb->twork, TWA_RESUME);
2027 	} else {
2028 		kfree(twcb);
2029 	}
2030 }
2031 
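/**
 * binder_transaction_buffer_release() - release the objects in a buffer
 * @proc:	binder_proc that owns @buffer
 * @thread:	thread doing the release, or NULL (e.g. when releasing
 *		an outdated buffer on behalf of another process)
 * @buffer:	transaction buffer being released
 * @off_end_offset: end of the offset array; only objects whose offset
 *		entries lie before this point are cleaned up, allowing
 *		partial release when a transaction failed mid-translation
 * @is_failure:	true if the transaction failed before delivery; fd
 *		fixups were then never applied, so no fds are closed
 *
 * Drops the node and ref counts taken for each flattened binder
 * object in @buffer and schedules deferred closes for fds installed
 * from BINDER_TYPE_FDA objects.
 */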
2032 static void binder_transaction_buffer_release(struct binder_proc *proc,
2033 					      struct binder_thread *thread,
2034 					      struct binder_buffer *buffer,
2035 					      binder_size_t off_end_offset,
2036 					      bool is_failure)
2037 {
2038 	int debug_id = buffer->debug_id;
2039 	binder_size_t off_start_offset, buffer_offset;
2040 
2041 	binder_debug(BINDER_DEBUG_TRANSACTION,
2042 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2043 		     proc->pid, buffer->debug_id,
2044 		     buffer->data_size, buffer->offsets_size,
2045 		     (unsigned long long)off_end_offset);
2046 
2047 	if (buffer->target_node)
2048 		binder_dec_node(buffer->target_node, 1, 0);
2049 
2050 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2051 
2052 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2053 	     buffer_offset += sizeof(binder_size_t)) {
2054 		struct binder_object_header *hdr;
2055 		size_t object_size = 0;
2056 		struct binder_object object;
2057 		binder_size_t object_offset;
2058 
2059 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2060 						   buffer, buffer_offset,
2061 						   sizeof(object_offset)))
2062 			object_size = binder_get_object(proc, NULL, buffer,
2063 							object_offset, &object);
2064 		if (object_size == 0) {
2065 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2066 			       debug_id, (u64)object_offset, buffer->data_size);
2067 			continue;
2068 		}
2069 		hdr = &object.hdr;
2070 		switch (hdr->type) {
2071 		case BINDER_TYPE_BINDER:
2072 		case BINDER_TYPE_WEAK_BINDER: {
2073 			struct flat_binder_object *fp;
2074 			struct binder_node *node;
2075 
2076 			fp = to_flat_binder_object(hdr);
2077 			node = binder_get_node(proc, fp->binder);
2078 			if (node == NULL) {
2079 				pr_err("transaction release %d bad node %016llx\n",
2080 				       debug_id, (u64)fp->binder);
2081 				break;
2082 			}
2083 			binder_debug(BINDER_DEBUG_TRANSACTION,
2084 				     "        node %d u%016llx\n",
2085 				     node->debug_id, (u64)node->ptr);
2086 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2087 					0);
2088 			binder_put_node(node);
2089 		} break;
2090 		case BINDER_TYPE_HANDLE:
2091 		case BINDER_TYPE_WEAK_HANDLE: {
2092 			struct flat_binder_object *fp;
2093 			struct binder_ref_data rdata;
2094 			int ret;
2095 
2096 			fp = to_flat_binder_object(hdr);
2097 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2098 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2099 
2100 			if (ret) {
2101 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2102 				       debug_id, fp->handle, ret);
2103 				break;
2104 			}
2105 			binder_debug(BINDER_DEBUG_TRANSACTION,
2106 				     "        ref %d desc %d\n",
2107 				     rdata.debug_id, rdata.desc);
2108 		} break;
2109 
2110 		case BINDER_TYPE_FD: {
2111 			/*
2112 			 * No need to close the file here since user-space
2113 			 * closes it for successfully delivered
2114 			 * transactions. For transactions that weren't
2115 			 * delivered, the new fd was never allocated so
2116 			 * there is no need to close and the fput on the
2117 			 * file is done when the transaction is torn
2118 			 * down.
2119 			 */
2120 		} break;
2121 		case BINDER_TYPE_PTR:
2122 			/*
2123 			 * Nothing to do here, this will get cleaned up when the
2124 			 * transaction buffer gets freed
2125 			 */
2126 			break;
2127 		case BINDER_TYPE_FDA: {
2128 			struct binder_fd_array_object *fda;
2129 			struct binder_buffer_object *parent;
2130 			struct binder_object ptr_object;
2131 			binder_size_t fda_offset;
2132 			size_t fd_index;
2133 			binder_size_t fd_buf_size;
2134 			binder_size_t num_valid;
2135 
2136 			if (is_failure) {
2137 				/*
2138 				 * The fd fixups have not been applied so no
2139 				 * fds need to be closed.
2140 				 */
2141 				continue;
2142 			}
2143 
2144 			num_valid = (buffer_offset - off_start_offset) /
2145 						sizeof(binder_size_t);
2146 			fda = to_binder_fd_array_object(hdr);
2147 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2148 						     fda->parent,
2149 						     off_start_offset,
2150 						     NULL,
2151 						     num_valid);
2152 			if (!parent) {
2153 				pr_err("transaction release %d bad parent offset\n",
2154 				       debug_id);
2155 				continue;
2156 			}
2157 			fd_buf_size = sizeof(u32) * fda->num_fds;
2158 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2159 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2160 				       debug_id, (u64)fda->num_fds);
2161 				continue;
2162 			}
2163 			if (fd_buf_size > parent->length ||
2164 			    fda->parent_offset > parent->length - fd_buf_size) {
2165 				/* No space for all file descriptors here. */
2166 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2167 				       debug_id, (u64)fda->num_fds);
2168 				continue;
2169 			}
2170 			/*
2171 			 * the source data for binder_buffer_object is visible
2172 			 * to user-space and the @buffer element is the user
2173 			 * pointer to the buffer_object containing the fd_array.
2174 			 * Convert the address to an offset relative to
2175 			 * the base of the transaction buffer.
2176 			 */
2177 			fda_offset = parent->buffer - buffer->user_data +
2178 				fda->parent_offset;
2179 			for (fd_index = 0; fd_index < fda->num_fds;
2180 			     fd_index++) {
2181 				u32 fd;
2182 				int err;
2183 				binder_size_t offset = fda_offset +
2184 					fd_index * sizeof(fd);
2185 
2186 				err = binder_alloc_copy_from_buffer(
2187 						&proc->alloc, &fd, buffer,
2188 						offset, sizeof(fd));
2189 				WARN_ON(err);
2190 				if (!err) {
2191 					binder_deferred_fd_close(fd);
2192 					/*
2193 					 * Need to make sure the thread goes
2194 					 * back to userspace to complete the
2195 					 * deferred close
2196 					 */
2197 					if (thread)
2198 						thread->looper_need_return = true;
2199 				}
2200 			}
2201 		} break;
2202 		default:
2203 			pr_err("transaction release %d bad object type %x\n",
2204 				debug_id, hdr->type);
2205 			break;
2206 		}
2207 	}
2208 }
2209 
2210 /* Clean up all the objects in the buffer */
2211 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2212 						struct binder_thread *thread,
2213 						struct binder_buffer *buffer,
2214 						bool is_failure)
2215 {
2216 	binder_size_t off_end_offset;
2217 
2218 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2219 	off_end_offset += buffer->offsets_size;
2220 
2221 	binder_transaction_buffer_release(proc, thread, buffer,
2222 					  off_end_offset, is_failure);
2223 }
2224 
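/**
 * binder_translate_binder() - convert a binder object into a handle
 * @fp:		flat_binder_object to translate in place
 * @t:		transaction the object is part of
 * @thread:	sending thread
 *
 * Looks up (or creates) the node for the sender's binder object and
 * takes a reference on it in the target process, rewriting @fp as a
 * BINDER_TYPE_(WEAK_)HANDLE the target can use.
 *
 * Return: 0 on success, negative errno on failure
 */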
2225 static int binder_translate_binder(struct flat_binder_object *fp,
2226 				   struct binder_transaction *t,
2227 				   struct binder_thread *thread)
2228 {
2229 	struct binder_node *node;
2230 	struct binder_proc *proc = thread->proc;
2231 	struct binder_proc *target_proc = t->to_proc;
2232 	struct binder_ref_data rdata;
2233 	int ret = 0;
2234 
2235 	node = binder_get_node(proc, fp->binder);
2236 	if (!node) {
2237 		node = binder_new_node(proc, fp);
2238 		if (!node)
2239 			return -ENOMEM;
2240 	}
2241 	if (fp->cookie != node->cookie) {
2242 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2243 				  proc->pid, thread->pid, (u64)fp->binder,
2244 				  node->debug_id, (u64)fp->cookie,
2245 				  (u64)node->cookie);
2246 		ret = -EINVAL;
2247 		goto done;
2248 	}
2249 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2250 		ret = -EPERM;
2251 		goto done;
2252 	}
2253 
2254 	ret = binder_inc_ref_for_node(target_proc, node,
2255 			fp->hdr.type == BINDER_TYPE_BINDER,
2256 			&thread->todo, &rdata);
2257 	if (ret)
2258 		goto done;
2259 
2260 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2261 		fp->hdr.type = BINDER_TYPE_HANDLE;
2262 	else
2263 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2264 	fp->binder = 0;
2265 	fp->handle = rdata.desc;
2266 	fp->cookie = 0;
2267 
2268 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2269 	binder_debug(BINDER_DEBUG_TRANSACTION,
2270 		     "        node %d u%016llx -> ref %d desc %d\n",
2271 		     node->debug_id, (u64)node->ptr,
2272 		     rdata.debug_id, rdata.desc);
2273 done:
2274 	binder_put_node(node);
2275 	return ret;
2276 }
2277 
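/**
 * binder_translate_handle() - convert a handle for the target process
 * @fp:		flat_binder_object containing the handle to translate
 * @t:		transaction the object is part of
 * @thread:	sending thread
 *
 * If the handle refers to a node owned by the target process, @fp is
 * rewritten as a (weak) binder object; otherwise a reference is taken
 * in the target and @fp is rewritten with the target-side handle.
 *
 * Return: 0 on success, negative errno on failure
 */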
2278 static int binder_translate_handle(struct flat_binder_object *fp,
2279 				   struct binder_transaction *t,
2280 				   struct binder_thread *thread)
2281 {
2282 	struct binder_proc *proc = thread->proc;
2283 	struct binder_proc *target_proc = t->to_proc;
2284 	struct binder_node *node;
2285 	struct binder_ref_data src_rdata;
2286 	int ret = 0;
2287 
2288 	node = binder_get_node_from_ref(proc, fp->handle,
2289 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2290 	if (!node) {
2291 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2292 				  proc->pid, thread->pid, fp->handle);
2293 		return -EINVAL;
2294 	}
2295 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2296 		ret = -EPERM;
2297 		goto done;
2298 	}
2299 
2300 	binder_node_lock(node);
2301 	if (node->proc == target_proc) {
2302 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2303 			fp->hdr.type = BINDER_TYPE_BINDER;
2304 		else
2305 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2306 		fp->binder = node->ptr;
2307 		fp->cookie = node->cookie;
2308 		if (node->proc)
2309 			binder_inner_proc_lock(node->proc);
2310 		else
2311 			__acquire(&node->proc->inner_lock);
2312 		binder_inc_node_nilocked(node,
2313 					 fp->hdr.type == BINDER_TYPE_BINDER,
2314 					 0, NULL);
2315 		if (node->proc)
2316 			binder_inner_proc_unlock(node->proc);
2317 		else
2318 			__release(&node->proc->inner_lock);
2319 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2320 		binder_debug(BINDER_DEBUG_TRANSACTION,
2321 			     "        ref %d desc %d -> node %d u%016llx\n",
2322 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2323 			     (u64)node->ptr);
2324 		binder_node_unlock(node);
2325 	} else {
2326 		struct binder_ref_data dest_rdata;
2327 
2328 		binder_node_unlock(node);
2329 		ret = binder_inc_ref_for_node(target_proc, node,
2330 				fp->hdr.type == BINDER_TYPE_HANDLE,
2331 				NULL, &dest_rdata);
2332 		if (ret)
2333 			goto done;
2334 
2335 		fp->binder = 0;
2336 		fp->handle = dest_rdata.desc;
2337 		fp->cookie = 0;
2338 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2339 						    &dest_rdata);
2340 		binder_debug(BINDER_DEBUG_TRANSACTION,
2341 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2342 			     src_rdata.debug_id, src_rdata.desc,
2343 			     dest_rdata.debug_id, dest_rdata.desc,
2344 			     node->debug_id);
2345 	}
2346 done:
2347 	binder_put_node(node);
2348 	return ret;
2349 }
2350 
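/**
 * binder_translate_fd() - queue an fd translation for the target
 * @fd:		file descriptor in the sender's fd table
 * @fd_offset:	offset in the target buffer where the fd is stored
 * @t:		transaction the fd is part of
 * @thread:	sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * Pins the sender's file and adds a fixup record to @t->fd_fixups;
 * the new fd must be allocated later, from the context of the target
 * process.
 *
 * Return: 0 on success, negative errno on failure
 */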
2351 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2352 			       struct binder_transaction *t,
2353 			       struct binder_thread *thread,
2354 			       struct binder_transaction *in_reply_to)
2355 {
2356 	struct binder_proc *proc = thread->proc;
2357 	struct binder_proc *target_proc = t->to_proc;
2358 	struct binder_txn_fd_fixup *fixup;
2359 	struct file *file;
2360 	int ret = 0;
2361 	bool target_allows_fd;
2362 
2363 	if (in_reply_to)
2364 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2365 	else
2366 		target_allows_fd = t->buffer->target_node->accept_fds;
2367 	if (!target_allows_fd) {
2368 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2369 				  proc->pid, thread->pid,
2370 				  in_reply_to ? "reply" : "transaction",
2371 				  fd);
2372 		ret = -EPERM;
2373 		goto err_fd_not_accepted;
2374 	}
2375 
2376 	file = fget(fd);
2377 	if (!file) {
2378 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2379 				  proc->pid, thread->pid, fd);
2380 		ret = -EBADF;
2381 		goto err_fget;
2382 	}
2383 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2384 	if (ret < 0) {
2385 		ret = -EPERM;
2386 		goto err_security;
2387 	}
2388 
2389 	/*
2390 	 * Add fixup record for this transaction. The allocation
2391 	 * of the fd in the target needs to be done from a
2392 	 * target thread.
2393 	 */
2394 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2395 	if (!fixup) {
2396 		ret = -ENOMEM;
2397 		goto err_alloc;
2398 	}
2399 	fixup->file = file;
2400 	fixup->offset = fd_offset;
2401 	fixup->target_fd = -1;
2402 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2403 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2404 
2405 	return ret;
2406 
2407 err_alloc:
2408 err_security:
2409 	fput(file);
2410 err_fget:
2411 err_fd_not_accepted:
2412 	return ret;
2413 }
2414 
2415 /**
2416  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2417  * @offset:	offset in target buffer to fix up
2418  * @skip_size:	bytes to skip in copy (fixup will be written later)
2419  * @fixup_data:	data to write at fixup offset
2420  * @node:	list node
2421  *
2422  * This is used for the pointer fixup list (pf) which is created and consumed
2423  * during binder_transaction() and is only accessed locally. No
2424  * locking is necessary.
2425  *
2426  * The list is ordered by @offset.
2427  */
2428 struct binder_ptr_fixup {
2429 	binder_size_t offset;
2430 	size_t skip_size;
2431 	binder_uintptr_t fixup_data;
2432 	struct list_head node;
2433 };
2434 
2435 /**
2436  * struct binder_sg_copy - scatter-gather data to be copied
2437  * @offset:		offset in target buffer
2438  * @sender_uaddr:	user address in source buffer
2439  * @length:		bytes to copy
2440  * @node:		list node
2441  *
2442  * This is used for the sg copy list (sgc) which is created and consumed
2443  * during binder_transaction() and is only accessed locally. No
2444  * locking is necessary.
2445  *
2446  * The list is ordered by @offset.
2447  */
2448 struct binder_sg_copy {
2449 	binder_size_t offset;
2450 	const void __user *sender_uaddr;
2451 	size_t length;
2452 	struct list_head node;
2453 };
2454 
2455 /**
2456  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2457  * @alloc:	binder_alloc associated with @buffer
2458  * @buffer:	binder buffer in target process
2459  * @sgc_head:	list_head of scatter-gather copy list
2460  * @pf_head:	list_head of pointer fixup list
2461  *
2462  * Processes all elements of @sgc_head, applying fixups from @pf_head
2463  * and copying the scatter-gather data from the source process' user
2464  * buffer to the target's buffer. It is expected that the list creation
2465  * and processing all occurs during binder_transaction() so these lists
2466  * are only accessed in local context.
2467  *
2468  * Return: 0=success, else -errno
2469  */
2470 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2471 					 struct binder_buffer *buffer,
2472 					 struct list_head *sgc_head,
2473 					 struct list_head *pf_head)
2474 {
2475 	int ret = 0;
2476 	struct binder_sg_copy *sgc, *tmpsgc;
2477 	struct binder_ptr_fixup *tmppf;
2478 	struct binder_ptr_fixup *pf =
2479 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2480 					 node);
2481 
2482 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2483 		size_t bytes_copied = 0;
2484 
2485 		while (bytes_copied < sgc->length) {
2486 			size_t copy_size;
2487 			size_t bytes_left = sgc->length - bytes_copied;
2488 			size_t offset = sgc->offset + bytes_copied;
2489 
2490 			/*
2491 			 * We copy up to the fixup (pointed to by pf)
2492 			 */
2493 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2494 				       : bytes_left;
2495 			if (!ret && copy_size)
2496 				ret = binder_alloc_copy_user_to_buffer(
2497 						alloc, buffer,
2498 						offset,
2499 						sgc->sender_uaddr + bytes_copied,
2500 						copy_size);
2501 			bytes_copied += copy_size;
2502 			if (copy_size != bytes_left) {
2503 				BUG_ON(!pf);
2504 				/* we stopped at a fixup offset */
2505 				if (pf->skip_size) {
2506 					/*
2507 					 * we are just skipping. This is for
2508 					 * BINDER_TYPE_FDA where the translated
2509 					 * fds will be fixed up when we get
2510 					 * to target context.
2511 					 */
2512 					bytes_copied += pf->skip_size;
2513 				} else {
2514 					/* apply the fixup indicated by pf */
2515 					if (!ret)
2516 						ret = binder_alloc_copy_to_buffer(
2517 							alloc, buffer,
2518 							pf->offset,
2519 							&pf->fixup_data,
2520 							sizeof(pf->fixup_data));
2521 					bytes_copied += sizeof(pf->fixup_data);
2522 				}
2523 				list_del(&pf->node);
2524 				kfree(pf);
2525 				pf = list_first_entry_or_null(pf_head,
2526 						struct binder_ptr_fixup, node);
2527 			}
2528 		}
2529 		list_del(&sgc->node);
2530 		kfree(sgc);
2531 	}
2532 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2533 		BUG_ON(pf->skip_size == 0);
2534 		list_del(&pf->node);
2535 		kfree(pf);
2536 	}
2537 	BUG_ON(!list_empty(sgc_head));
2538 
2539 	return ret > 0 ? -EINVAL : ret;
2540 }
2541 
2542 /**
2543  * binder_cleanup_deferred_txn_lists() - free specified lists
2544  * @sgc_head:	list_head of scatter-gather copy list
2545  * @pf_head:	list_head of pointer fixup list
2546  *
2547  * Called to clean up @sgc_head and @pf_head if there is an
2548  * error.
2549  */
2550 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2551 					      struct list_head *pf_head)
2552 {
2553 	struct binder_sg_copy *sgc, *tmpsgc;
2554 	struct binder_ptr_fixup *pf, *tmppf;
2555 
2556 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2557 		list_del(&sgc->node);
2558 		kfree(sgc);
2559 	}
2560 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2561 		list_del(&pf->node);
2562 		kfree(pf);
2563 	}
2564 }
2565 
2566 /**
2567  * binder_defer_copy() - queue a scatter-gather buffer for copy
2568  * @sgc_head:		list_head of scatter-gather copy list
2569  * @offset:		binder buffer offset in target process
2570  * @sender_uaddr:	user address in source process
2571  * @length:		bytes to copy
2572  *
2573  * Specify a scatter-gather block to be copied. The actual copy must
2574  * be deferred until all the needed fixups are identified and queued.
2575  * Then the copy and fixups are done together so un-translated values
2576  * from the source are never visible in the target buffer.
2577  *
2578  * We are guaranteed that repeated calls to this function will have
2579  * monotonically increasing @offset values so the list will naturally
2580  * be ordered.
2581  *
2582  * Return: 0=success, else -errno
2583  */
2584 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2585 			     const void __user *sender_uaddr, size_t length)
2586 {
2587 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2588 
2589 	if (!bc)
2590 		return -ENOMEM;
2591 
2592 	bc->offset = offset;
2593 	bc->sender_uaddr = sender_uaddr;
2594 	bc->length = length;
2595 	INIT_LIST_HEAD(&bc->node);
2596 
2597 	/*
2598 	 * We are guaranteed that the deferred copies are in-order
2599 	 * so just add to the tail.
2600 	 */
2601 	list_add_tail(&bc->node, sgc_head);
2602 
2603 	return 0;
2604 }
2605 
2606 /**
2607  * binder_add_fixup() - queue a fixup to be applied to sg copy
2608  * @pf_head:	list_head of binder ptr fixup list
2609  * @offset:	binder buffer offset in target process
2610  * @fixup:	bytes to be copied for fixup
2611  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2612  *
2613  * Add the specified fixup to a list ordered by @offset. When copying
2614  * the scatter-gather buffers, the fixup will be copied instead of
2615  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2616  * will be applied later (in target process context), so we just skip
2617  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2618  * value in @fixup.
2619  *
2620  * This function is called *mostly* in @offset order, but there are
2621  * exceptions. Since out-of-order inserts are relatively uncommon,
2622  * we insert the new element by searching backward from the tail of
2623  * the list.
2624  *
2625  * Return: 0=success, else -errno
2626  */
2627 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2628 			    binder_uintptr_t fixup, size_t skip_size)
2629 {
2630 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2631 	struct binder_ptr_fixup *tmppf;
2632 
2633 	if (!pf)
2634 		return -ENOMEM;
2635 
2636 	pf->offset = offset;
2637 	pf->fixup_data = fixup;
2638 	pf->skip_size = skip_size;
2639 	INIT_LIST_HEAD(&pf->node);
2640 
2641 	/* Fixups are *mostly* added in-order, but there are some
2642 	 * exceptions. Look backwards through list for insertion point.
2643 	 */
2644 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2645 		if (tmppf->offset < pf->offset) {
2646 			list_add(&pf->node, &tmppf->node);
2647 			return 0;
2648 		}
2649 	}
2650 	/*
2651 	 * if we get here, then the new offset is the lowest so
2652 	 * insert at the head
2653 	 */
2654 	list_add(&pf->node, pf_head);
2655 	return 0;
2656 }
2657 
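/**
 * binder_translate_fd_array() - translate an array of fds
 * @pf_head:	list of pointer fixups for the deferred copy
 * @fda:	fd array object to translate
 * @sender_ubuffer: user address of the sender's transaction buffer
 * @parent:	buffer object containing the fd array (target copy)
 * @sender_uparent: sender's user copy of the parent buffer object
 * @t:		transaction the array is part of
 * @thread:	sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * Validates the bounds and alignment of the fd array, queues a
 * skip-size fixup so untranslated fds are never copied into the
 * target buffer, then queues a translation for each fd, reading the
 * original values from the sender's buffer.
 *
 * Return: 0 on success, negative errno on failure
 */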
2658 static int binder_translate_fd_array(struct list_head *pf_head,
2659 				     struct binder_fd_array_object *fda,
2660 				     const void __user *sender_ubuffer,
2661 				     struct binder_buffer_object *parent,
2662 				     struct binder_buffer_object *sender_uparent,
2663 				     struct binder_transaction *t,
2664 				     struct binder_thread *thread,
2665 				     struct binder_transaction *in_reply_to)
2666 {
2667 	binder_size_t fdi, fd_buf_size;
2668 	binder_size_t fda_offset;
2669 	const void __user *sender_ufda_base;
2670 	struct binder_proc *proc = thread->proc;
2671 	int ret;
2672 
2673 	if (fda->num_fds == 0)
2674 		return 0;
2675 
2676 	fd_buf_size = sizeof(u32) * fda->num_fds;
2677 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2678 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2679 				  proc->pid, thread->pid, (u64)fda->num_fds);
2680 		return -EINVAL;
2681 	}
2682 	if (fd_buf_size > parent->length ||
2683 	    fda->parent_offset > parent->length - fd_buf_size) {
2684 		/* No space for all file descriptors here. */
2685 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2686 				  proc->pid, thread->pid, (u64)fda->num_fds);
2687 		return -EINVAL;
2688 	}
2689 	/*
2690 	 * the source data for binder_buffer_object is visible
2691 	 * to user-space and the @buffer element is the user
2692 	 * pointer to the buffer_object containing the fd_array.
2693 	 * Convert the address to an offset relative to
2694 	 * the base of the transaction buffer.
2695 	 */
2696 	fda_offset = parent->buffer - t->buffer->user_data +
2697 		fda->parent_offset;
2698 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2699 				fda->parent_offset;
2700 
2701 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2702 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2703 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2704 				  proc->pid, thread->pid);
2705 		return -EINVAL;
2706 	}
2707 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2708 	if (ret)
2709 		return ret;
2710 
2711 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2712 		u32 fd;
2713 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2714 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2715 
2716 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2717 		if (!ret)
2718 			ret = binder_translate_fd(fd, offset, t, thread,
2719 						  in_reply_to);
2720 		if (ret)
2721 			return ret > 0 ? -EINVAL : ret;
2722 	}
2723 	return 0;
2724 }
2725 
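/**
 * binder_fixup_parent() - queue a pointer fixup in a parent buffer
 * @pf_head:	list of pointer fixups for the deferred copy
 * @t:		transaction being processed
 * @thread:	sending thread
 * @bp:		buffer object whose parent contains a pointer to it
 * @off_start_offset: offset of the start of the offset array
 * @num_valid:	number of previously-validated offset entries
 * @last_fixup_obj_off: offset of the last fixed-up buffer object
 * @last_fixup_min_off: minimum allowed fixup offset in that object
 *
 * Validates the parent and the fixup ordering, then queues a fixup
 * that writes the translated address of @bp (bp->buffer) into the
 * parent at bp->parent_offset.
 *
 * Return: 0 on success, negative errno on failure
 */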
2726 static int binder_fixup_parent(struct list_head *pf_head,
2727 			       struct binder_transaction *t,
2728 			       struct binder_thread *thread,
2729 			       struct binder_buffer_object *bp,
2730 			       binder_size_t off_start_offset,
2731 			       binder_size_t num_valid,
2732 			       binder_size_t last_fixup_obj_off,
2733 			       binder_size_t last_fixup_min_off)
2734 {
2735 	struct binder_buffer_object *parent;
2736 	struct binder_buffer *b = t->buffer;
2737 	struct binder_proc *proc = thread->proc;
2738 	struct binder_proc *target_proc = t->to_proc;
2739 	struct binder_object object;
2740 	binder_size_t buffer_offset;
2741 	binder_size_t parent_offset;
2742 
2743 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2744 		return 0;
2745 
2746 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2747 				     off_start_offset, &parent_offset,
2748 				     num_valid);
2749 	if (!parent) {
2750 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2751 				  proc->pid, thread->pid);
2752 		return -EINVAL;
2753 	}
2754 
2755 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2756 				   parent_offset, bp->parent_offset,
2757 				   last_fixup_obj_off,
2758 				   last_fixup_min_off)) {
2759 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2760 				  proc->pid, thread->pid);
2761 		return -EINVAL;
2762 	}
2763 
2764 	if (parent->length < sizeof(binder_uintptr_t) ||
2765 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2766 		/* No space for a pointer here! */
2767 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2768 				  proc->pid, thread->pid);
2769 		return -EINVAL;
2770 	}
2771 
2772 	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2773 
2774 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2775 }
2776 
2777 /**
2778  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2779  * @t1: the pending async txn in the frozen process
2780  * @t2: the new async txn to supersede the outdated pending one
2781  *
2782  * Return:  true if t2 can supersede t1
2783  *          false if t2 cannot supersede t1
2784  */
2785 static bool binder_can_update_transaction(struct binder_transaction *t1,
2786 					  struct binder_transaction *t2)
2787 {
2788 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2789 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2790 		return false;
2791 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2792 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2793 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2794 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2795 		return true;
2796 	return false;
2797 }
2798 
2799 /**
2800  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2801  * @t:		 new async transaction
2802  * @target_list: list to find outdated transaction
2803  *
2804  * Return: the outdated transaction if found
2805  *         NULL if no outdated transaction can be found
2806  *
2807  * Requires the proc->inner_lock to be held.
2808  */
2809 static struct binder_transaction *
2810 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2811 					 struct list_head *target_list)
2812 {
2813 	struct binder_work *w;
2814 
2815 	list_for_each_entry(w, target_list, entry) {
2816 		struct binder_transaction *t_queued;
2817 
2818 		if (w->type != BINDER_WORK_TRANSACTION)
2819 			continue;
2820 		t_queued = container_of(w, struct binder_transaction, work);
2821 		if (binder_can_update_transaction(t_queued, t))
2822 			return t_queued;
2823 	}
2824 	return NULL;
2825 }
2826 
2827 /**
2828  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2829  * @t:		transaction to send
2830  * @proc:	process to send the transaction to
2831  * @thread:	thread in @proc to send the transaction to (may be NULL)
2832  *
2833  * This function queues a transaction to the specified process. It will try
2834  * to find a thread in the target process to handle the transaction and
2835  * wake it up. If no thread is found, the work is queued to the proc
2836  * waitqueue.
2837  *
2838  * If the @thread parameter is not NULL, the transaction is always queued
2839  * to the waitlist of that specific thread.
2840  *
2841  * Return:	0 if the transaction was successfully queued
2842  *		BR_DEAD_REPLY if the target process or thread is dead
2843  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2844  *			the sync transaction was rejected
2845  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2846  *		and the async transaction was successfully queued
2847  */
2848 static int binder_proc_transaction(struct binder_transaction *t,
2849 				    struct binder_proc *proc,
2850 				    struct binder_thread *thread)
2851 {
2852 	struct binder_node *node = t->buffer->target_node;
2853 	bool oneway = !!(t->flags & TF_ONE_WAY);
2854 	bool pending_async = false;
2855 	struct binder_transaction *t_outdated = NULL;
2856 	bool frozen = false;
2857 
2858 	BUG_ON(!node);
2859 	binder_node_lock(node);
2860 	if (oneway) {
2861 		BUG_ON(thread);
2862 		if (node->has_async_transaction)
2863 			pending_async = true;
2864 		else
2865 			node->has_async_transaction = true;
2866 	}
2867 
2868 	binder_inner_proc_lock(proc);
2869 	if (proc->is_frozen) {
2870 		frozen = true;
2871 		proc->sync_recv |= !oneway;
2872 		proc->async_recv |= oneway;
2873 	}
2874 
2875 	if ((frozen && !oneway) || proc->is_dead ||
2876 			(thread && thread->is_dead)) {
2877 		binder_inner_proc_unlock(proc);
2878 		binder_node_unlock(node);
2879 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2880 	}
2881 
2882 	if (!thread && !pending_async)
2883 		thread = binder_select_thread_ilocked(proc);
2884 
2885 	if (thread) {
2886 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2887 	} else if (!pending_async) {
2888 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2889 	} else {
2890 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2891 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2892 									      &node->async_todo);
2893 			if (t_outdated) {
2894 				binder_debug(BINDER_DEBUG_TRANSACTION,
2895 					     "txn %d supersedes %d\n",
2896 					     t->debug_id, t_outdated->debug_id);
2897 				list_del_init(&t_outdated->work.entry);
2898 				proc->outstanding_txns--;
2899 			}
2900 		}
2901 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2902 	}
2903 
2904 	if (!pending_async)
2905 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2906 
2907 	proc->outstanding_txns++;
2908 	binder_inner_proc_unlock(proc);
2909 	binder_node_unlock(node);
2910 
2911 	/*
2912 	 * To reduce potential contention, free the outdated transaction and
2913 	 * buffer after releasing the locks.
2914 	 */
2915 	if (t_outdated) {
2916 		struct binder_buffer *buffer = t_outdated->buffer;
2917 
2918 		t_outdated->buffer = NULL;
2919 		buffer->transaction = NULL;
2920 		trace_binder_transaction_update_buffer_release(buffer);
2921 		binder_release_entire_buffer(proc, NULL, buffer, false);
2922 		binder_alloc_free_buf(&proc->alloc, buffer);
2923 		kfree(t_outdated);
2924 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2925 	}
2926 
2927 	if (oneway && frozen)
2928 		return BR_TRANSACTION_PENDING_FROZEN;
2929 
2930 	return 0;
2931 }
2932 
2933 /**
2934  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2935  * @node:         struct binder_node for which to get refs
2936  * @procp:        returns @node->proc if valid
2937  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2938  *
2939  * User-space normally keeps the node alive when creating a transaction
2940  * since it has a reference to the target. The local strong ref keeps it
2941  * alive if the sending process dies before the target process processes
2942  * the transaction. If the source process is malicious or has a reference
2943  * counting bug, relying on the local strong ref can fail.
2944  *
2945  * Since user-space can cause the local strong ref to go away, we also take
2946  * a tmpref on the node to ensure it survives while we are constructing
2947  * the transaction. We also need a tmpref on the proc while we are
2948  * constructing the transaction, so we take that here as well.
2949  *
2950  * Return: The target_node with refs taken, or NULL if @node->proc is NULL
2951  * (i.e. the target proc has died), in which case @error is set to
2952  * BR_DEAD_REPLY. Also sets @procp when @node->proc is valid.
2953  */
2954 static struct binder_node *binder_get_node_refs_for_txn(
2955 		struct binder_node *node,
2956 		struct binder_proc **procp,
2957 		uint32_t *error)
2958 {
2959 	struct binder_node *target_node = NULL;
2960 
2961 	binder_node_inner_lock(node);
2962 	if (node->proc) {
2963 		target_node = node;
2964 		binder_inc_node_nilocked(node, 1, 0, NULL);
2965 		binder_inc_node_tmpref_ilocked(node);
2966 		node->proc->tmp_ref++;
2967 		*procp = node->proc;
2968 	} else
2969 		*error = BR_DEAD_REPLY;
2970 	binder_node_inner_unlock(node);
2971 
2972 	return target_node;
2973 }
2974 
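/**
 * binder_set_txn_from_error() - set an extended error on the sender
 * @t:		transaction whose sender should see the error
 * @id:		debug_id of the failed transaction
 * @command:	BR_* command for the extended error
 * @param:	parameter for the error (typically a negative errno)
 *
 * Records the extended error in the sending thread's @ee unless an
 * error is already pending there.
 */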
2975 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2976 				      uint32_t command, int32_t param)
2977 {
2978 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2979 
2980 	if (!from) {
2981 		/* annotation for sparse */
2982 		__release(&from->proc->inner_lock);
2983 		return;
2984 	}
2985 
2986 	/* don't override existing errors */
2987 	if (from->ee.command == BR_OK)
2988 		binder_set_extended_error(&from->ee, id, command, param);
2989 	binder_inner_proc_unlock(from->proc);
2990 	binder_thread_dec_tmpref(from);
2991 }
2992 
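/**
 * binder_transaction() - handle a BC_TRANSACTION or BC_REPLY
 * @proc:	sending process
 * @thread:	sending thread
 * @tr:		transaction data copied from userspace
 * @reply:	nonzero if this is a BC_REPLY
 * @extra_buffers_size: size of extra (e.g. scatter-gather) buffers
 *
 * Resolves the target process and thread, allocates a buffer in the
 * target and copies the payload into it, translating embedded binder
 * objects, handles and fds along the way, then queues the transaction
 * (or reply) to the target. On failure, an appropriate BR_* error is
 * reported back to the sender.
 */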
2993 static void binder_transaction(struct binder_proc *proc,
2994 			       struct binder_thread *thread,
2995 			       struct binder_transaction_data *tr, int reply,
2996 			       binder_size_t extra_buffers_size)
2997 {
2998 	int ret;
2999 	struct binder_transaction *t;
3000 	struct binder_work *w;
3001 	struct binder_work *tcomplete;
3002 	binder_size_t buffer_offset = 0;
3003 	binder_size_t off_start_offset, off_end_offset;
3004 	binder_size_t off_min;
3005 	binder_size_t sg_buf_offset, sg_buf_end_offset;
3006 	binder_size_t user_offset = 0;
3007 	struct binder_proc *target_proc = NULL;
3008 	struct binder_thread *target_thread = NULL;
3009 	struct binder_node *target_node = NULL;
3010 	struct binder_transaction *in_reply_to = NULL;
3011 	struct binder_transaction_log_entry *e;
3012 	uint32_t return_error = 0;
3013 	uint32_t return_error_param = 0;
3014 	uint32_t return_error_line = 0;
3015 	binder_size_t last_fixup_obj_off = 0;
3016 	binder_size_t last_fixup_min_off = 0;
3017 	struct binder_context *context = proc->context;
3018 	int t_debug_id = atomic_inc_return(&binder_last_id);
3019 	ktime_t t_start_time = ktime_get();
3020 	struct lsm_context lsmctx = { };
3021 	struct list_head sgc_head;
3022 	struct list_head pf_head;
3023 	const void __user *user_buffer = (const void __user *)
3024 				(uintptr_t)tr->data.ptr.buffer;
3025 	INIT_LIST_HEAD(&sgc_head);
3026 	INIT_LIST_HEAD(&pf_head);
3027 
3028 	e = binder_transaction_log_add(&binder_transaction_log);
3029 	e->debug_id = t_debug_id;
3030 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3031 	e->from_proc = proc->pid;
3032 	e->from_thread = thread->pid;
3033 	e->target_handle = tr->target.handle;
3034 	e->data_size = tr->data_size;
3035 	e->offsets_size = tr->offsets_size;
3036 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3037 
3038 	binder_inner_proc_lock(proc);
3039 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3040 	binder_inner_proc_unlock(proc);
3041 
3042 	if (reply) {
3043 		binder_inner_proc_lock(proc);
3044 		in_reply_to = thread->transaction_stack;
3045 		if (in_reply_to == NULL) {
3046 			binder_inner_proc_unlock(proc);
3047 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3048 					  proc->pid, thread->pid);
3049 			return_error = BR_FAILED_REPLY;
3050 			return_error_param = -EPROTO;
3051 			return_error_line = __LINE__;
3052 			goto err_empty_call_stack;
3053 		}
3054 		if (in_reply_to->to_thread != thread) {
3055 			spin_lock(&in_reply_to->lock);
3056 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3057 				proc->pid, thread->pid, in_reply_to->debug_id,
3058 				in_reply_to->to_proc ?
3059 				in_reply_to->to_proc->pid : 0,
3060 				in_reply_to->to_thread ?
3061 				in_reply_to->to_thread->pid : 0);
3062 			spin_unlock(&in_reply_to->lock);
3063 			binder_inner_proc_unlock(proc);
3064 			return_error = BR_FAILED_REPLY;
3065 			return_error_param = -EPROTO;
3066 			return_error_line = __LINE__;
3067 			in_reply_to = NULL;
3068 			goto err_bad_call_stack;
3069 		}
3070 		thread->transaction_stack = in_reply_to->to_parent;
3071 		binder_inner_proc_unlock(proc);
3072 		binder_set_nice(in_reply_to->saved_priority);
3073 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3074 		if (target_thread == NULL) {
3075 			/* annotation for sparse */
3076 			__release(&target_thread->proc->inner_lock);
3077 			binder_txn_error("%d:%d reply target not found\n",
3078 				thread->pid, proc->pid);
3079 			return_error = BR_DEAD_REPLY;
3080 			return_error_line = __LINE__;
3081 			goto err_dead_binder;
3082 		}
3083 		if (target_thread->transaction_stack != in_reply_to) {
3084 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3085 				proc->pid, thread->pid,
3086 				target_thread->transaction_stack ?
3087 				target_thread->transaction_stack->debug_id : 0,
3088 				in_reply_to->debug_id);
3089 			binder_inner_proc_unlock(target_thread->proc);
3090 			return_error = BR_FAILED_REPLY;
3091 			return_error_param = -EPROTO;
3092 			return_error_line = __LINE__;
3093 			in_reply_to = NULL;
3094 			target_thread = NULL;
3095 			goto err_dead_binder;
3096 		}
3097 		target_proc = target_thread->proc;
3098 		target_proc->tmp_ref++;
3099 		binder_inner_proc_unlock(target_thread->proc);
3100 	} else {
3101 		if (tr->target.handle) {
3102 			struct binder_ref *ref;
3103 
3104 			/*
3105 			 * There must already be a strong ref
3106 			 * on this node. If so, do a strong
3107 			 * increment on the node to ensure it
3108 			 * stays alive until the transaction is
3109 			 * done.
3110 			 */
3111 			binder_proc_lock(proc);
3112 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3113 						     true);
3114 			if (ref) {
3115 				target_node = binder_get_node_refs_for_txn(
3116 						ref->node, &target_proc,
3117 						&return_error);
3118 			} else {
3119 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3120 						  proc->pid, thread->pid, tr->target.handle);
3121 				return_error = BR_FAILED_REPLY;
3122 			}
3123 			binder_proc_unlock(proc);
3124 		} else {
3125 			mutex_lock(&context->context_mgr_node_lock);
3126 			target_node = context->binder_context_mgr_node;
3127 			if (target_node)
3128 				target_node = binder_get_node_refs_for_txn(
3129 						target_node, &target_proc,
3130 						&return_error);
3131 			else
3132 				return_error = BR_DEAD_REPLY;
3133 			mutex_unlock(&context->context_mgr_node_lock);
3134 			if (target_node && target_proc->pid == proc->pid) {
3135 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3136 						  proc->pid, thread->pid);
3137 				return_error = BR_FAILED_REPLY;
3138 				return_error_param = -EINVAL;
3139 				return_error_line = __LINE__;
3140 				goto err_invalid_target_handle;
3141 			}
3142 		}
3143 		if (!target_node) {
3144 			binder_txn_error("%d:%d cannot find target node\n",
3145 				thread->pid, proc->pid);
3146 			/*
3147 			 * return_error is set above
3148 			 */
3149 			return_error_param = -EINVAL;
3150 			return_error_line = __LINE__;
3151 			goto err_dead_binder;
3152 		}
3153 		e->to_node = target_node->debug_id;
3154 		if (WARN_ON(proc == target_proc)) {
3155 			binder_txn_error("%d:%d self transactions not allowed\n",
3156 				thread->pid, proc->pid);
3157 			return_error = BR_FAILED_REPLY;
3158 			return_error_param = -EINVAL;
3159 			return_error_line = __LINE__;
3160 			goto err_invalid_target_handle;
3161 		}
3162 		if (security_binder_transaction(proc->cred,
3163 						target_proc->cred) < 0) {
3164 			binder_txn_error("%d:%d transaction credentials failed\n",
3165 				thread->pid, proc->pid);
3166 			return_error = BR_FAILED_REPLY;
3167 			return_error_param = -EPERM;
3168 			return_error_line = __LINE__;
3169 			goto err_invalid_target_handle;
3170 		}
3171 		binder_inner_proc_lock(proc);
3172 
3173 		w = list_first_entry_or_null(&thread->todo,
3174 					     struct binder_work, entry);
3175 		if (!(tr->flags & TF_ONE_WAY) && w &&
3176 		    w->type == BINDER_WORK_TRANSACTION) {
3177 			/*
3178 			 * Do not allow new outgoing transaction from a
3179 			 * thread that has a transaction at the head of
3180 			 * its todo list. Only need to check the head
3181 			 * because binder_select_thread_ilocked picks a
3182 			 * thread from proc->waiting_threads to enqueue
3183 			 * the transaction, and nothing is queued to the
3184 			 * todo list while the thread is on waiting_threads.
3185 			 */
3186 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3187 					  proc->pid, thread->pid);
3188 			binder_inner_proc_unlock(proc);
3189 			return_error = BR_FAILED_REPLY;
3190 			return_error_param = -EPROTO;
3191 			return_error_line = __LINE__;
3192 			goto err_bad_todo_list;
3193 		}
3194 
3195 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3196 			struct binder_transaction *tmp;
3197 
3198 			tmp = thread->transaction_stack;
3199 			if (tmp->to_thread != thread) {
3200 				spin_lock(&tmp->lock);
3201 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3202 					proc->pid, thread->pid, tmp->debug_id,
3203 					tmp->to_proc ? tmp->to_proc->pid : 0,
3204 					tmp->to_thread ?
3205 					tmp->to_thread->pid : 0);
3206 				spin_unlock(&tmp->lock);
3207 				binder_inner_proc_unlock(proc);
3208 				return_error = BR_FAILED_REPLY;
3209 				return_error_param = -EPROTO;
3210 				return_error_line = __LINE__;
3211 				goto err_bad_call_stack;
3212 			}
3213 			while (tmp) {
3214 				struct binder_thread *from;
3215 
3216 				spin_lock(&tmp->lock);
3217 				from = tmp->from;
3218 				if (from && from->proc == target_proc) {
3219 					atomic_inc(&from->tmp_ref);
3220 					target_thread = from;
3221 					spin_unlock(&tmp->lock);
3222 					break;
3223 				}
3224 				spin_unlock(&tmp->lock);
3225 				tmp = tmp->from_parent;
3226 			}
3227 		}
3228 		binder_inner_proc_unlock(proc);
3229 	}
3230 	if (target_thread)
3231 		e->to_thread = target_thread->pid;
3232 	e->to_proc = target_proc->pid;
3233 
3234 	/* TODO: reuse incoming transaction for reply */
3235 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3236 	if (t == NULL) {
3237 		binder_txn_error("%d:%d cannot allocate transaction\n",
3238 			thread->pid, proc->pid);
3239 		return_error = BR_FAILED_REPLY;
3240 		return_error_param = -ENOMEM;
3241 		return_error_line = __LINE__;
3242 		goto err_alloc_t_failed;
3243 	}
3244 	INIT_LIST_HEAD(&t->fd_fixups);
3245 	binder_stats_created(BINDER_STAT_TRANSACTION);
3246 	spin_lock_init(&t->lock);
3247 
3248 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3249 	if (tcomplete == NULL) {
3250 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3251 			thread->pid, proc->pid);
3252 		return_error = BR_FAILED_REPLY;
3253 		return_error_param = -ENOMEM;
3254 		return_error_line = __LINE__;
3255 		goto err_alloc_tcomplete_failed;
3256 	}
3257 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3258 
3259 	t->debug_id = t_debug_id;
3260 	t->start_time = t_start_time;
3261 
3262 	if (reply)
3263 		binder_debug(BINDER_DEBUG_TRANSACTION,
3264 			     "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3265 			     proc->pid, thread->pid, t->debug_id,
3266 			     target_proc->pid, target_thread->pid,
3267 			     (u64)tr->data_size, (u64)tr->offsets_size,
3268 			     (u64)extra_buffers_size);
3269 	else
3270 		binder_debug(BINDER_DEBUG_TRANSACTION,
3271 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3272 			     proc->pid, thread->pid, t->debug_id,
3273 			     target_proc->pid, target_node->debug_id,
3274 			     (u64)tr->data_size, (u64)tr->offsets_size,
3275 			     (u64)extra_buffers_size);
3276 
3277 	if (!reply && !(tr->flags & TF_ONE_WAY))
3278 		t->from = thread;
3279 	else
3280 		t->from = NULL;
3281 	t->from_pid = proc->pid;
3282 	t->from_tid = thread->pid;
3283 	t->sender_euid = task_euid(proc->tsk);
3284 	t->to_proc = target_proc;
3285 	t->to_thread = target_thread;
3286 	t->code = tr->code;
3287 	t->flags = tr->flags;
3288 	t->priority = task_nice(current);
3289 
3290 	if (target_node && target_node->txn_security_ctx) {
3291 		u32 secid;
3292 		size_t added_size;
3293 
3294 		security_cred_getsecid(proc->cred, &secid);
3295 		ret = security_secid_to_secctx(secid, &lsmctx);
3296 		if (ret < 0) {
3297 			binder_txn_error("%d:%d failed to get security context\n",
3298 				thread->pid, proc->pid);
3299 			return_error = BR_FAILED_REPLY;
3300 			return_error_param = ret;
3301 			return_error_line = __LINE__;
3302 			goto err_get_secctx_failed;
3303 		}
3304 		added_size = ALIGN(lsmctx.len, sizeof(u64));
3305 		extra_buffers_size += added_size;
3306 		if (extra_buffers_size < added_size) {
3307 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3308 				thread->pid, proc->pid);
3309 			return_error = BR_FAILED_REPLY;
3310 			return_error_param = -EINVAL;
3311 			return_error_line = __LINE__;
3312 			goto err_bad_extra_size;
3313 		}
3314 	}
3315 
3316 	trace_binder_transaction(reply, t, target_node);
3317 
3318 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3319 		tr->offsets_size, extra_buffers_size,
3320 		!reply && (t->flags & TF_ONE_WAY));
3321 	if (IS_ERR(t->buffer)) {
3322 		char *s;
3323 
3324 		ret = PTR_ERR(t->buffer);
3325 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3326 			: (ret == -ENOSPC) ? ": no space left"
3327 			: (ret == -ENOMEM) ? ": memory allocation failed"
3328 			: "";
3329 		binder_txn_error("cannot allocate buffer%s", s);
3330 
3331 		return_error_param = PTR_ERR(t->buffer);
3332 		return_error = return_error_param == -ESRCH ?
3333 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3334 		return_error_line = __LINE__;
3335 		t->buffer = NULL;
3336 		goto err_binder_alloc_buf_failed;
3337 	}
3338 	if (lsmctx.context) {
3339 		int err;
3340 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3341 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3342 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3343 				    ALIGN(lsmctx.len, sizeof(u64));
3344 
3345 		t->security_ctx = t->buffer->user_data + buf_offset;
3346 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3347 						  t->buffer, buf_offset,
3348 						  lsmctx.context, lsmctx.len);
3349 		if (err) {
3350 			t->security_ctx = 0;
3351 			WARN_ON(1);
3352 		}
3353 		security_release_secctx(&lsmctx);
3354 		lsmctx.context = NULL;
3355 	}
3356 	t->buffer->debug_id = t->debug_id;
3357 	t->buffer->transaction = t;
3358 	t->buffer->target_node = target_node;
3359 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3360 	trace_binder_transaction_alloc_buf(t->buffer);
3361 
3362 	if (binder_alloc_copy_user_to_buffer(
3363 				&target_proc->alloc,
3364 				t->buffer,
3365 				ALIGN(tr->data_size, sizeof(void *)),
3366 				(const void __user *)
3367 					(uintptr_t)tr->data.ptr.offsets,
3368 				tr->offsets_size)) {
3369 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3370 				proc->pid, thread->pid);
3371 		return_error = BR_FAILED_REPLY;
3372 		return_error_param = -EFAULT;
3373 		return_error_line = __LINE__;
3374 		goto err_copy_data_failed;
3375 	}
3376 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3377 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3378 				proc->pid, thread->pid, (u64)tr->offsets_size);
3379 		return_error = BR_FAILED_REPLY;
3380 		return_error_param = -EINVAL;
3381 		return_error_line = __LINE__;
3382 		goto err_bad_offset;
3383 	}
3384 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3385 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3386 				  proc->pid, thread->pid,
3387 				  (u64)extra_buffers_size);
3388 		return_error = BR_FAILED_REPLY;
3389 		return_error_param = -EINVAL;
3390 		return_error_line = __LINE__;
3391 		goto err_bad_offset;
3392 	}
3393 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3394 	buffer_offset = off_start_offset;
3395 	off_end_offset = off_start_offset + tr->offsets_size;
3396 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3397 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3398 		ALIGN(lsmctx.len, sizeof(u64));
3399 	off_min = 0;
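	/*
	 * Walk the offsets array: each entry gives the position of an
	 * object in the data buffer that must be validated and
	 * translated into the target process.
	 */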
3400 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3401 	     buffer_offset += sizeof(binder_size_t)) {
3402 		struct binder_object_header *hdr;
3403 		size_t object_size;
3404 		struct binder_object object;
3405 		binder_size_t object_offset;
3406 		binder_size_t copy_size;
3407 
3408 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3409 						  &object_offset,
3410 						  t->buffer,
3411 						  buffer_offset,
3412 						  sizeof(object_offset))) {
3413 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3414 				thread->pid, proc->pid);
3415 			return_error = BR_FAILED_REPLY;
3416 			return_error_param = -EINVAL;
3417 			return_error_line = __LINE__;
3418 			goto err_bad_offset;
3419 		}
3420 
3421 		/*
3422 		 * Copy the source user buffer up to the next object
3423 		 * that will be processed.
3424 		 */
3425 		copy_size = object_offset - user_offset;
3426 		if (copy_size && (user_offset > object_offset ||
3427 				object_offset > tr->data_size ||
3428 				binder_alloc_copy_user_to_buffer(
3429 					&target_proc->alloc,
3430 					t->buffer, user_offset,
3431 					user_buffer + user_offset,
3432 					copy_size))) {
3433 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3434 					proc->pid, thread->pid);
3435 			return_error = BR_FAILED_REPLY;
3436 			return_error_param = -EFAULT;
3437 			return_error_line = __LINE__;
3438 			goto err_copy_data_failed;
3439 		}
3440 		object_size = binder_get_object(target_proc, user_buffer,
3441 				t->buffer, object_offset, &object);
3442 		if (object_size == 0 || object_offset < off_min) {
3443 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3444 					  proc->pid, thread->pid,
3445 					  (u64)object_offset,
3446 					  (u64)off_min,
3447 					  (u64)t->buffer->data_size);
3448 			return_error = BR_FAILED_REPLY;
3449 			return_error_param = -EINVAL;
3450 			return_error_line = __LINE__;
3451 			goto err_bad_offset;
3452 		}
3453 		/*
3454 		 * Set offset to the next buffer fragment to be
3455 		 * copied
3456 		 */
3457 		user_offset = object_offset + object_size;
3458 
3459 		hdr = &object.hdr;
3460 		off_min = object_offset + object_size;
3461 		switch (hdr->type) {
3462 		case BINDER_TYPE_BINDER:
3463 		case BINDER_TYPE_WEAK_BINDER: {
3464 			struct flat_binder_object *fp;
3465 
3466 			fp = to_flat_binder_object(hdr);
3467 			ret = binder_translate_binder(fp, t, thread);
3468 
3469 			if (ret < 0 ||
3470 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3471 							t->buffer,
3472 							object_offset,
3473 							fp, sizeof(*fp))) {
3474 				binder_txn_error("%d:%d translate binder failed\n",
3475 					thread->pid, proc->pid);
3476 				return_error = BR_FAILED_REPLY;
3477 				return_error_param = ret;
3478 				return_error_line = __LINE__;
3479 				goto err_translate_failed;
3480 			}
3481 		} break;
3482 		case BINDER_TYPE_HANDLE:
3483 		case BINDER_TYPE_WEAK_HANDLE: {
3484 			struct flat_binder_object *fp;
3485 
3486 			fp = to_flat_binder_object(hdr);
3487 			ret = binder_translate_handle(fp, t, thread);
3488 			if (ret < 0 ||
3489 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3490 							t->buffer,
3491 							object_offset,
3492 							fp, sizeof(*fp))) {
3493 				binder_txn_error("%d:%d translate handle failed\n",
3494 					thread->pid, proc->pid);
3495 				return_error = BR_FAILED_REPLY;
3496 				return_error_param = ret;
3497 				return_error_line = __LINE__;
3498 				goto err_translate_failed;
3499 			}
3500 		} break;
3501 
3502 		case BINDER_TYPE_FD: {
3503 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3504 			binder_size_t fd_offset = object_offset +
3505 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3506 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3507 						      thread, in_reply_to);
3508 
3509 			fp->pad_binder = 0;
3510 			if (ret < 0 ||
3511 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3512 							t->buffer,
3513 							object_offset,
3514 							fp, sizeof(*fp))) {
3515 				binder_txn_error("%d:%d translate fd failed\n",
3516 					thread->pid, proc->pid);
3517 				return_error = BR_FAILED_REPLY;
3518 				return_error_param = ret;
3519 				return_error_line = __LINE__;
3520 				goto err_translate_failed;
3521 			}
3522 		} break;
3523 		case BINDER_TYPE_FDA: {
3524 			struct binder_object ptr_object;
3525 			binder_size_t parent_offset;
3526 			struct binder_object user_object;
3527 			size_t user_parent_size;
3528 			struct binder_fd_array_object *fda =
3529 				to_binder_fd_array_object(hdr);
3530 			size_t num_valid = (buffer_offset - off_start_offset) /
3531 						sizeof(binder_size_t);
3532 			struct binder_buffer_object *parent =
3533 				binder_validate_ptr(target_proc, t->buffer,
3534 						    &ptr_object, fda->parent,
3535 						    off_start_offset,
3536 						    &parent_offset,
3537 						    num_valid);
3538 			if (!parent) {
3539 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3540 						  proc->pid, thread->pid);
3541 				return_error = BR_FAILED_REPLY;
3542 				return_error_param = -EINVAL;
3543 				return_error_line = __LINE__;
3544 				goto err_bad_parent;
3545 			}
3546 			if (!binder_validate_fixup(target_proc, t->buffer,
3547 						   off_start_offset,
3548 						   parent_offset,
3549 						   fda->parent_offset,
3550 						   last_fixup_obj_off,
3551 						   last_fixup_min_off)) {
3552 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3553 						  proc->pid, thread->pid);
3554 				return_error = BR_FAILED_REPLY;
3555 				return_error_param = -EINVAL;
3556 				return_error_line = __LINE__;
3557 				goto err_bad_parent;
3558 			}
3559 			/*
3560 			 * We need to read the user version of the parent
3561 			 * object to get the original user offset.
3562 			 */
3563 			user_parent_size =
3564 				binder_get_object(proc, user_buffer, t->buffer,
3565 						  parent_offset, &user_object);
3566 			if (user_parent_size != sizeof(user_object.bbo)) {
3567 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3568 						  proc->pid, thread->pid,
3569 						  user_parent_size,
3570 						  sizeof(user_object.bbo));
3571 				return_error = BR_FAILED_REPLY;
3572 				return_error_param = -EINVAL;
3573 				return_error_line = __LINE__;
3574 				goto err_bad_parent;
3575 			}
3576 			ret = binder_translate_fd_array(&pf_head, fda,
3577 							user_buffer, parent,
3578 							&user_object.bbo, t,
3579 							thread, in_reply_to);
3580 			if (!ret)
3581 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3582 								  t->buffer,
3583 								  object_offset,
3584 								  fda, sizeof(*fda));
3585 			if (ret) {
3586 				binder_txn_error("%d:%d translate fd array failed\n",
3587 					thread->pid, proc->pid);
3588 				return_error = BR_FAILED_REPLY;
3589 				return_error_param = ret > 0 ? -EINVAL : ret;
3590 				return_error_line = __LINE__;
3591 				goto err_translate_failed;
3592 			}
3593 			last_fixup_obj_off = parent_offset;
3594 			last_fixup_min_off =
3595 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3596 		} break;
3597 		case BINDER_TYPE_PTR: {
3598 			struct binder_buffer_object *bp =
3599 				to_binder_buffer_object(hdr);
3600 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3601 			size_t num_valid;
3602 
3603 			if (bp->length > buf_left) {
3604 				binder_user_error("%d:%d got transaction with too large buffer\n",
3605 						  proc->pid, thread->pid);
3606 				return_error = BR_FAILED_REPLY;
3607 				return_error_param = -EINVAL;
3608 				return_error_line = __LINE__;
3609 				goto err_bad_offset;
3610 			}
3611 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3612 				(const void __user *)(uintptr_t)bp->buffer,
3613 				bp->length);
3614 			if (ret) {
3615 				binder_txn_error("%d:%d deferred copy failed\n",
3616 					thread->pid, proc->pid);
3617 				return_error = BR_FAILED_REPLY;
3618 				return_error_param = ret;
3619 				return_error_line = __LINE__;
3620 				goto err_translate_failed;
3621 			}
3622 			/* Fixup buffer pointer to target proc address space */
3623 			bp->buffer = t->buffer->user_data + sg_buf_offset;
3624 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3625 
3626 			num_valid = (buffer_offset - off_start_offset) /
3627 					sizeof(binder_size_t);
3628 			ret = binder_fixup_parent(&pf_head, t,
3629 						  thread, bp,
3630 						  off_start_offset,
3631 						  num_valid,
3632 						  last_fixup_obj_off,
3633 						  last_fixup_min_off);
3634 			if (ret < 0 ||
3635 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3636 							t->buffer,
3637 							object_offset,
3638 							bp, sizeof(*bp))) {
3639 				binder_txn_error("%d:%d failed to fixup parent\n",
3640 					thread->pid, proc->pid);
3641 				return_error = BR_FAILED_REPLY;
3642 				return_error_param = ret;
3643 				return_error_line = __LINE__;
3644 				goto err_translate_failed;
3645 			}
3646 			last_fixup_obj_off = object_offset;
3647 			last_fixup_min_off = 0;
3648 		} break;
3649 		default:
3650 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3651 				proc->pid, thread->pid, hdr->type);
3652 			return_error = BR_FAILED_REPLY;
3653 			return_error_param = -EINVAL;
3654 			return_error_line = __LINE__;
3655 			goto err_bad_object_type;
3656 		}
3657 	}
3658 	/* Done processing objects, copy the rest of the buffer */
3659 	if (binder_alloc_copy_user_to_buffer(
3660 				&target_proc->alloc,
3661 				t->buffer, user_offset,
3662 				user_buffer + user_offset,
3663 				tr->data_size - user_offset)) {
3664 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3665 				proc->pid, thread->pid);
3666 		return_error = BR_FAILED_REPLY;
3667 		return_error_param = -EFAULT;
3668 		return_error_line = __LINE__;
3669 		goto err_copy_data_failed;
3670 	}
3671 
3672 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3673 					    &sgc_head, &pf_head);
3674 	if (ret) {
3675 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3676 				  proc->pid, thread->pid);
3677 		return_error = BR_FAILED_REPLY;
3678 		return_error_param = ret;
3679 		return_error_line = __LINE__;
3680 		goto err_copy_data_failed;
3681 	}
3682 	if (t->buffer->oneway_spam_suspect)
3683 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3684 	else
3685 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3686 	t->work.type = BINDER_WORK_TRANSACTION;
3687 
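	/*
	 * Queue the transaction: replies go straight to the waiting
	 * thread, synchronous calls are pushed on the caller's
	 * transaction stack, and one-way calls are routed via the
	 * target node.
	 */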
3688 	if (reply) {
3689 		binder_enqueue_thread_work(thread, tcomplete);
3690 		binder_inner_proc_lock(target_proc);
3691 		if (target_thread->is_dead) {
3692 			return_error = BR_DEAD_REPLY;
3693 			binder_inner_proc_unlock(target_proc);
3694 			goto err_dead_proc_or_thread;
3695 		}
3696 		BUG_ON(t->buffer->async_transaction != 0);
3697 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3698 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3699 		target_proc->outstanding_txns++;
3700 		binder_inner_proc_unlock(target_proc);
3701 		wake_up_interruptible_sync(&target_thread->wait);
3702 		binder_free_transaction(in_reply_to);
3703 	} else if (!(t->flags & TF_ONE_WAY)) {
3704 		BUG_ON(t->buffer->async_transaction != 0);
3705 		binder_inner_proc_lock(proc);
3706 		/*
3707 		 * Defer the TRANSACTION_COMPLETE so we don't return to
3708 		 * userspace immediately; this lets the target process start
3709 		 * working on the transaction right away, reducing latency.
3710 		 * The TRANSACTION_COMPLETE is returned once the target
3711 		 * replies (or an error occurs).
3712 		 */
3713 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3714 		t->need_reply = 1;
3715 		t->from_parent = thread->transaction_stack;
3716 		thread->transaction_stack = t;
3717 		binder_inner_proc_unlock(proc);
3718 		return_error = binder_proc_transaction(t,
3719 				target_proc, target_thread);
3720 		if (return_error) {
3721 			binder_inner_proc_lock(proc);
3722 			binder_pop_transaction_ilocked(thread, t);
3723 			binder_inner_proc_unlock(proc);
3724 			goto err_dead_proc_or_thread;
3725 		}
3726 	} else {
3727 		BUG_ON(target_node == NULL);
3728 		BUG_ON(t->buffer->async_transaction != 1);
3729 		return_error = binder_proc_transaction(t, target_proc, NULL);
3730 		/*
3731 		 * Let the caller know when async transaction reaches a frozen
3732 		 * process and is put in a pending queue, waiting for the target
3733 		 * process to be unfrozen.
3734 		 */
3735 		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3736 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3737 		binder_enqueue_thread_work(thread, tcomplete);
3738 		if (return_error &&
3739 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3740 			goto err_dead_proc_or_thread;
3741 	}
3742 	if (target_thread)
3743 		binder_thread_dec_tmpref(target_thread);
3744 	binder_proc_dec_tmpref(target_proc);
3745 	if (target_node)
3746 		binder_dec_node_tmpref(target_node);
3747 	/*
3748 	 * write barrier to synchronize with initialization
3749 	 * of log entry
3750 	 */
3751 	smp_wmb();
3752 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3753 	return;
3754 
3755 err_dead_proc_or_thread:
3756 	binder_txn_error("%d:%d dead process or thread\n",
3757 		thread->pid, proc->pid);
3758 	return_error_line = __LINE__;
3759 	binder_dequeue_work(proc, tcomplete);
3760 err_translate_failed:
3761 err_bad_object_type:
3762 err_bad_offset:
3763 err_bad_parent:
3764 err_copy_data_failed:
3765 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3766 	binder_free_txn_fixups(t);
3767 	trace_binder_transaction_failed_buffer_release(t->buffer);
3768 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3769 					  buffer_offset, true);
3770 	if (target_node)
3771 		binder_dec_node_tmpref(target_node);
3772 	target_node = NULL;
3773 	t->buffer->transaction = NULL;
3774 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3775 err_binder_alloc_buf_failed:
3776 err_bad_extra_size:
3777 	if (lsmctx.context)
3778 		security_release_secctx(&lsmctx);
3779 err_get_secctx_failed:
3780 	kfree(tcomplete);
3781 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3782 err_alloc_tcomplete_failed:
3783 	if (trace_binder_txn_latency_free_enabled())
3784 		binder_txn_latency_free(t);
3785 	kfree(t);
3786 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3787 err_alloc_t_failed:
3788 err_bad_todo_list:
3789 err_bad_call_stack:
3790 err_empty_call_stack:
3791 err_dead_binder:
3792 err_invalid_target_handle:
3793 	if (target_node) {
3794 		binder_dec_node(target_node, 1, 0);
3795 		binder_dec_node_tmpref(target_node);
3796 	}
3797 
3798 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3799 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3800 		     proc->pid, thread->pid, reply ? "reply" :
3801 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3802 		     target_proc ? target_proc->pid : 0,
3803 		     target_thread ? target_thread->pid : 0,
3804 		     t_debug_id, return_error, return_error_param,
3805 		     tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3806 		     return_error_line);
3807 
3808 	if (target_thread)
3809 		binder_thread_dec_tmpref(target_thread);
3810 	if (target_proc)
3811 		binder_proc_dec_tmpref(target_proc);
3812 
3813 	{
3814 		struct binder_transaction_log_entry *fe;
3815 
3816 		e->return_error = return_error;
3817 		e->return_error_param = return_error_param;
3818 		e->return_error_line = return_error_line;
3819 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3820 		*fe = *e;
3821 		/*
3822 		 * write barrier to synchronize with initialization
3823 		 * of log entry
3824 		 */
3825 		smp_wmb();
3826 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3827 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3828 	}
3829 
3830 	BUG_ON(thread->return_error.cmd != BR_OK);
3831 	if (in_reply_to) {
3832 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3833 				return_error, return_error_param);
3834 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3835 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3836 		binder_send_failed_reply(in_reply_to, return_error);
3837 	} else {
3838 		binder_inner_proc_lock(proc);
3839 		binder_set_extended_error(&thread->ee, t_debug_id,
3840 				return_error, return_error_param);
3841 		binder_inner_proc_unlock(proc);
3842 		thread->return_error.cmd = return_error;
3843 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3844 	}
3845 }
3846 
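/**
 * binder_request_freeze_notification() - attach a freeze listener to a ref
 * @proc:          binder_proc requesting the notification
 * @thread:        thread issuing BC_REQUEST_FREEZE_NOTIFICATION
 * @handle_cookie: handle and cookie identifying the target ref
 *
 * If the node is still attached to a live process, its current frozen
 * state is queued on @proc->todo right away so userspace learns the
 * initial state.
 *
 * Return: 0 on success, -ENOMEM or -EINVAL on failure.
 */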
3847 static int
3848 binder_request_freeze_notification(struct binder_proc *proc,
3849 				   struct binder_thread *thread,
3850 				   struct binder_handle_cookie *handle_cookie)
3851 {
3852 	struct binder_ref_freeze *freeze;
3853 	struct binder_ref *ref;
3854 
3855 	freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3856 	if (!freeze)
3857 		return -ENOMEM;
3858 	binder_proc_lock(proc);
3859 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3860 	if (!ref) {
3861 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3862 				  proc->pid, thread->pid, handle_cookie->handle);
3863 		binder_proc_unlock(proc);
3864 		kfree(freeze);
3865 		return -EINVAL;
3866 	}
3867 
3868 	binder_node_lock(ref->node);
3869 	if (ref->freeze) {
3870 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3871 				  proc->pid, thread->pid);
3872 		binder_node_unlock(ref->node);
3873 		binder_proc_unlock(proc);
3874 		kfree(freeze);
3875 		return -EINVAL;
3876 	}
3877 
3878 	binder_stats_created(BINDER_STAT_FREEZE);
3879 	INIT_LIST_HEAD(&freeze->work.entry);
3880 	freeze->cookie = handle_cookie->cookie;
3881 	freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3882 	ref->freeze = freeze;
3883 
3884 	if (ref->node->proc) {
3885 		binder_inner_proc_lock(ref->node->proc);
3886 		freeze->is_frozen = ref->node->proc->is_frozen;
3887 		binder_inner_proc_unlock(ref->node->proc);
3888 
3889 		binder_inner_proc_lock(proc);
3890 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3891 		binder_wakeup_proc_ilocked(proc);
3892 		binder_inner_proc_unlock(proc);
3893 	}
3894 
3895 	binder_node_unlock(ref->node);
3896 	binder_proc_unlock(proc);
3897 	return 0;
3898 }
3899 
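/**
 * binder_clear_freeze_notification() - detach a freeze listener from a ref
 * @proc:          binder_proc clearing the notification
 * @thread:        thread issuing BC_CLEAR_FREEZE_NOTIFICATION
 * @handle_cookie: handle and cookie identifying the target ref
 *
 * Return: 0 on success, -EINVAL if the ref or cookie does not match an
 * active freeze notification.
 */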
3900 static int
3901 binder_clear_freeze_notification(struct binder_proc *proc,
3902 				 struct binder_thread *thread,
3903 				 struct binder_handle_cookie *handle_cookie)
3904 {
3905 	struct binder_ref_freeze *freeze;
3906 	struct binder_ref *ref;
3907 
3908 	binder_proc_lock(proc);
3909 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3910 	if (!ref) {
3911 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3912 				  proc->pid, thread->pid, handle_cookie->handle);
3913 		binder_proc_unlock(proc);
3914 		return -EINVAL;
3915 	}
3916 
3917 	binder_node_lock(ref->node);
3918 
3919 	if (!ref->freeze) {
3920 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3921 				  proc->pid, thread->pid);
3922 		binder_node_unlock(ref->node);
3923 		binder_proc_unlock(proc);
3924 		return -EINVAL;
3925 	}
3926 	freeze = ref->freeze;
3927 	binder_inner_proc_lock(proc);
3928 	if (freeze->cookie != handle_cookie->cookie) {
3929 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3930 				  proc->pid, thread->pid, (u64)freeze->cookie,
3931 				  (u64)handle_cookie->cookie);
3932 		binder_inner_proc_unlock(proc);
3933 		binder_node_unlock(ref->node);
3934 		binder_proc_unlock(proc);
3935 		return -EINVAL;
3936 	}
3937 	ref->freeze = NULL;
3938 	/*
3939 	 * Take the existing freeze object and overwrite its work type. There are three cases here:
3940 	 * 1. No pending notification. In this case just add the work to the queue.
3941 	 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
3942 	 *    should resend with the new work type.
3943 	 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
3944 	 *    needs to be done here.
3945 	 */
3946 	freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
3947 	if (list_empty(&freeze->work.entry)) {
3948 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3949 		binder_wakeup_proc_ilocked(proc);
3950 	} else if (freeze->sent) {
3951 		freeze->resend = true;
3952 	}
3953 	binder_inner_proc_unlock(proc);
3954 	binder_node_unlock(ref->node);
3955 	binder_proc_unlock(proc);
3956 	return 0;
3957 }
3958 
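/**
 * binder_freeze_notification_done() - handle the ack of a freeze notification
 * @proc:   binder_proc that received the notification
 * @thread: thread issuing BC_FREEZE_NOTIFICATION_DONE
 * @cookie: cookie of the notification being acked
 *
 * Removes the work item from @proc->delivered_freeze and, if a clear
 * request arrived while the notification was in flight, requeues it
 * with the new work type.
 *
 * Return: 0 on success, -EINVAL if no matching cookie is found.
 */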
3959 static int
3960 binder_freeze_notification_done(struct binder_proc *proc,
3961 				struct binder_thread *thread,
3962 				binder_uintptr_t cookie)
3963 {
3964 	struct binder_ref_freeze *freeze = NULL;
3965 	struct binder_work *w;
3966 
3967 	binder_inner_proc_lock(proc);
3968 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
3969 		struct binder_ref_freeze *tmp_freeze =
3970 			container_of(w, struct binder_ref_freeze, work);
3971 
3972 		if (tmp_freeze->cookie == cookie) {
3973 			freeze = tmp_freeze;
3974 			break;
3975 		}
3976 	}
3977 	if (!freeze) {
3978 		binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
3979 				  proc->pid, thread->pid, (u64)cookie);
3980 		binder_inner_proc_unlock(proc);
3981 		return -EINVAL;
3982 	}
3983 	binder_dequeue_work_ilocked(&freeze->work);
3984 	freeze->sent = false;
3985 	if (freeze->resend) {
3986 		freeze->resend = false;
3987 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3988 		binder_wakeup_proc_ilocked(proc);
3989 	}
3990 	binder_inner_proc_unlock(proc);
3991 	return 0;
3992 }
3993 
3994 /**
3995  * binder_free_buf() - free the specified buffer
3996  * @proc:	binder proc that owns the buffer
3997  * @thread:	thread performing the buffer release
3998  * @buffer:	buffer to be freed
3999  * @is_failure:	true if the transaction failed to send
4000  *
4001  * If the buffer is for an async transaction, enqueue the next
4002  * async transaction from the node, then clean up the buffer
4003  * and free it.
4004  */
4005 static void
4006 binder_free_buf(struct binder_proc *proc,
4007 		struct binder_thread *thread,
4008 		struct binder_buffer *buffer, bool is_failure)
4009 {
4010 	binder_inner_proc_lock(proc);
4011 	if (buffer->transaction) {
4012 		buffer->transaction->buffer = NULL;
4013 		buffer->transaction = NULL;
4014 	}
4015 	binder_inner_proc_unlock(proc);
4016 	if (buffer->async_transaction && buffer->target_node) {
4017 		struct binder_node *buf_node;
4018 		struct binder_work *w;
4019 
4020 		buf_node = buffer->target_node;
4021 		binder_node_inner_lock(buf_node);
4022 		BUG_ON(!buf_node->has_async_transaction);
4023 		BUG_ON(buf_node->proc != proc);
4024 		w = binder_dequeue_work_head_ilocked(
4025 				&buf_node->async_todo);
4026 		if (!w) {
4027 			buf_node->has_async_transaction = false;
4028 		} else {
4029 			binder_enqueue_work_ilocked(
4030 					w, &proc->todo);
4031 			binder_wakeup_proc_ilocked(proc);
4032 		}
4033 		binder_node_inner_unlock(buf_node);
4034 	}
4035 	trace_binder_transaction_buffer_release(buffer);
4036 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
4037 	binder_alloc_free_buf(&proc->alloc, buffer);
4038 }
4039 
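/*
 * binder_thread_write() - consume BC_* commands from the user write buffer.
 * Commands are processed until the buffer is exhausted or a pending
 * return error stops the loop; *consumed is advanced past each fully
 * handled command so an interrupted write can be resumed.
 */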
4040 static int binder_thread_write(struct binder_proc *proc,
4041 			struct binder_thread *thread,
4042 			binder_uintptr_t binder_buffer, size_t size,
4043 			binder_size_t *consumed)
4044 {
4045 	uint32_t cmd;
4046 	struct binder_context *context = proc->context;
4047 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4048 	void __user *ptr = buffer + *consumed;
4049 	void __user *end = buffer + size;
4050 
4051 	while (ptr < end && thread->return_error.cmd == BR_OK) {
4052 		int ret;
4053 
4054 		if (get_user(cmd, (uint32_t __user *)ptr))
4055 			return -EFAULT;
4056 		ptr += sizeof(uint32_t);
4057 		trace_binder_command(cmd);
4058 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4059 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4060 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4061 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4062 		}
4063 		switch (cmd) {
4064 		case BC_INCREFS:
4065 		case BC_ACQUIRE:
4066 		case BC_RELEASE:
4067 		case BC_DECREFS: {
4068 			uint32_t target;
4069 			const char *debug_string;
4070 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4071 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4072 			struct binder_ref_data rdata;
4073 
4074 			if (get_user(target, (uint32_t __user *)ptr))
4075 				return -EFAULT;
4076 
4077 			ptr += sizeof(uint32_t);
4078 			ret = -1;
4079 			if (increment && !target) {
4080 				struct binder_node *ctx_mgr_node;
4081 
4082 				mutex_lock(&context->context_mgr_node_lock);
4083 				ctx_mgr_node = context->binder_context_mgr_node;
4084 				if (ctx_mgr_node) {
4085 					if (ctx_mgr_node->proc == proc) {
4086 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4087 								  proc->pid, thread->pid);
4088 						mutex_unlock(&context->context_mgr_node_lock);
4089 						return -EINVAL;
4090 					}
4091 					ret = binder_inc_ref_for_node(
4092 							proc, ctx_mgr_node,
4093 							strong, NULL, &rdata);
4094 				}
4095 				mutex_unlock(&context->context_mgr_node_lock);
4096 			}
4097 			if (ret)
4098 				ret = binder_update_ref_for_handle(
4099 						proc, target, increment, strong,
4100 						&rdata);
4101 			if (!ret && rdata.desc != target) {
4102 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4103 					proc->pid, thread->pid,
4104 					target, rdata.desc);
4105 			}
4106 			switch (cmd) {
4107 			case BC_INCREFS:
4108 				debug_string = "IncRefs";
4109 				break;
4110 			case BC_ACQUIRE:
4111 				debug_string = "Acquire";
4112 				break;
4113 			case BC_RELEASE:
4114 				debug_string = "Release";
4115 				break;
4116 			case BC_DECREFS:
4117 			default:
4118 				debug_string = "DecRefs";
4119 				break;
4120 			}
4121 			if (ret) {
4122 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4123 					proc->pid, thread->pid, debug_string,
4124 					strong, target, ret);
4125 				break;
4126 			}
4127 			binder_debug(BINDER_DEBUG_USER_REFS,
4128 				     "%d:%d %s ref %d desc %d s %d w %d\n",
4129 				     proc->pid, thread->pid, debug_string,
4130 				     rdata.debug_id, rdata.desc, rdata.strong,
4131 				     rdata.weak);
4132 			break;
4133 		}
4134 		case BC_INCREFS_DONE:
4135 		case BC_ACQUIRE_DONE: {
4136 			binder_uintptr_t node_ptr;
4137 			binder_uintptr_t cookie;
4138 			struct binder_node *node;
4139 			bool free_node;
4140 
4141 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4142 				return -EFAULT;
4143 			ptr += sizeof(binder_uintptr_t);
4144 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4145 				return -EFAULT;
4146 			ptr += sizeof(binder_uintptr_t);
4147 			node = binder_get_node(proc, node_ptr);
4148 			if (node == NULL) {
4149 				binder_user_error("%d:%d %s u%016llx no match\n",
4150 					proc->pid, thread->pid,
4151 					cmd == BC_INCREFS_DONE ?
4152 					"BC_INCREFS_DONE" :
4153 					"BC_ACQUIRE_DONE",
4154 					(u64)node_ptr);
4155 				break;
4156 			}
4157 			if (cookie != node->cookie) {
4158 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4159 					proc->pid, thread->pid,
4160 					cmd == BC_INCREFS_DONE ?
4161 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4162 					(u64)node_ptr, node->debug_id,
4163 					(u64)cookie, (u64)node->cookie);
4164 				binder_put_node(node);
4165 				break;
4166 			}
4167 			binder_node_inner_lock(node);
4168 			if (cmd == BC_ACQUIRE_DONE) {
4169 				if (node->pending_strong_ref == 0) {
4170 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4171 						proc->pid, thread->pid,
4172 						node->debug_id);
4173 					binder_node_inner_unlock(node);
4174 					binder_put_node(node);
4175 					break;
4176 				}
4177 				node->pending_strong_ref = 0;
4178 			} else {
4179 				if (node->pending_weak_ref == 0) {
4180 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4181 						proc->pid, thread->pid,
4182 						node->debug_id);
4183 					binder_node_inner_unlock(node);
4184 					binder_put_node(node);
4185 					break;
4186 				}
4187 				node->pending_weak_ref = 0;
4188 			}
4189 			free_node = binder_dec_node_nilocked(node,
4190 					cmd == BC_ACQUIRE_DONE, 0);
4191 			WARN_ON(free_node);
4192 			binder_debug(BINDER_DEBUG_USER_REFS,
4193 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
4194 				     proc->pid, thread->pid,
4195 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4196 				     node->debug_id, node->local_strong_refs,
4197 				     node->local_weak_refs, node->tmp_refs);
4198 			binder_node_inner_unlock(node);
4199 			binder_put_node(node);
4200 			break;
4201 		}
4202 		case BC_ATTEMPT_ACQUIRE:
4203 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4204 			return -EINVAL;
4205 		case BC_ACQUIRE_RESULT:
4206 			pr_err("BC_ACQUIRE_RESULT not supported\n");
4207 			return -EINVAL;
4208 
4209 		case BC_FREE_BUFFER: {
4210 			binder_uintptr_t data_ptr;
4211 			struct binder_buffer *buffer;
4212 
4213 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4214 				return -EFAULT;
4215 			ptr += sizeof(binder_uintptr_t);
4216 
4217 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
4218 							      data_ptr);
4219 			if (IS_ERR_OR_NULL(buffer)) {
4220 				if (PTR_ERR(buffer) == -EPERM) {
4221 					binder_user_error(
4222 						"%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4223 						proc->pid, thread->pid,
4224 						(unsigned long)data_ptr - proc->alloc.vm_start);
4225 				} else {
4226 					binder_user_error(
4227 						"%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4228 						proc->pid, thread->pid,
4229 						(unsigned long)data_ptr - proc->alloc.vm_start);
4230 				}
4231 				break;
4232 			}
4233 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4234 				     "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4235 				     proc->pid, thread->pid,
4236 				     (unsigned long)data_ptr - proc->alloc.vm_start,
4237 				     buffer->debug_id,
4238 				     buffer->transaction ? "active" : "finished");
4239 			binder_free_buf(proc, thread, buffer, false);
4240 			break;
4241 		}
4242 
4243 		case BC_TRANSACTION_SG:
4244 		case BC_REPLY_SG: {
4245 			struct binder_transaction_data_sg tr;
4246 
4247 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4248 				return -EFAULT;
4249 			ptr += sizeof(tr);
4250 			binder_transaction(proc, thread, &tr.transaction_data,
4251 					   cmd == BC_REPLY_SG, tr.buffers_size);
4252 			break;
4253 		}
4254 		case BC_TRANSACTION:
4255 		case BC_REPLY: {
4256 			struct binder_transaction_data tr;
4257 
4258 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4259 				return -EFAULT;
4260 			ptr += sizeof(tr);
4261 			binder_transaction(proc, thread, &tr,
4262 					   cmd == BC_REPLY, 0);
4263 			break;
4264 		}
4265 
4266 		case BC_REGISTER_LOOPER:
4267 			binder_debug(BINDER_DEBUG_THREADS,
4268 				     "%d:%d BC_REGISTER_LOOPER\n",
4269 				     proc->pid, thread->pid);
4270 			binder_inner_proc_lock(proc);
4271 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4272 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4273 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4274 					proc->pid, thread->pid);
4275 			} else if (proc->requested_threads == 0) {
4276 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4277 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4278 					proc->pid, thread->pid);
4279 			} else {
4280 				proc->requested_threads--;
4281 				proc->requested_threads_started++;
4282 			}
4283 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4284 			binder_inner_proc_unlock(proc);
4285 			break;
4286 		case BC_ENTER_LOOPER:
4287 			binder_debug(BINDER_DEBUG_THREADS,
4288 				     "%d:%d BC_ENTER_LOOPER\n",
4289 				     proc->pid, thread->pid);
4290 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4291 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4292 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4293 					proc->pid, thread->pid);
4294 			}
4295 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4296 			break;
4297 		case BC_EXIT_LOOPER:
4298 			binder_debug(BINDER_DEBUG_THREADS,
4299 				     "%d:%d BC_EXIT_LOOPER\n",
4300 				     proc->pid, thread->pid);
4301 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4302 			break;
4303 
4304 		case BC_REQUEST_DEATH_NOTIFICATION:
4305 		case BC_CLEAR_DEATH_NOTIFICATION: {
4306 			uint32_t target;
4307 			binder_uintptr_t cookie;
4308 			struct binder_ref *ref;
4309 			struct binder_ref_death *death = NULL;
4310 
4311 			if (get_user(target, (uint32_t __user *)ptr))
4312 				return -EFAULT;
4313 			ptr += sizeof(uint32_t);
4314 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4315 				return -EFAULT;
4316 			ptr += sizeof(binder_uintptr_t);
4317 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4318 				/*
4319 				 * Allocate memory for the death notification
4320 				 * before taking the lock.
4321 				 */
4322 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4323 				if (death == NULL) {
4324 					WARN_ON(thread->return_error.cmd !=
4325 						BR_OK);
4326 					thread->return_error.cmd = BR_ERROR;
4327 					binder_enqueue_thread_work(
4328 						thread,
4329 						&thread->return_error.work);
4330 					binder_debug(
4331 						BINDER_DEBUG_FAILED_TRANSACTION,
4332 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4333 						proc->pid, thread->pid);
4334 					break;
4335 				}
4336 			}
4337 			binder_proc_lock(proc);
4338 			ref = binder_get_ref_olocked(proc, target, false);
4339 			if (ref == NULL) {
4340 				binder_user_error("%d:%d %s invalid ref %d\n",
4341 					proc->pid, thread->pid,
4342 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4343 					"BC_REQUEST_DEATH_NOTIFICATION" :
4344 					"BC_CLEAR_DEATH_NOTIFICATION",
4345 					target);
4346 				binder_proc_unlock(proc);
4347 				kfree(death);
4348 				break;
4349 			}
4350 
4351 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4352 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4353 				     proc->pid, thread->pid,
4354 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4355 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4356 				     "BC_CLEAR_DEATH_NOTIFICATION",
4357 				     (u64)cookie, ref->data.debug_id,
4358 				     ref->data.desc, ref->data.strong,
4359 				     ref->data.weak, ref->node->debug_id);
4360 
4361 			binder_node_lock(ref->node);
4362 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4363 				if (ref->death) {
4364 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4365 						proc->pid, thread->pid);
4366 					binder_node_unlock(ref->node);
4367 					binder_proc_unlock(proc);
4368 					kfree(death);
4369 					break;
4370 				}
4371 				binder_stats_created(BINDER_STAT_DEATH);
4372 				INIT_LIST_HEAD(&death->work.entry);
4373 				death->cookie = cookie;
4374 				ref->death = death;
4375 				if (ref->node->proc == NULL) {
4376 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4377 
4378 					binder_inner_proc_lock(proc);
4379 					binder_enqueue_work_ilocked(
4380 						&ref->death->work, &proc->todo);
4381 					binder_wakeup_proc_ilocked(proc);
4382 					binder_inner_proc_unlock(proc);
4383 				}
4384 			} else {
4385 				if (ref->death == NULL) {
4386 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4387 						proc->pid, thread->pid);
4388 					binder_node_unlock(ref->node);
4389 					binder_proc_unlock(proc);
4390 					break;
4391 				}
4392 				death = ref->death;
4393 				if (death->cookie != cookie) {
4394 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4395 						proc->pid, thread->pid,
4396 						(u64)death->cookie,
4397 						(u64)cookie);
4398 					binder_node_unlock(ref->node);
4399 					binder_proc_unlock(proc);
4400 					break;
4401 				}
4402 				ref->death = NULL;
4403 				binder_inner_proc_lock(proc);
4404 				if (list_empty(&death->work.entry)) {
4405 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4406 					if (thread->looper &
4407 					    (BINDER_LOOPER_STATE_REGISTERED |
4408 					     BINDER_LOOPER_STATE_ENTERED))
4409 						binder_enqueue_thread_work_ilocked(
4410 								thread,
4411 								&death->work);
4412 					else {
4413 						binder_enqueue_work_ilocked(
4414 								&death->work,
4415 								&proc->todo);
4416 						binder_wakeup_proc_ilocked(
4417 								proc);
4418 					}
4419 				} else {
4420 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4421 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4422 				}
4423 				binder_inner_proc_unlock(proc);
4424 			}
4425 			binder_node_unlock(ref->node);
4426 			binder_proc_unlock(proc);
4427 		} break;
4428 		case BC_DEAD_BINDER_DONE: {
4429 			struct binder_work *w;
4430 			binder_uintptr_t cookie;
4431 			struct binder_ref_death *death = NULL;
4432 
4433 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4434 				return -EFAULT;
4435 
4436 			ptr += sizeof(cookie);
4437 			binder_inner_proc_lock(proc);
4438 			list_for_each_entry(w, &proc->delivered_death,
4439 					    entry) {
4440 				struct binder_ref_death *tmp_death =
4441 					container_of(w,
4442 						     struct binder_ref_death,
4443 						     work);
4444 
4445 				if (tmp_death->cookie == cookie) {
4446 					death = tmp_death;
4447 					break;
4448 				}
4449 			}
4450 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4451 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4452 				     proc->pid, thread->pid, (u64)cookie,
4453 				     death);
4454 			if (death == NULL) {
4455 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4456 					proc->pid, thread->pid, (u64)cookie);
4457 				binder_inner_proc_unlock(proc);
4458 				break;
4459 			}
4460 			binder_dequeue_work_ilocked(&death->work);
4461 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4462 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4463 				if (thread->looper &
4464 					(BINDER_LOOPER_STATE_REGISTERED |
4465 					 BINDER_LOOPER_STATE_ENTERED))
4466 					binder_enqueue_thread_work_ilocked(
4467 						thread, &death->work);
4468 				else {
4469 					binder_enqueue_work_ilocked(
4470 							&death->work,
4471 							&proc->todo);
4472 					binder_wakeup_proc_ilocked(proc);
4473 				}
4474 			}
4475 			binder_inner_proc_unlock(proc);
4476 		} break;
4477 
4478 		case BC_REQUEST_FREEZE_NOTIFICATION: {
4479 			struct binder_handle_cookie handle_cookie;
4480 			int error;
4481 
4482 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4483 				return -EFAULT;
4484 			ptr += sizeof(handle_cookie);
4485 			error = binder_request_freeze_notification(proc, thread,
4486 								   &handle_cookie);
4487 			if (error)
4488 				return error;
4489 		} break;
4490 
4491 		case BC_CLEAR_FREEZE_NOTIFICATION: {
4492 			struct binder_handle_cookie handle_cookie;
4493 			int error;
4494 
4495 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4496 				return -EFAULT;
4497 			ptr += sizeof(handle_cookie);
4498 			error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4499 			if (error)
4500 				return error;
4501 		} break;
4502 
4503 		case BC_FREEZE_NOTIFICATION_DONE: {
4504 			binder_uintptr_t cookie;
4505 			int error;
4506 
4507 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4508 				return -EFAULT;
4509 
4510 			ptr += sizeof(cookie);
4511 			error = binder_freeze_notification_done(proc, thread, cookie);
4512 			if (error)
4513 				return error;
4514 		} break;
4515 
4516 		default:
4517 			pr_err("%d:%d unknown command %u\n",
4518 			       proc->pid, thread->pid, cmd);
4519 			return -EINVAL;
4520 		}
4521 		*consumed = ptr - buffer;
4522 	}
4523 	return 0;
4524 }
4525 
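/* Account a BR_* return command in the global, per-proc and per-thread stats. */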
4526 static void binder_stat_br(struct binder_proc *proc,
4527 			   struct binder_thread *thread, uint32_t cmd)
4528 {
4529 	trace_binder_return(cmd);
4530 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4531 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4532 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4533 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4534 	}
4535 }
4536 
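/*
 * binder_put_node_cmd() - write a BR_* node command to the user read
 * buffer: the command word followed by the node's ptr and cookie.
 * *ptrp is advanced past the data on success.
 */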
4537 static int binder_put_node_cmd(struct binder_proc *proc,
4538 			       struct binder_thread *thread,
4539 			       void __user **ptrp,
4540 			       binder_uintptr_t node_ptr,
4541 			       binder_uintptr_t node_cookie,
4542 			       int node_debug_id,
4543 			       uint32_t cmd, const char *cmd_name)
4544 {
4545 	void __user *ptr = *ptrp;
4546 
4547 	if (put_user(cmd, (uint32_t __user *)ptr))
4548 		return -EFAULT;
4549 	ptr += sizeof(uint32_t);
4550 
4551 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4552 		return -EFAULT;
4553 	ptr += sizeof(binder_uintptr_t);
4554 
4555 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4556 		return -EFAULT;
4557 	ptr += sizeof(binder_uintptr_t);
4558 
4559 	binder_stat_br(proc, thread, cmd);
4560 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4561 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4562 		     (u64)node_ptr, (u64)node_cookie);
4563 
4564 	*ptrp = ptr;
4565 	return 0;
4566 }
4567 
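/*
 * binder_wait_for_work() - sleep until thread or process work arrives.
 * The thread parks on its own waitqueue; when @do_proc_work it is also
 * added to proc->waiting_threads so process-wide work can wake it.
 * Returns -EINTR if interrupted by a signal.
 */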
4568 static int binder_wait_for_work(struct binder_thread *thread,
4569 				bool do_proc_work)
4570 {
4571 	DEFINE_WAIT(wait);
4572 	struct binder_proc *proc = thread->proc;
4573 	int ret = 0;
4574 
4575 	binder_inner_proc_lock(proc);
4576 	for (;;) {
4577 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4578 		if (binder_has_work_ilocked(thread, do_proc_work))
4579 			break;
4580 		if (do_proc_work)
4581 			list_add(&thread->waiting_thread_node,
4582 				 &proc->waiting_threads);
4583 		binder_inner_proc_unlock(proc);
4584 		schedule();
4585 		binder_inner_proc_lock(proc);
4586 		list_del_init(&thread->waiting_thread_node);
4587 		if (signal_pending(current)) {
4588 			ret = -EINTR;
4589 			break;
4590 		}
4591 	}
4592 	finish_wait(&thread->wait, &wait);
4593 	binder_inner_proc_unlock(proc);
4594 
4595 	return ret;
4596 }
4597 
4598 /**
4599  * binder_apply_fd_fixups() - finish fd translation
4600  * @proc:         binder_proc associated with @t->buffer
4601  * @t:	binder transaction with list of fd fixups
4602  *
4603  * Now that we are in the context of the transaction target
4604  * process, we can allocate and install fds. Process the
4605  * list of fds to translate and fix up the buffer with the
4606  * new fds first, and only then install the files.
4607  *
4608  * If we fail to allocate an fd, skip the install and release
4609  * any fds that have already been allocated.
4610  */
4611 static int binder_apply_fd_fixups(struct binder_proc *proc,
4612 				  struct binder_transaction *t)
4613 {
4614 	struct binder_txn_fd_fixup *fixup, *tmp;
4615 	int ret = 0;
4616 
4617 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4618 		int fd = get_unused_fd_flags(O_CLOEXEC);
4619 
4620 		if (fd < 0) {
4621 			binder_debug(BINDER_DEBUG_TRANSACTION,
4622 				     "failed fd fixup txn %d fd %d\n",
4623 				     t->debug_id, fd);
4624 			ret = -ENOMEM;
4625 			goto err;
4626 		}
4627 		binder_debug(BINDER_DEBUG_TRANSACTION,
4628 			     "fd fixup txn %d fd %d\n",
4629 			     t->debug_id, fd);
4630 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4631 		fixup->target_fd = fd;
4632 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4633 						fixup->offset, &fd,
4634 						sizeof(u32))) {
4635 			ret = -EINVAL;
4636 			goto err;
4637 		}
4638 	}
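	/*
	 * Every fd was reserved and the buffer patched, so it is now
	 * safe to publish the files with fd_install().
	 */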
4639 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4640 		fd_install(fixup->target_fd, fixup->file);
4641 		list_del(&fixup->fixup_entry);
4642 		kfree(fixup);
4643 	}
4644 
4645 	return ret;
4646 
4647 err:
4648 	binder_free_txn_fixups(t);
4649 	return ret;
4650 }
4651 
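/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 * Blocks (unless @non_block) until work is available on the thread or,
 * when eligible, the process todo list, then translates each queued
 * binder_work item into return commands consumed by userspace.
 */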
4652 static int binder_thread_read(struct binder_proc *proc,
4653 			      struct binder_thread *thread,
4654 			      binder_uintptr_t binder_buffer, size_t size,
4655 			      binder_size_t *consumed, int non_block)
4656 {
4657 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4658 	void __user *ptr = buffer + *consumed;
4659 	void __user *end = buffer + size;
4660 
4661 	int ret = 0;
4662 	int wait_for_proc_work;
4663 
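	/*
	 * A fresh read always starts with a BR_NOOP, so the returned
	 * buffer carries at least one command.
	 */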
4664 	if (*consumed == 0) {
4665 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4666 			return -EFAULT;
4667 		ptr += sizeof(uint32_t);
4668 	}
4669 
4670 retry:
4671 	binder_inner_proc_lock(proc);
4672 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4673 	binder_inner_proc_unlock(proc);
4674 
4675 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4676 
4677 	trace_binder_wait_for_work(wait_for_proc_work,
4678 				   !!thread->transaction_stack,
4679 				   !binder_worklist_empty(proc, &thread->todo));
4680 	if (wait_for_proc_work) {
4681 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4682 					BINDER_LOOPER_STATE_ENTERED))) {
4683 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4684 				proc->pid, thread->pid, thread->looper);
4685 			wait_event_interruptible(binder_user_error_wait,
4686 						 binder_stop_on_user_error < 2);
4687 		}
4688 		binder_set_nice(proc->default_priority);
4689 	}
4690 
4691 	if (non_block) {
4692 		if (!binder_has_work(thread, wait_for_proc_work))
4693 			ret = -EAGAIN;
4694 	} else {
4695 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4696 	}
4697 
4698 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4699 
4700 	if (ret)
4701 		return ret;
4702 
4703 	while (1) {
4704 		uint32_t cmd;
4705 		struct binder_transaction_data_secctx tr;
4706 		struct binder_transaction_data *trd = &tr.transaction_data;
4707 		struct binder_work *w = NULL;
4708 		struct list_head *list = NULL;
4709 		struct binder_transaction *t = NULL;
4710 		struct binder_thread *t_from;
4711 		size_t trsize = sizeof(*trd);
4712 
4713 		binder_inner_proc_lock(proc);
4714 		if (!binder_worklist_empty_ilocked(&thread->todo))
4715 			list = &thread->todo;
4716 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4717 			   wait_for_proc_work)
4718 			list = &proc->todo;
4719 		else {
4720 			binder_inner_proc_unlock(proc);
4721 
4722 			/* no data added beyond the BR_NOOP written above */
4723 			if (ptr - buffer == 4 && !thread->looper_need_return)
4724 				goto retry;
4725 			break;
4726 		}
4727 
4728 		if (end - ptr < sizeof(tr) + 4) {
4729 			binder_inner_proc_unlock(proc);
4730 			break;
4731 		}
4732 		w = binder_dequeue_work_head_ilocked(list);
4733 		if (binder_worklist_empty_ilocked(&thread->todo))
4734 			thread->process_todo = false;
4735 
4736 		switch (w->type) {
4737 		case BINDER_WORK_TRANSACTION: {
4738 			binder_inner_proc_unlock(proc);
4739 			t = container_of(w, struct binder_transaction, work);
4740 		} break;
4741 		case BINDER_WORK_RETURN_ERROR: {
4742 			struct binder_error *e = container_of(
4743 					w, struct binder_error, work);
4744 
4745 			WARN_ON(e->cmd == BR_OK);
4746 			binder_inner_proc_unlock(proc);
4747 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4748 				return -EFAULT;
4749 			cmd = e->cmd;
4750 			e->cmd = BR_OK;
4751 			ptr += sizeof(uint32_t);
4752 
4753 			binder_stat_br(proc, thread, cmd);
4754 		} break;
4755 		case BINDER_WORK_TRANSACTION_COMPLETE:
4756 		case BINDER_WORK_TRANSACTION_PENDING:
4757 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4758 			if (proc->oneway_spam_detection_enabled &&
4759 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4760 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4761 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4762 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4763 			else
4764 				cmd = BR_TRANSACTION_COMPLETE;
4765 			binder_inner_proc_unlock(proc);
4766 			kfree(w);
4767 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4768 			if (put_user(cmd, (uint32_t __user *)ptr))
4769 				return -EFAULT;
4770 			ptr += sizeof(uint32_t);
4771 
4772 			binder_stat_br(proc, thread, cmd);
4773 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4774 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4775 				     proc->pid, thread->pid);
4776 		} break;
4777 		case BINDER_WORK_NODE: {
4778 			struct binder_node *node = container_of(w, struct binder_node, work);
4779 			int strong, weak;
4780 			binder_uintptr_t node_ptr = node->ptr;
4781 			binder_uintptr_t node_cookie = node->cookie;
4782 			int node_debug_id = node->debug_id;
4783 			int has_weak_ref;
4784 			int has_strong_ref;
4785 			void __user *orig_ptr = ptr;
4786 
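			/*
			 * Derive the node's desired strong/weak state and
			 * emit BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
			 * for whichever transitions userspace has not yet
			 * seen.
			 */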
4787 			BUG_ON(proc != node->proc);
4788 			strong = node->internal_strong_refs ||
4789 					node->local_strong_refs;
4790 			weak = !hlist_empty(&node->refs) ||
4791 					node->local_weak_refs ||
4792 					node->tmp_refs || strong;
4793 			has_strong_ref = node->has_strong_ref;
4794 			has_weak_ref = node->has_weak_ref;
4795 
4796 			if (weak && !has_weak_ref) {
4797 				node->has_weak_ref = 1;
4798 				node->pending_weak_ref = 1;
4799 				node->local_weak_refs++;
4800 			}
4801 			if (strong && !has_strong_ref) {
4802 				node->has_strong_ref = 1;
4803 				node->pending_strong_ref = 1;
4804 				node->local_strong_refs++;
4805 			}
4806 			if (!strong && has_strong_ref)
4807 				node->has_strong_ref = 0;
4808 			if (!weak && has_weak_ref)
4809 				node->has_weak_ref = 0;
4810 			if (!weak && !strong) {
4811 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4812 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4813 					     proc->pid, thread->pid,
4814 					     node_debug_id,
4815 					     (u64)node_ptr,
4816 					     (u64)node_cookie);
4817 				rb_erase(&node->rb_node, &proc->nodes);
4818 				binder_inner_proc_unlock(proc);
4819 				binder_node_lock(node);
4820 				/*
4821 				 * Acquire the node lock before freeing the
4822 				 * node to serialize with other threads that
4823 				 * may have been holding the node lock while
4824 				 * decrementing this node (avoids race where
4825 				 * this thread frees while the other thread
4826 				 * is unlocking the node after the final
4827 				 * decrement)
4828 				 */
4829 				binder_node_unlock(node);
4830 				binder_free_node(node);
4831 			} else
4832 				binder_inner_proc_unlock(proc);
4833 
4834 			if (weak && !has_weak_ref)
4835 				ret = binder_put_node_cmd(
4836 						proc, thread, &ptr, node_ptr,
4837 						node_cookie, node_debug_id,
4838 						BR_INCREFS, "BR_INCREFS");
4839 			if (!ret && strong && !has_strong_ref)
4840 				ret = binder_put_node_cmd(
4841 						proc, thread, &ptr, node_ptr,
4842 						node_cookie, node_debug_id,
4843 						BR_ACQUIRE, "BR_ACQUIRE");
4844 			if (!ret && !strong && has_strong_ref)
4845 				ret = binder_put_node_cmd(
4846 						proc, thread, &ptr, node_ptr,
4847 						node_cookie, node_debug_id,
4848 						BR_RELEASE, "BR_RELEASE");
4849 			if (!ret && !weak && has_weak_ref)
4850 				ret = binder_put_node_cmd(
4851 						proc, thread, &ptr, node_ptr,
4852 						node_cookie, node_debug_id,
4853 						BR_DECREFS, "BR_DECREFS");
4854 			if (orig_ptr == ptr)
4855 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4856 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4857 					     proc->pid, thread->pid,
4858 					     node_debug_id,
4859 					     (u64)node_ptr,
4860 					     (u64)node_cookie);
4861 			if (ret)
4862 				return ret;
4863 		} break;
4864 		case BINDER_WORK_DEAD_BINDER:
4865 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4866 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4867 			struct binder_ref_death *death;
4868 			uint32_t cmd;
4869 			binder_uintptr_t cookie;
4870 
4871 			death = container_of(w, struct binder_ref_death, work);
4872 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4873 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4874 			else
4875 				cmd = BR_DEAD_BINDER;
4876 			cookie = death->cookie;
4877 
4878 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4879 				     "%d:%d %s %016llx\n",
4880 				      proc->pid, thread->pid,
4881 				      cmd == BR_DEAD_BINDER ?
4882 				      "BR_DEAD_BINDER" :
4883 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4884 				      (u64)cookie);
4885 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4886 				binder_inner_proc_unlock(proc);
4887 				kfree(death);
4888 				binder_stats_deleted(BINDER_STAT_DEATH);
4889 			} else {
4890 				binder_enqueue_work_ilocked(
4891 						w, &proc->delivered_death);
4892 				binder_inner_proc_unlock(proc);
4893 			}
4894 			if (put_user(cmd, (uint32_t __user *)ptr))
4895 				return -EFAULT;
4896 			ptr += sizeof(uint32_t);
4897 			if (put_user(cookie,
4898 				     (binder_uintptr_t __user *)ptr))
4899 				return -EFAULT;
4900 			ptr += sizeof(binder_uintptr_t);
4901 			binder_stat_br(proc, thread, cmd);
4902 			if (cmd == BR_DEAD_BINDER)
4903 				goto done; /* DEAD_BINDER notifications can cause transactions */
4904 		} break;
4905 
4906 		case BINDER_WORK_FROZEN_BINDER: {
4907 			struct binder_ref_freeze *freeze;
4908 			struct binder_frozen_state_info info;
4909 
4910 			memset(&info, 0, sizeof(info));
4911 			freeze = container_of(w, struct binder_ref_freeze, work);
4912 			info.is_frozen = freeze->is_frozen;
4913 			info.cookie = freeze->cookie;
4914 			freeze->sent = true;
4915 			binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4916 			binder_inner_proc_unlock(proc);
4917 
4918 			if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4919 				return -EFAULT;
4920 			ptr += sizeof(uint32_t);
4921 			if (copy_to_user(ptr, &info, sizeof(info)))
4922 				return -EFAULT;
4923 			ptr += sizeof(info);
4924 			binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4925 			goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4926 		} break;
4927 
4928 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4929 			struct binder_ref_freeze *freeze =
4930 			    container_of(w, struct binder_ref_freeze, work);
4931 			binder_uintptr_t cookie = freeze->cookie;
4932 
4933 			binder_inner_proc_unlock(proc);
4934 			kfree(freeze);
4935 			binder_stats_deleted(BINDER_STAT_FREEZE);
4936 			if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
4937 				return -EFAULT;
4938 			ptr += sizeof(uint32_t);
4939 			if (put_user(cookie, (binder_uintptr_t __user *)ptr))
4940 				return -EFAULT;
4941 			ptr += sizeof(binder_uintptr_t);
4942 			binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
4943 		} break;
4944 
4945 		default:
4946 			binder_inner_proc_unlock(proc);
4947 			pr_err("%d:%d: bad work type %d\n",
4948 			       proc->pid, thread->pid, w->type);
4949 			break;
4950 		}
4951 
4952 		if (!t)
4953 			continue;
4954 
4955 		BUG_ON(t->buffer == NULL);
4956 		if (t->buffer->target_node) {
4957 			struct binder_node *target_node = t->buffer->target_node;
4958 
4959 			trd->target.ptr = target_node->ptr;
4960 			trd->cookie = target_node->cookie;
4961 			t->saved_priority = task_nice(current);
4962 			if (t->priority < target_node->min_priority &&
4963 			    !(t->flags & TF_ONE_WAY))
4964 				binder_set_nice(t->priority);
4965 			else if (!(t->flags & TF_ONE_WAY) ||
4966 				 t->saved_priority > target_node->min_priority)
4967 				binder_set_nice(target_node->min_priority);
4968 			cmd = BR_TRANSACTION;
4969 		} else {
4970 			trd->target.ptr = 0;
4971 			trd->cookie = 0;
4972 			cmd = BR_REPLY;
4973 		}
4974 		trd->code = t->code;
4975 		trd->flags = t->flags;
4976 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4977 
4978 		t_from = binder_get_txn_from(t);
4979 		if (t_from) {
4980 			struct task_struct *sender = t_from->proc->tsk;
4981 
4982 			trd->sender_pid =
4983 				task_tgid_nr_ns(sender,
4984 						task_active_pid_ns(current));
4985 		} else {
4986 			trd->sender_pid = 0;
4987 		}
4988 
4989 		ret = binder_apply_fd_fixups(proc, t);
4990 		if (ret) {
4991 			struct binder_buffer *buffer = t->buffer;
4992 			bool oneway = !!(t->flags & TF_ONE_WAY);
4993 			int tid = t->debug_id;
4994 
4995 			if (t_from)
4996 				binder_thread_dec_tmpref(t_from);
4997 			buffer->transaction = NULL;
4998 			binder_cleanup_transaction(t, "fd fixups failed",
4999 						   BR_FAILED_REPLY);
5000 			binder_free_buf(proc, thread, buffer, true);
5001 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5002 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5003 				     proc->pid, thread->pid,
5004 				     oneway ? "async " :
5005 					(cmd == BR_REPLY ? "reply " : ""),
5006 				     tid, BR_FAILED_REPLY, ret, __LINE__);
5007 			if (cmd == BR_REPLY) {
5008 				cmd = BR_FAILED_REPLY;
5009 				if (put_user(cmd, (uint32_t __user *)ptr))
5010 					return -EFAULT;
5011 				ptr += sizeof(uint32_t);
5012 				binder_stat_br(proc, thread, cmd);
5013 				break;
5014 			}
5015 			continue;
5016 		}
5017 		trd->data_size = t->buffer->data_size;
5018 		trd->offsets_size = t->buffer->offsets_size;
5019 		trd->data.ptr.buffer = t->buffer->user_data;
5020 		trd->data.ptr.offsets = trd->data.ptr.buffer +
5021 					ALIGN(t->buffer->data_size,
5022 					    sizeof(void *));
5023 
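		/*
		 * If a security context was captured for this transaction,
		 * switch the return code to BR_TRANSACTION_SEC_CTX and copy
		 * the larger secctx-carrying struct to userspace instead.
		 */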
5024 		tr.secctx = t->security_ctx;
5025 		if (t->security_ctx) {
5026 			cmd = BR_TRANSACTION_SEC_CTX;
5027 			trsize = sizeof(tr);
5028 		}
5029 		if (put_user(cmd, (uint32_t __user *)ptr)) {
5030 			if (t_from)
5031 				binder_thread_dec_tmpref(t_from);
5032 
5033 			binder_cleanup_transaction(t, "put_user failed",
5034 						   BR_FAILED_REPLY);
5035 
5036 			return -EFAULT;
5037 		}
5038 		ptr += sizeof(uint32_t);
5039 		if (copy_to_user(ptr, &tr, trsize)) {
5040 			if (t_from)
5041 				binder_thread_dec_tmpref(t_from);
5042 
5043 			binder_cleanup_transaction(t, "copy_to_user failed",
5044 						   BR_FAILED_REPLY);
5045 
5046 			return -EFAULT;
5047 		}
5048 		ptr += trsize;
5049 
5050 		trace_binder_transaction_received(t);
5051 		binder_stat_br(proc, thread, cmd);
5052 		binder_debug(BINDER_DEBUG_TRANSACTION,
5053 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5054 			     proc->pid, thread->pid,
5055 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5056 				(cmd == BR_TRANSACTION_SEC_CTX) ?
5057 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5058 			     t->debug_id, t_from ? t_from->proc->pid : 0,
5059 			     t_from ? t_from->pid : 0, cmd,
5060 			     t->buffer->data_size, t->buffer->offsets_size);
5061 
5062 		if (t_from)
5063 			binder_thread_dec_tmpref(t_from);
5064 		t->buffer->allow_user_free = 1;
5065 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
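			/*
			 * Synchronous transaction: push it onto this thread's
			 * transaction stack so the eventual reply can be
			 * matched back up with it.
			 */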
5066 			binder_inner_proc_lock(thread->proc);
5067 			t->to_parent = thread->transaction_stack;
5068 			t->to_thread = thread;
5069 			thread->transaction_stack = t;
5070 			binder_inner_proc_unlock(thread->proc);
5071 		} else {
5072 			binder_free_transaction(t);
5073 		}
5074 		break;
5075 	}
5076 
5077 done:
5078 
5079 	*consumed = ptr - buffer;
5080 	binder_inner_proc_lock(proc);
5081 	if (proc->requested_threads == 0 &&
5082 	    list_empty(&thread->proc->waiting_threads) &&
5083 	    proc->requested_threads_started < proc->max_threads &&
5084 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5085 	     BINDER_LOOPER_STATE_ENTERED))
5086 	    /* the user-space code fails to spawn a new thread if we leave this out */) {
5087 		proc->requested_threads++;
5088 		binder_inner_proc_unlock(proc);
5089 		binder_debug(BINDER_DEBUG_THREADS,
5090 			     "%d:%d BR_SPAWN_LOOPER\n",
5091 			     proc->pid, thread->pid);
5092 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5093 			return -EFAULT;
5094 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5095 	} else
5096 		binder_inner_proc_unlock(proc);
5097 	return 0;
5098 }
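
/*
 * The read buffer filled above is a packed stream: each BR_* command
 * word is immediately followed by that command's fixed-size payload.
 * A minimal sketch of how a userspace looper might drain it (the
 * handle_transaction() helper is hypothetical; real clients normally
 * go through libbinder rather than parsing the stream by hand):
 *
 *	char buf[256];
 *	struct binder_write_read bwr = {
 *		.read_buffer = (binder_uintptr_t)buf,
 *		.read_size   = sizeof(buf),
 *	};
 *
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	for (char *p = buf; p < buf + bwr.read_consumed; ) {
 *		uint32_t cmd;
 *
 *		memcpy(&cmd, p, sizeof(cmd));
 *		p += sizeof(cmd);
 *		switch (cmd) {
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data tr;
 *
 *			memcpy(&tr, p, sizeof(tr));
 *			p += sizeof(tr);
 *			handle_transaction(&tr);
 *			break;
 *		}
 *		default:
 *			p += _IOC_SIZE(cmd);
 *			break;
 *		}
 *	}
 *
 * The default arm works because every BR_* value encodes its payload
 * size in the _IOC size field of the command word.
 */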
5099 
5100 static void binder_release_work(struct binder_proc *proc,
5101 				struct list_head *list)
5102 {
5103 	struct binder_work *w;
5104 	enum binder_work_type wtype;
5105 
5106 	while (1) {
5107 		binder_inner_proc_lock(proc);
5108 		w = binder_dequeue_work_head_ilocked(list);
5109 		wtype = w ? w->type : 0;
5110 		binder_inner_proc_unlock(proc);
5111 		if (!w)
5112 			return;
5113 
5114 		switch (wtype) {
5115 		case BINDER_WORK_TRANSACTION: {
5116 			struct binder_transaction *t;
5117 
5118 			t = container_of(w, struct binder_transaction, work);
5119 
5120 			binder_cleanup_transaction(t, "process died.",
5121 						   BR_DEAD_REPLY);
5122 		} break;
5123 		case BINDER_WORK_RETURN_ERROR: {
5124 			struct binder_error *e = container_of(
5125 					w, struct binder_error, work);
5126 
5127 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5128 				"undelivered TRANSACTION_ERROR: %u\n",
5129 				e->cmd);
5130 		} break;
5131 		case BINDER_WORK_TRANSACTION_PENDING:
5132 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5133 		case BINDER_WORK_TRANSACTION_COMPLETE: {
5134 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5135 				"undelivered TRANSACTION_COMPLETE\n");
5136 			kfree(w);
5137 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5138 		} break;
5139 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5140 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5141 			struct binder_ref_death *death;
5142 
5143 			death = container_of(w, struct binder_ref_death, work);
5144 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5145 				"undelivered death notification, %016llx\n",
5146 				(u64)death->cookie);
5147 			kfree(death);
5148 			binder_stats_deleted(BINDER_STAT_DEATH);
5149 		} break;
5150 		case BINDER_WORK_NODE:
5151 			break;
5152 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5153 			struct binder_ref_freeze *freeze;
5154 
5155 			freeze = container_of(w, struct binder_ref_freeze, work);
5156 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5157 				     "undelivered freeze notification, %016llx\n",
5158 				     (u64)freeze->cookie);
5159 			kfree(freeze);
5160 			binder_stats_deleted(BINDER_STAT_FREEZE);
5161 		} break;
5162 		default:
5163 			pr_err("unexpected work type, %d, not freed\n",
5164 			       wtype);
5165 			break;
5166 		}
5167 	}
5168 }
5170 
5171 static struct binder_thread *binder_get_thread_ilocked(
5172 		struct binder_proc *proc, struct binder_thread *new_thread)
5173 {
5174 	struct binder_thread *thread = NULL;
5175 	struct rb_node *parent = NULL;
5176 	struct rb_node **p = &proc->threads.rb_node;
5177 
5178 	while (*p) {
5179 		parent = *p;
5180 		thread = rb_entry(parent, struct binder_thread, rb_node);
5181 
5182 		if (current->pid < thread->pid)
5183 			p = &(*p)->rb_left;
5184 		else if (current->pid > thread->pid)
5185 			p = &(*p)->rb_right;
5186 		else
5187 			return thread;
5188 	}
5189 	if (!new_thread)
5190 		return NULL;
5191 	thread = new_thread;
5192 	binder_stats_created(BINDER_STAT_THREAD);
5193 	thread->proc = proc;
5194 	thread->pid = current->pid;
5195 	atomic_set(&thread->tmp_ref, 0);
5196 	init_waitqueue_head(&thread->wait);
5197 	INIT_LIST_HEAD(&thread->todo);
5198 	rb_link_node(&thread->rb_node, parent, p);
5199 	rb_insert_color(&thread->rb_node, &proc->threads);
5200 	thread->looper_need_return = true;
5201 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5202 	thread->return_error.cmd = BR_OK;
5203 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5204 	thread->reply_error.cmd = BR_OK;
5205 	thread->ee.command = BR_OK;
5206 	INIT_LIST_HEAD(&thread->waiting_thread_node);
5207 	return thread;
5208 }
5209 
5210 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5211 {
5212 	struct binder_thread *thread;
5213 	struct binder_thread *new_thread;
5214 
5215 	binder_inner_proc_lock(proc);
5216 	thread = binder_get_thread_ilocked(proc, NULL);
5217 	binder_inner_proc_unlock(proc);
5218 	if (!thread) {
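		/*
		 * Allocate outside the inner lock: kzalloc(GFP_KERNEL) may
		 * sleep, which is not allowed under a spinlock. The lookup
		 * is repeated under the lock in case another task raced us
		 * and inserted a thread for this pid first; if so, the
		 * unused allocation is freed below.
		 */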
5219 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5220 		if (new_thread == NULL)
5221 			return NULL;
5222 		binder_inner_proc_lock(proc);
5223 		thread = binder_get_thread_ilocked(proc, new_thread);
5224 		binder_inner_proc_unlock(proc);
5225 		if (thread != new_thread)
5226 			kfree(new_thread);
5227 	}
5228 	return thread;
5229 }
5230 
5231 static void binder_free_proc(struct binder_proc *proc)
5232 {
5233 	struct binder_device *device;
5234 
5235 	BUG_ON(!list_empty(&proc->todo));
5236 	BUG_ON(!list_empty(&proc->delivered_death));
5237 	if (proc->outstanding_txns)
5238 		pr_warn("%s: Unexpected outstanding_txns %d\n",
5239 			__func__, proc->outstanding_txns);
5240 	device = container_of(proc->context, struct binder_device, context);
5241 	if (refcount_dec_and_test(&device->ref)) {
5242 		kfree(proc->context->name);
5243 		kfree(device);
5244 	}
5245 	binder_alloc_deferred_release(&proc->alloc);
5246 	put_task_struct(proc->tsk);
5247 	put_cred(proc->cred);
5248 	binder_stats_deleted(BINDER_STAT_PROC);
5249 	dbitmap_free(&proc->dmap);
5250 	kfree(proc);
5251 }
5252 
5253 static void binder_free_thread(struct binder_thread *thread)
5254 {
5255 	BUG_ON(!list_empty(&thread->todo));
5256 	binder_stats_deleted(BINDER_STAT_THREAD);
5257 	binder_proc_dec_tmpref(thread->proc);
5258 	kfree(thread);
5259 }
5260 
5261 static int binder_thread_release(struct binder_proc *proc,
5262 				 struct binder_thread *thread)
5263 {
5264 	struct binder_transaction *t;
5265 	struct binder_transaction *send_reply = NULL;
5266 	int active_transactions = 0;
5267 	struct binder_transaction *last_t = NULL;
5268 
5269 	binder_inner_proc_lock(thread->proc);
5270 	/*
5271 	 * Take a ref on the proc so it survives after we
5272 	 * remove this thread from proc->threads. The
5273 	 * corresponding decrement happens when the thread
5274 	 * is actually freed in binder_free_thread().
5275 	 */
5276 	proc->tmp_ref++;
5277 	/*
5278 	 * take a ref on this thread to ensure it
5279 	 * survives while we are releasing it
5280 	 */
5281 	atomic_inc(&thread->tmp_ref);
5282 	rb_erase(&thread->rb_node, &proc->threads);
5283 	t = thread->transaction_stack;
5284 	if (t) {
5285 		spin_lock(&t->lock);
5286 		if (t->to_thread == thread)
5287 			send_reply = t;
5288 	} else {
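		/* annotation for sparse, t->lock is not actually acquired */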
5289 		__acquire(&t->lock);
5290 	}
5291 	thread->is_dead = true;
5292 
5293 	while (t) {
5294 		last_t = t;
5295 		active_transactions++;
5296 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5297 			     "release %d:%d transaction %d %s, still active\n",
5298 			     proc->pid, thread->pid,
5299 			     t->debug_id,
5300 			     (t->to_thread == thread) ? "in" : "out");
5301 
5302 		if (t->to_thread == thread) {
5303 			thread->proc->outstanding_txns--;
5304 			t->to_proc = NULL;
5305 			t->to_thread = NULL;
5306 			if (t->buffer) {
5307 				t->buffer->transaction = NULL;
5308 				t->buffer = NULL;
5309 			}
5310 			t = t->to_parent;
5311 		} else if (t->from == thread) {
5312 			t->from = NULL;
5313 			t = t->from_parent;
5314 		} else
5315 			BUG();
5316 		spin_unlock(&last_t->lock);
5317 		if (t)
5318 			spin_lock(&t->lock);
5319 		else
5320 			__acquire(&t->lock);
5321 	}
5322 	/* annotation for sparse, lock not acquired in last iteration above */
5323 	__release(&t->lock);
5324 
5325 	/*
5326 	 * If this thread used poll, make sure we remove the waitqueue from any
5327 	 * poll data structures holding it.
5328 	 */
5329 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5330 		wake_up_pollfree(&thread->wait);
5331 
5332 	binder_inner_proc_unlock(thread->proc);
5333 
5334 	/*
5335 	 * This is needed to avoid races between wake_up_pollfree() above and
5336 	 * someone else removing the last entry from the queue for other reasons
5337 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5338 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5339 	 * we can be sure they're done after we call synchronize_rcu().
5340 	 */
5341 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5342 		synchronize_rcu();
5343 
5344 	if (send_reply)
5345 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5346 	binder_release_work(proc, &thread->todo);
5347 	binder_thread_dec_tmpref(thread);
5348 	return active_transactions;
5349 }
5350 
5351 static __poll_t binder_poll(struct file *filp,
5352 				struct poll_table_struct *wait)
5353 {
5354 	struct binder_proc *proc = filp->private_data;
5355 	struct binder_thread *thread = NULL;
5356 	bool wait_for_proc_work;
5357 
5358 	thread = binder_get_thread(proc);
5359 	if (!thread)
5360 		return EPOLLERR;
5361 
5362 	binder_inner_proc_lock(thread->proc);
5363 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5364 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5365 
5366 	binder_inner_proc_unlock(thread->proc);
5367 
5368 	poll_wait(filp, &thread->wait, wait);
5369 
5370 	if (binder_has_work(thread, wait_for_proc_work))
5371 		return EPOLLIN;
5372 
5373 	return 0;
5374 }
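
/*
 * Because binder_poll() registers thread->wait with the poll table, a
 * binder fd can be multiplexed with epoll/poll/select. A minimal
 * sketch (error handling omitted; EPOLLIN means a subsequent
 * BINDER_WRITE_READ read from this thread should find work without
 * blocking):
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
 *	epoll_wait(epfd, &ev, 1, -1);
 */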
5375 
5376 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5377 				struct binder_thread *thread)
5378 {
5379 	int ret = 0;
5380 	struct binder_proc *proc = filp->private_data;
5381 	void __user *ubuf = (void __user *)arg;
5382 	struct binder_write_read bwr;
5383 
5384 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5385 		ret = -EFAULT;
5386 		goto out;
5387 	}
5388 	binder_debug(BINDER_DEBUG_READ_WRITE,
5389 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5390 		     proc->pid, thread->pid,
5391 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5392 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5393 
5394 	if (bwr.write_size > 0) {
5395 		ret = binder_thread_write(proc, thread,
5396 					  bwr.write_buffer,
5397 					  bwr.write_size,
5398 					  &bwr.write_consumed);
5399 		trace_binder_write_done(ret);
5400 		if (ret < 0) {
5401 			bwr.read_consumed = 0;
5402 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5403 				ret = -EFAULT;
5404 			goto out;
5405 		}
5406 	}
5407 	if (bwr.read_size > 0) {
5408 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5409 					 bwr.read_size,
5410 					 &bwr.read_consumed,
5411 					 filp->f_flags & O_NONBLOCK);
5412 		trace_binder_read_done(ret);
5413 		binder_inner_proc_lock(proc);
5414 		if (!binder_worklist_empty_ilocked(&proc->todo))
5415 			binder_wakeup_proc_ilocked(proc);
5416 		binder_inner_proc_unlock(proc);
5417 		if (ret < 0) {
5418 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5419 				ret = -EFAULT;
5420 			goto out;
5421 		}
5422 	}
5423 	binder_debug(BINDER_DEBUG_READ_WRITE,
5424 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5425 		     proc->pid, thread->pid,
5426 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5427 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5428 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5429 		ret = -EFAULT;
5430 		goto out;
5431 	}
5432 out:
5433 	return ret;
5434 }
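
/*
 * Userspace reaches this through the BINDER_WRITE_READ ioctl, which
 * carries both directions in one struct binder_write_read. A minimal
 * sketch of entering the looper and then blocking for work (error
 * handling omitted; BC_ENTER_LOOPER is a bare u32 with no payload):
 *
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	char rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&enter,
 *		.write_size   = sizeof(enter),
 *		.read_buffer  = (binder_uintptr_t)rbuf,
 *		.read_size    = sizeof(rbuf),
 *	};
 *
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, write_consumed and read_consumed report how much of each
 * buffer the kernel actually processed.
 */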
5435 
5436 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5437 				    struct flat_binder_object *fbo)
5438 {
5439 	int ret = 0;
5440 	struct binder_proc *proc = filp->private_data;
5441 	struct binder_context *context = proc->context;
5442 	struct binder_node *new_node;
5443 	kuid_t curr_euid = current_euid();
5444 
5445 	mutex_lock(&context->context_mgr_node_lock);
5446 	if (context->binder_context_mgr_node) {
5447 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5448 		ret = -EBUSY;
5449 		goto out;
5450 	}
5451 	ret = security_binder_set_context_mgr(proc->cred);
5452 	if (ret < 0)
5453 		goto out;
5454 	if (uid_valid(context->binder_context_mgr_uid)) {
5455 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5456 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5457 			       from_kuid(&init_user_ns, curr_euid),
5458 			       from_kuid(&init_user_ns,
5459 					 context->binder_context_mgr_uid));
5460 			ret = -EPERM;
5461 			goto out;
5462 		}
5463 	} else {
5464 		context->binder_context_mgr_uid = curr_euid;
5465 	}
5466 	new_node = binder_new_node(proc, fbo);
5467 	if (!new_node) {
5468 		ret = -ENOMEM;
5469 		goto out;
5470 	}
5471 	binder_node_lock(new_node);
5472 	new_node->local_weak_refs++;
5473 	new_node->local_strong_refs++;
5474 	new_node->has_strong_ref = 1;
5475 	new_node->has_weak_ref = 1;
5476 	context->binder_context_mgr_node = new_node;
5477 	binder_node_unlock(new_node);
5478 	binder_put_node(new_node);
5479 out:
5480 	mutex_unlock(&context->context_mgr_node_lock);
5481 	return ret;
5482 }
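
/*
 * Only one process per binder context may become the context manager,
 * the implicit target behind handle 0. A sketch of how a service
 * manager might register itself, preferring the _EXT variant (whose
 * flat_binder_object can request FLAT_BINDER_FLAG_TXN_SECURITY_CTX)
 * and falling back to the legacy ioctl:
 *
 *	struct flat_binder_object obj = {
 *		.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &obj) < 0)
 *		ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 */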
5483 
5484 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5485 		struct binder_node_info_for_ref *info)
5486 {
5487 	struct binder_node *node;
5488 	struct binder_context *context = proc->context;
5489 	__u32 handle = info->handle;
5490 
5491 	if (info->strong_count || info->weak_count || info->reserved1 ||
5492 	    info->reserved2 || info->reserved3) {
5493 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
5494 				  proc->pid);
5495 		return -EINVAL;
5496 	}
5497 
5498 	/* This ioctl may only be used by the context manager */
5499 	mutex_lock(&context->context_mgr_node_lock);
5500 	if (!context->binder_context_mgr_node ||
5501 		context->binder_context_mgr_node->proc != proc) {
5502 		mutex_unlock(&context->context_mgr_node_lock);
5503 		return -EPERM;
5504 	}
5505 	mutex_unlock(&context->context_mgr_node_lock);
5506 
5507 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5508 	if (!node)
5509 		return -EINVAL;
5510 
5511 	info->strong_count = node->local_strong_refs +
5512 		node->internal_strong_refs;
5513 	info->weak_count = node->local_weak_refs;
5514 
5515 	binder_put_node(node);
5516 
5517 	return 0;
5518 }
5519 
5520 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5521 				struct binder_node_debug_info *info)
5522 {
5523 	struct rb_node *n;
5524 	binder_uintptr_t ptr = info->ptr;
5525 
5526 	memset(info, 0, sizeof(*info));
5527 
5528 	binder_inner_proc_lock(proc);
5529 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5530 		struct binder_node *node = rb_entry(n, struct binder_node,
5531 						    rb_node);
5532 		if (node->ptr > ptr) {
5533 			info->ptr = node->ptr;
5534 			info->cookie = node->cookie;
5535 			info->has_strong_ref = node->has_strong_ref;
5536 			info->has_weak_ref = node->has_weak_ref;
5537 			break;
5538 		}
5539 	}
5540 	binder_inner_proc_unlock(proc);
5541 
5542 	return 0;
5543 }
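
/*
 * The ptr field doubles as an iteration cursor: each call returns the
 * calling process's first node with ptr strictly greater than the
 * value passed in, and leaves the struct zeroed once the walk is
 * complete. A sketch of enumerating every node:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	do {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info))
 *			break;
 *		if (info.ptr)
 *			printf("node u%016llx c%016llx\n",
 *			       (unsigned long long)info.ptr,
 *			       (unsigned long long)info.cookie);
 *	} while (info.ptr);
 */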
5544 
5545 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5546 {
5547 	struct rb_node *n;
5548 	struct binder_thread *thread;
5549 
5550 	if (proc->outstanding_txns > 0)
5551 		return true;
5552 
5553 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5554 		thread = rb_entry(n, struct binder_thread, rb_node);
5555 		if (thread->transaction_stack)
5556 			return true;
5557 	}
5558 	return false;
5559 }
5560 
5561 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5562 {
5563 	struct binder_node *prev = NULL;
5564 	struct rb_node *n;
5565 	struct binder_ref *ref;
5566 
5567 	binder_inner_proc_lock(proc);
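	/*
	 * Taking node->lock while holding proc->inner_lock would invert
	 * the lock order, so the inner lock is dropped around each node;
	 * a tmpref pins the node across that window, and the ref on the
	 * previous node is dropped only once the walk has moved past it.
	 */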
5568 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5569 		struct binder_node *node;
5570 
5571 		node = rb_entry(n, struct binder_node, rb_node);
5572 		binder_inc_node_tmpref_ilocked(node);
5573 		binder_inner_proc_unlock(proc);
5574 		if (prev)
5575 			binder_put_node(prev);
5576 		binder_node_lock(node);
5577 		hlist_for_each_entry(ref, &node->refs, node_entry) {
5578 			/*
5579 			 * Need the node lock to synchronize
5580 			 * with new notification requests and the
5581 			 * inner lock to synchronize with queued
5582 			 * freeze notifications.
5583 			 */
5584 			binder_inner_proc_lock(ref->proc);
5585 			if (!ref->freeze) {
5586 				binder_inner_proc_unlock(ref->proc);
5587 				continue;
5588 			}
5589 			ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5590 			if (list_empty(&ref->freeze->work.entry)) {
5591 				ref->freeze->is_frozen = is_frozen;
5592 				binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5593 				binder_wakeup_proc_ilocked(ref->proc);
5594 			} else {
5595 				if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5596 					ref->freeze->resend = true;
5597 				ref->freeze->is_frozen = is_frozen;
5598 			}
5599 			binder_inner_proc_unlock(ref->proc);
5600 		}
5601 		prev = node;
5602 		binder_node_unlock(node);
5603 		binder_inner_proc_lock(proc);
5604 		if (proc->is_dead)
5605 			break;
5606 	}
5607 	binder_inner_proc_unlock(proc);
5608 	if (prev)
5609 		binder_put_node(prev);
5610 }
5611 
5612 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5613 			       struct binder_proc *target_proc)
5614 {
5615 	int ret = 0;
5616 
5617 	if (!info->enable) {
5618 		binder_inner_proc_lock(target_proc);
5619 		target_proc->sync_recv = false;
5620 		target_proc->async_recv = false;
5621 		target_proc->is_frozen = false;
5622 		binder_inner_proc_unlock(target_proc);
5623 		binder_add_freeze_work(target_proc, false);
5624 		return 0;
5625 	}
5626 
5627 	/*
5628 	 * Freezing the target. Prevent new transactions by
5629 	 * setting frozen state. If timeout specified, wait
5630 	 * for transactions to drain.
5631 	 */
5632 	binder_inner_proc_lock(target_proc);
5633 	target_proc->sync_recv = false;
5634 	target_proc->async_recv = false;
5635 	target_proc->is_frozen = true;
5636 	binder_inner_proc_unlock(target_proc);
5637 
5638 	if (info->timeout_ms > 0)
5639 		ret = wait_event_interruptible_timeout(
5640 			target_proc->freeze_wait,
5641 			(!target_proc->outstanding_txns),
5642 			msecs_to_jiffies(info->timeout_ms));
5643 
5644 	/* Check pending transactions that wait for reply */
5645 	if (ret >= 0) {
5646 		binder_inner_proc_lock(target_proc);
5647 		if (binder_txns_pending_ilocked(target_proc))
5648 			ret = -EAGAIN;
5649 		binder_inner_proc_unlock(target_proc);
5650 	}
5651 
5652 	if (ret < 0) {
5653 		binder_inner_proc_lock(target_proc);
5654 		target_proc->is_frozen = false;
5655 		binder_inner_proc_unlock(target_proc);
5656 	} else {
5657 		binder_add_freeze_work(target_proc, true);
5658 	}
5659 
5660 	return ret;
5661 }
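
/*
 * A sketch of how a userspace freezer might drive BINDER_FREEZE,
 * giving outstanding transactions 100ms to drain and retrying while
 * the kernel reports transactions still pending (the timeout and
 * retry interval are illustrative):
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	while (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 &&
 *	       errno == EAGAIN)
 *		usleep(10 * 1000);
 */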
5662 
5663 static int binder_ioctl_get_freezer_info(
5664 				struct binder_frozen_status_info *info)
5665 {
5666 	struct binder_proc *target_proc;
5667 	bool found = false;
5668 	__u32 txns_pending;
5669 
5670 	info->sync_recv = 0;
5671 	info->async_recv = 0;
5672 
5673 	mutex_lock(&binder_procs_lock);
5674 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5675 		if (target_proc->pid == info->pid) {
5676 			found = true;
5677 			binder_inner_proc_lock(target_proc);
5678 			txns_pending = binder_txns_pending_ilocked(target_proc);
5679 			info->sync_recv |= target_proc->sync_recv |
5680 					(txns_pending << 1);
5681 			info->async_recv |= target_proc->async_recv;
5682 			binder_inner_proc_unlock(target_proc);
5683 		}
5684 	}
5685 	mutex_unlock(&binder_procs_lock);
5686 
5687 	if (!found)
5688 		return -EINVAL;
5689 
5690 	return 0;
5691 }
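
/*
 * Note how sync_recv is packed above: bit 0 reports that a synchronous
 * transaction was received while frozen, bit 1 (txns_pending) reports
 * transactions still awaiting a reply. A decoding sketch:
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *
 *	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		int got_sync     = info.sync_recv & 1;
 *		int txns_pending = info.sync_recv & 2;
 *		int got_async    = info.async_recv;
 *	}
 */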
5692 
5693 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5694 					   void __user *ubuf)
5695 {
5696 	struct binder_extended_error ee;
5697 
5698 	binder_inner_proc_lock(thread->proc);
5699 	ee = thread->ee;
5700 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5701 	binder_inner_proc_unlock(thread->proc);
5702 
5703 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5704 		return -EFAULT;
5705 
5706 	return 0;
5707 }
5708 
5709 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5710 {
5711 	int ret;
5712 	struct binder_proc *proc = filp->private_data;
5713 	struct binder_thread *thread;
5714 	void __user *ubuf = (void __user *)arg;
5715 
5716 	/* pr_info("binder_ioctl: %d:%d %x %lx\n",
5717 	 *	   proc->pid, current->pid, cmd, arg); */
5718 
5719 	binder_selftest_alloc(&proc->alloc);
5720 
5721 	trace_binder_ioctl(cmd, arg);
5722 
5723 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5724 	if (ret)
5725 		goto err_unlocked;
5726 
5727 	thread = binder_get_thread(proc);
5728 	if (thread == NULL) {
5729 		ret = -ENOMEM;
5730 		goto err;
5731 	}
5732 
5733 	switch (cmd) {
5734 	case BINDER_WRITE_READ:
5735 		ret = binder_ioctl_write_read(filp, arg, thread);
5736 		if (ret)
5737 			goto err;
5738 		break;
5739 	case BINDER_SET_MAX_THREADS: {
5740 		u32 max_threads;
5741 
5742 		if (copy_from_user(&max_threads, ubuf,
5743 				   sizeof(max_threads))) {
5744 			ret = -EINVAL;
5745 			goto err;
5746 		}
5747 		binder_inner_proc_lock(proc);
5748 		proc->max_threads = max_threads;
5749 		binder_inner_proc_unlock(proc);
5750 		break;
5751 	}
5752 	case BINDER_SET_CONTEXT_MGR_EXT: {
5753 		struct flat_binder_object fbo;
5754 
5755 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5756 			ret = -EINVAL;
5757 			goto err;
5758 		}
5759 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5760 		if (ret)
5761 			goto err;
5762 		break;
5763 	}
5764 	case BINDER_SET_CONTEXT_MGR:
5765 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5766 		if (ret)
5767 			goto err;
5768 		break;
5769 	case BINDER_THREAD_EXIT:
5770 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5771 			     proc->pid, thread->pid);
5772 		binder_thread_release(proc, thread);
5773 		thread = NULL;
5774 		break;
5775 	case BINDER_VERSION: {
5776 		struct binder_version __user *ver = ubuf;
5777 
5778 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5779 			     &ver->protocol_version)) {
5780 			ret = -EINVAL;
5781 			goto err;
5782 		}
5783 		break;
5784 	}
5785 	case BINDER_GET_NODE_INFO_FOR_REF: {
5786 		struct binder_node_info_for_ref info;
5787 
5788 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5789 			ret = -EFAULT;
5790 			goto err;
5791 		}
5792 
5793 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5794 		if (ret < 0)
5795 			goto err;
5796 
5797 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5798 			ret = -EFAULT;
5799 			goto err;
5800 		}
5801 
5802 		break;
5803 	}
5804 	case BINDER_GET_NODE_DEBUG_INFO: {
5805 		struct binder_node_debug_info info;
5806 
5807 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5808 			ret = -EFAULT;
5809 			goto err;
5810 		}
5811 
5812 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5813 		if (ret < 0)
5814 			goto err;
5815 
5816 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5817 			ret = -EFAULT;
5818 			goto err;
5819 		}
5820 		break;
5821 	}
5822 	case BINDER_FREEZE: {
5823 		struct binder_freeze_info info;
5824 		struct binder_proc **target_procs = NULL, *target_proc;
5825 		int target_procs_count = 0, i = 0;
5826 
5827 		ret = 0;
5828 
5829 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5830 			ret = -EFAULT;
5831 			goto err;
5832 		}
5833 
5834 		mutex_lock(&binder_procs_lock);
5835 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5836 			if (target_proc->pid == info.pid)
5837 				target_procs_count++;
5838 		}
5839 
5840 		if (target_procs_count == 0) {
5841 			mutex_unlock(&binder_procs_lock);
5842 			ret = -EINVAL;
5843 			goto err;
5844 		}
5845 
5846 		target_procs = kcalloc(target_procs_count,
5847 				       sizeof(struct binder_proc *),
5848 				       GFP_KERNEL);
5849 
5850 		if (!target_procs) {
5851 			mutex_unlock(&binder_procs_lock);
5852 			ret = -ENOMEM;
5853 			goto err;
5854 		}
5855 
5856 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5857 			if (target_proc->pid != info.pid)
5858 				continue;
5859 
5860 			binder_inner_proc_lock(target_proc);
5861 			target_proc->tmp_ref++;
5862 			binder_inner_proc_unlock(target_proc);
5863 
5864 			target_procs[i++] = target_proc;
5865 		}
5866 		mutex_unlock(&binder_procs_lock);
5867 
5868 		for (i = 0; i < target_procs_count; i++) {
5869 			if (ret >= 0)
5870 				ret = binder_ioctl_freeze(&info,
5871 							  target_procs[i]);
5872 
5873 			binder_proc_dec_tmpref(target_procs[i]);
5874 		}
5875 
5876 		kfree(target_procs);
5877 
5878 		if (ret < 0)
5879 			goto err;
5880 		break;
5881 	}
5882 	case BINDER_GET_FROZEN_INFO: {
5883 		struct binder_frozen_status_info info;
5884 
5885 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5886 			ret = -EFAULT;
5887 			goto err;
5888 		}
5889 
5890 		ret = binder_ioctl_get_freezer_info(&info);
5891 		if (ret < 0)
5892 			goto err;
5893 
5894 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5895 			ret = -EFAULT;
5896 			goto err;
5897 		}
5898 		break;
5899 	}
5900 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5901 		uint32_t enable;
5902 
5903 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5904 			ret = -EFAULT;
5905 			goto err;
5906 		}
5907 		binder_inner_proc_lock(proc);
5908 		proc->oneway_spam_detection_enabled = (bool)enable;
5909 		binder_inner_proc_unlock(proc);
5910 		break;
5911 	}
5912 	case BINDER_GET_EXTENDED_ERROR:
5913 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5914 		if (ret < 0)
5915 			goto err;
5916 		break;
5917 	default:
5918 		ret = -EINVAL;
5919 		goto err;
5920 	}
5921 	ret = 0;
5922 err:
5923 	if (thread)
5924 		thread->looper_need_return = false;
5925 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5926 	if (ret && ret != -EINTR)
5927 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5928 err_unlocked:
5929 	trace_binder_ioctl_done(ret);
5930 	return ret;
5931 }
5932 
5933 static void binder_vma_open(struct vm_area_struct *vma)
5934 {
5935 	struct binder_proc *proc = vma->vm_private_data;
5936 
5937 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5938 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5939 		     proc->pid, vma->vm_start, vma->vm_end,
5940 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5941 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5942 }
5943 
5944 static void binder_vma_close(struct vm_area_struct *vma)
5945 {
5946 	struct binder_proc *proc = vma->vm_private_data;
5947 
5948 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5949 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5950 		     proc->pid, vma->vm_start, vma->vm_end,
5951 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5952 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5953 	binder_alloc_vma_close(&proc->alloc);
5954 }
5955 
5956 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5957 {
5958 	return VM_FAULT_SIGBUS;
5959 }
5960 
5961 static const struct vm_operations_struct binder_vm_ops = {
5962 	.open = binder_vma_open,
5963 	.close = binder_vma_close,
5964 	.fault = binder_vm_fault,
5965 };
5966 
5967 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5968 {
5969 	struct binder_proc *proc = filp->private_data;
5970 
5971 	if (proc->tsk != current->group_leader)
5972 		return -EINVAL;
5973 
5974 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5975 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5976 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5977 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5978 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5979 
5980 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5981 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5982 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5983 		return -EPERM;
5984 	}
5985 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5986 
5987 	vma->vm_ops = &binder_vm_ops;
5988 	vma->vm_private_data = proc;
5989 
5990 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5991 }
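
/*
 * The mapping is read-only for userspace: FORBIDDEN_MMAP_FLAGS rejects
 * VM_WRITE up front, and clearing VM_MAYWRITE above prevents the pages
 * from later being made writable with mprotect(). The kernel copies
 * transaction payloads into this area and reports offsets within it.
 * A typical client setup sketch (the 1 MiB size is illustrative):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */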
5992 
5993 static int binder_open(struct inode *nodp, struct file *filp)
5994 {
5995 	struct binder_proc *proc, *itr;
5996 	struct binder_device *binder_dev;
5997 	struct binderfs_info *info;
5998 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5999 	bool existing_pid = false;
6000 
6001 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6002 		     current->group_leader->pid, current->pid);
6003 
6004 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6005 	if (proc == NULL)
6006 		return -ENOMEM;
6007 
6008 	dbitmap_init(&proc->dmap);
6009 	spin_lock_init(&proc->inner_lock);
6010 	spin_lock_init(&proc->outer_lock);
6011 	get_task_struct(current->group_leader);
6012 	proc->tsk = current->group_leader;
6013 	proc->cred = get_cred(filp->f_cred);
6014 	INIT_LIST_HEAD(&proc->todo);
6015 	init_waitqueue_head(&proc->freeze_wait);
6016 	proc->default_priority = task_nice(current);
6017 	/* binderfs stashes devices in i_private */
6018 	if (is_binderfs_device(nodp)) {
6019 		binder_dev = nodp->i_private;
6020 		info = nodp->i_sb->s_fs_info;
6021 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
6022 	} else {
6023 		binder_dev = container_of(filp->private_data,
6024 					  struct binder_device, miscdev);
6025 	}
6026 	refcount_inc(&binder_dev->ref);
6027 	proc->context = &binder_dev->context;
6028 	binder_alloc_init(&proc->alloc);
6029 
6030 	binder_stats_created(BINDER_STAT_PROC);
6031 	proc->pid = current->group_leader->pid;
6032 	INIT_LIST_HEAD(&proc->delivered_death);
6033 	INIT_LIST_HEAD(&proc->delivered_freeze);
6034 	INIT_LIST_HEAD(&proc->waiting_threads);
6035 	filp->private_data = proc;
6036 
6037 	mutex_lock(&binder_procs_lock);
6038 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6039 		if (itr->pid == proc->pid) {
6040 			existing_pid = true;
6041 			break;
6042 		}
6043 	}
6044 	hlist_add_head(&proc->proc_node, &binder_procs);
6045 	mutex_unlock(&binder_procs_lock);
6046 
6047 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
6048 		char strbuf[11];
6049 
6050 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6051 		/*
6052 		 * proc debug entries are shared between contexts.
6053 		 * Only create for the first PID to avoid debugfs log spamming.
6054 		 * The printing code will print all contexts for a given
6055 		 * PID anyway, so this is not a problem.
6056 		 */
6057 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6058 			binder_debugfs_dir_entry_proc,
6059 			(void *)(unsigned long)proc->pid,
6060 			&proc_fops);
6061 	}
6062 
6063 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
6064 		char strbuf[11];
6065 		struct dentry *binderfs_entry;
6066 
6067 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6068 		/*
6069 		 * Similar to debugfs, the process specific log file is shared
6070 		 * between contexts. Only create for the first PID.
6071 		 * This is OK since, as with debugfs, the log file will
6072 		 * contain information on all contexts of a given PID.
6073 		 */
6074 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6075 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6076 		if (!IS_ERR(binderfs_entry)) {
6077 			proc->binderfs_entry = binderfs_entry;
6078 		} else {
6079 			int error;
6080 
6081 			error = PTR_ERR(binderfs_entry);
6082 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
6083 				strbuf, error);
6084 		}
6085 	}
6086 
6087 	return 0;
6088 }
6089 
6090 static int binder_flush(struct file *filp, fl_owner_t id)
6091 {
6092 	struct binder_proc *proc = filp->private_data;
6093 
6094 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6095 
6096 	return 0;
6097 }
6098 
6099 static void binder_deferred_flush(struct binder_proc *proc)
6100 {
6101 	struct rb_node *n;
6102 	int wake_count = 0;
6103 
6104 	binder_inner_proc_lock(proc);
6105 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6106 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6107 
6108 		thread->looper_need_return = true;
6109 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6110 			wake_up_interruptible(&thread->wait);
6111 			wake_count++;
6112 		}
6113 	}
6114 	binder_inner_proc_unlock(proc);
6115 
6116 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6117 		     "binder_flush: %d woke %d threads\n", proc->pid,
6118 		     wake_count);
6119 }
6120 
6121 static int binder_release(struct inode *nodp, struct file *filp)
6122 {
6123 	struct binder_proc *proc = filp->private_data;
6124 
6125 	debugfs_remove(proc->debugfs_entry);
6126 
6127 	if (proc->binderfs_entry) {
6128 		binderfs_remove_file(proc->binderfs_entry);
6129 		proc->binderfs_entry = NULL;
6130 	}
6131 
6132 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6133 
6134 	return 0;
6135 }
6136 
6137 static int binder_node_release(struct binder_node *node, int refs)
6138 {
6139 	struct binder_ref *ref;
6140 	int death = 0;
6141 	struct binder_proc *proc = node->proc;
6142 
6143 	binder_release_work(proc, &node->async_todo);
6144 
6145 	binder_node_lock(node);
6146 	binder_inner_proc_lock(proc);
6147 	binder_dequeue_work_ilocked(&node->work);
6148 	/*
6149 	 * The caller must have taken a temporary ref on the node.
6150 	 */
6151 	BUG_ON(!node->tmp_refs);
6152 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6153 		binder_inner_proc_unlock(proc);
6154 		binder_node_unlock(node);
6155 		binder_free_node(node);
6156 
6157 		return refs;
6158 	}
6159 
6160 	node->proc = NULL;
6161 	node->local_strong_refs = 0;
6162 	node->local_weak_refs = 0;
6163 	binder_inner_proc_unlock(proc);
6164 
6165 	spin_lock(&binder_dead_nodes_lock);
6166 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
6167 	spin_unlock(&binder_dead_nodes_lock);
6168 
6169 	hlist_for_each_entry(ref, &node->refs, node_entry) {
6170 		refs++;
6171 		/*
6172 		 * Need the node lock to synchronize
6173 		 * with new notification requests and the
6174 		 * inner lock to synchronize with queued
6175 		 * death notifications.
6176 		 */
6177 		binder_inner_proc_lock(ref->proc);
6178 		if (!ref->death) {
6179 			binder_inner_proc_unlock(ref->proc);
6180 			continue;
6181 		}
6182 
6183 		death++;
6184 
6185 		BUG_ON(!list_empty(&ref->death->work.entry));
6186 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6187 		binder_enqueue_work_ilocked(&ref->death->work,
6188 					    &ref->proc->todo);
6189 		binder_wakeup_proc_ilocked(ref->proc);
6190 		binder_inner_proc_unlock(ref->proc);
6191 	}
6192 
6193 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
6194 		     "node %d now dead, refs %d, death %d\n",
6195 		     node->debug_id, refs, death);
6196 	binder_node_unlock(node);
6197 	binder_put_node(node);
6198 
6199 	return refs;
6200 }
6201 
6202 static void binder_deferred_release(struct binder_proc *proc)
6203 {
6204 	struct binder_context *context = proc->context;
6205 	struct rb_node *n;
6206 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6207 
6208 	mutex_lock(&binder_procs_lock);
6209 	hlist_del(&proc->proc_node);
6210 	mutex_unlock(&binder_procs_lock);
6211 
6212 	mutex_lock(&context->context_mgr_node_lock);
6213 	if (context->binder_context_mgr_node &&
6214 	    context->binder_context_mgr_node->proc == proc) {
6215 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
6216 			     "%s: %d context_mgr_node gone\n",
6217 			     __func__, proc->pid);
6218 		context->binder_context_mgr_node = NULL;
6219 	}
6220 	mutex_unlock(&context->context_mgr_node_lock);
6221 	binder_inner_proc_lock(proc);
6222 	/*
6223 	 * Make sure the proc stays alive after we
6224 	 * remove all of its threads.
6225 	 */
6226 	proc->tmp_ref++;
6227 
6228 	proc->is_dead = true;
6229 	proc->is_frozen = false;
6230 	proc->sync_recv = false;
6231 	proc->async_recv = false;
6232 	threads = 0;
6233 	active_transactions = 0;
6234 	while ((n = rb_first(&proc->threads))) {
6235 		struct binder_thread *thread;
6236 
6237 		thread = rb_entry(n, struct binder_thread, rb_node);
6238 		binder_inner_proc_unlock(proc);
6239 		threads++;
6240 		active_transactions += binder_thread_release(proc, thread);
6241 		binder_inner_proc_lock(proc);
6242 	}
6243 
6244 	nodes = 0;
6245 	incoming_refs = 0;
6246 	while ((n = rb_first(&proc->nodes))) {
6247 		struct binder_node *node;
6248 
6249 		node = rb_entry(n, struct binder_node, rb_node);
6250 		nodes++;
6251 		/*
6252 		 * take a temporary ref on the node before
6253 		 * calling binder_node_release() which will either
6254 		 * kfree() the node or call binder_put_node()
6255 		 */
6256 		binder_inc_node_tmpref_ilocked(node);
6257 		rb_erase(&node->rb_node, &proc->nodes);
6258 		binder_inner_proc_unlock(proc);
6259 		incoming_refs = binder_node_release(node, incoming_refs);
6260 		binder_inner_proc_lock(proc);
6261 	}
6262 	binder_inner_proc_unlock(proc);
6263 
6264 	outgoing_refs = 0;
6265 	binder_proc_lock(proc);
6266 	while ((n = rb_first(&proc->refs_by_desc))) {
6267 		struct binder_ref *ref;
6268 
6269 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
6270 		outgoing_refs++;
6271 		binder_cleanup_ref_olocked(ref);
6272 		binder_proc_unlock(proc);
6273 		binder_free_ref(ref);
6274 		binder_proc_lock(proc);
6275 	}
6276 	binder_proc_unlock(proc);
6277 
6278 	binder_release_work(proc, &proc->todo);
6279 	binder_release_work(proc, &proc->delivered_death);
6280 	binder_release_work(proc, &proc->delivered_freeze);
6281 
6282 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6283 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6284 		     __func__, proc->pid, threads, nodes, incoming_refs,
6285 		     outgoing_refs, active_transactions);
6286 
6287 	binder_proc_dec_tmpref(proc);
6288 }
6289 
6290 static void binder_deferred_func(struct work_struct *work)
6291 {
6292 	struct binder_proc *proc;
6293 
6294 	int defer;
6295 
6296 	do {
6297 		mutex_lock(&binder_deferred_lock);
6298 		if (!hlist_empty(&binder_deferred_list)) {
6299 			proc = hlist_entry(binder_deferred_list.first,
6300 					struct binder_proc, deferred_work_node);
6301 			hlist_del_init(&proc->deferred_work_node);
6302 			defer = proc->deferred_work;
6303 			proc->deferred_work = 0;
6304 		} else {
6305 			proc = NULL;
6306 			defer = 0;
6307 		}
6308 		mutex_unlock(&binder_deferred_lock);
6309 
6310 		if (defer & BINDER_DEFERRED_FLUSH)
6311 			binder_deferred_flush(proc);
6312 
6313 		if (defer & BINDER_DEFERRED_RELEASE)
6314 			binder_deferred_release(proc); /* frees proc */
6315 	} while (proc);
6316 }
6317 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6318 
6319 static void
6320 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6321 {
6322 	mutex_lock(&binder_deferred_lock);
6323 	proc->deferred_work |= defer;
6324 	if (hlist_unhashed(&proc->deferred_work_node)) {
6325 		hlist_add_head(&proc->deferred_work_node,
6326 				&binder_deferred_list);
6327 		schedule_work(&binder_deferred_work);
6328 	}
6329 	mutex_unlock(&binder_deferred_lock);
6330 }
6331 
6332 static void print_binder_transaction_ilocked(struct seq_file *m,
6333 					     struct binder_proc *proc,
6334 					     const char *prefix,
6335 					     struct binder_transaction *t)
6336 {
6337 	struct binder_proc *to_proc;
6338 	struct binder_buffer *buffer = t->buffer;
6339 	ktime_t current_time = ktime_get();
6340 
6341 	spin_lock(&t->lock);
6342 	to_proc = t->to_proc;
6343 	seq_printf(m,
6344 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6345 		   prefix, t->debug_id, t,
6346 		   t->from_pid,
6347 		   t->from_tid,
6348 		   to_proc ? to_proc->pid : 0,
6349 		   t->to_thread ? t->to_thread->pid : 0,
6350 		   t->code, t->flags, t->priority, t->need_reply,
6351 		   ktime_ms_delta(current_time, t->start_time));
6352 	spin_unlock(&t->lock);
6353 
6354 	if (proc != to_proc) {
6355 		/*
6356 		 * We can only safely dereference buffer while holding
6357 		 * the inner lock of the proc that owns it.
6358 		 */
6359 		seq_puts(m, "\n");
6360 		return;
6361 	}
6362 
6363 	if (buffer == NULL) {
6364 		seq_puts(m, " buffer free\n");
6365 		return;
6366 	}
6367 	if (buffer->target_node)
6368 		seq_printf(m, " node %d", buffer->target_node->debug_id);
6369 	seq_printf(m, " size %zd:%zd offset %lx\n",
6370 		   buffer->data_size, buffer->offsets_size,
6371 		   buffer->user_data - proc->alloc.vm_start);
6372 }
6373 
6374 static void print_binder_work_ilocked(struct seq_file *m,
6375 				     struct binder_proc *proc,
6376 				     const char *prefix,
6377 				     const char *transaction_prefix,
6378 				     struct binder_work *w)
6379 {
6380 	struct binder_node *node;
6381 	struct binder_transaction *t;
6382 
6383 	switch (w->type) {
6384 	case BINDER_WORK_TRANSACTION:
6385 		t = container_of(w, struct binder_transaction, work);
6386 		print_binder_transaction_ilocked(
6387 				m, proc, transaction_prefix, t);
6388 		break;
6389 	case BINDER_WORK_RETURN_ERROR: {
6390 		struct binder_error *e = container_of(
6391 				w, struct binder_error, work);
6392 
6393 		seq_printf(m, "%stransaction error: %u\n",
6394 			   prefix, e->cmd);
6395 	} break;
6396 	case BINDER_WORK_TRANSACTION_COMPLETE:
6397 		seq_printf(m, "%stransaction complete\n", prefix);
6398 		break;
6399 	case BINDER_WORK_NODE:
6400 		node = container_of(w, struct binder_node, work);
6401 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6402 			   prefix, node->debug_id,
6403 			   (u64)node->ptr, (u64)node->cookie);
6404 		break;
6405 	case BINDER_WORK_DEAD_BINDER:
6406 		seq_printf(m, "%shas dead binder\n", prefix);
6407 		break;
6408 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6409 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6410 		break;
6411 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6412 		seq_printf(m, "%shas cleared death notification\n", prefix);
6413 		break;
6414 	case BINDER_WORK_FROZEN_BINDER:
6415 		seq_printf(m, "%shas frozen binder\n", prefix);
6416 		break;
6417 	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6418 		seq_printf(m, "%shas cleared freeze notification\n", prefix);
6419 		break;
6420 	default:
6421 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6422 		break;
6423 	}
6424 }
6425 
6426 static void print_binder_thread_ilocked(struct seq_file *m,
6427 					struct binder_thread *thread,
6428 					int print_always)
6429 {
6430 	struct binder_transaction *t;
6431 	struct binder_work *w;
6432 	size_t start_pos = m->count;
6433 	size_t header_pos;
6434 
6435 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6436 			thread->pid, thread->looper,
6437 			thread->looper_need_return,
6438 			atomic_read(&thread->tmp_ref));
6439 	header_pos = m->count;
6440 	t = thread->transaction_stack;
6441 	while (t) {
6442 		if (t->from == thread) {
6443 			print_binder_transaction_ilocked(m, thread->proc,
6444 					"    outgoing transaction", t);
6445 			t = t->from_parent;
6446 		} else if (t->to_thread == thread) {
6447 			print_binder_transaction_ilocked(m, thread->proc,
6448 						 "    incoming transaction", t);
6449 			t = t->to_parent;
6450 		} else {
6451 			print_binder_transaction_ilocked(m, thread->proc,
6452 					"    bad transaction", t);
6453 			t = NULL;
6454 		}
6455 	}
6456 	list_for_each_entry(w, &thread->todo, entry) {
6457 		print_binder_work_ilocked(m, thread->proc, "    ",
6458 					  "    pending transaction", w);
6459 	}
6460 	if (!print_always && m->count == header_pos)
6461 		m->count = start_pos;
6462 }
6463 
6464 static void print_binder_node_nilocked(struct seq_file *m,
6465 				       struct binder_node *node)
6466 {
6467 	struct binder_ref *ref;
6468 	struct binder_work *w;
6469 	int count;
6470 
6471 	count = hlist_count_nodes(&node->refs);
6472 
6473 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6474 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
6475 		   node->has_strong_ref, node->has_weak_ref,
6476 		   node->local_strong_refs, node->local_weak_refs,
6477 		   node->internal_strong_refs, count, node->tmp_refs);
6478 	if (count) {
6479 		seq_puts(m, " proc");
6480 		hlist_for_each_entry(ref, &node->refs, node_entry)
6481 			seq_printf(m, " %d", ref->proc->pid);
6482 	}
6483 	seq_puts(m, "\n");
6484 	if (node->proc) {
6485 		list_for_each_entry(w, &node->async_todo, entry)
6486 			print_binder_work_ilocked(m, node->proc, "    ",
6487 					  "    pending async transaction", w);
6488 	}
6489 }
6490 
6491 static void print_binder_ref_olocked(struct seq_file *m,
6492 				     struct binder_ref *ref)
6493 {
6494 	binder_node_lock(ref->node);
6495 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6496 		   ref->data.debug_id, ref->data.desc,
6497 		   ref->node->proc ? "" : "dead ",
6498 		   ref->node->debug_id, ref->data.strong,
6499 		   ref->data.weak, ref->death);
6500 	binder_node_unlock(ref->node);
6501 }
6502 
6503 static void print_binder_proc(struct seq_file *m,
6504 			      struct binder_proc *proc, int print_all)
6505 {
6506 	struct binder_work *w;
6507 	struct rb_node *n;
6508 	size_t start_pos = m->count;
6509 	size_t header_pos;
6510 	struct binder_node *last_node = NULL;
6511 
6512 	seq_printf(m, "proc %d\n", proc->pid);
6513 	seq_printf(m, "context %s\n", proc->context->name);
6514 	header_pos = m->count;
6515 
6516 	binder_inner_proc_lock(proc);
6517 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6518 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6519 						rb_node), print_all);
6520 
6521 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6522 		struct binder_node *node = rb_entry(n, struct binder_node,
6523 						    rb_node);
6524 		if (!print_all && !node->has_async_transaction)
6525 			continue;
6526 
6527 		/*
6528 		 * take a temporary reference on the node so it
6529 		 * survives and isn't removed from the tree
6530 		 * while we print it.
6531 		 */
6532 		binder_inc_node_tmpref_ilocked(node);
6533 		/* Need to drop inner lock to take node lock */
6534 		binder_inner_proc_unlock(proc);
6535 		if (last_node)
6536 			binder_put_node(last_node);
6537 		binder_node_inner_lock(node);
6538 		print_binder_node_nilocked(m, node);
6539 		binder_node_inner_unlock(node);
6540 		last_node = node;
6541 		binder_inner_proc_lock(proc);
6542 	}
6543 	binder_inner_proc_unlock(proc);
6544 	if (last_node)
6545 		binder_put_node(last_node);
6546 
6547 	if (print_all) {
6548 		binder_proc_lock(proc);
6549 		for (n = rb_first(&proc->refs_by_desc);
6550 		     n != NULL;
6551 		     n = rb_next(n))
6552 			print_binder_ref_olocked(m, rb_entry(n,
6553 							    struct binder_ref,
6554 							    rb_node_desc));
6555 		binder_proc_unlock(proc);
6556 	}
6557 	binder_alloc_print_allocated(m, &proc->alloc);
6558 	binder_inner_proc_lock(proc);
6559 	list_for_each_entry(w, &proc->todo, entry)
6560 		print_binder_work_ilocked(m, proc, "  ",
6561 					  "  pending transaction", w);
6562 	list_for_each_entry(w, &proc->delivered_death, entry) {
6563 		seq_puts(m, "  has delivered dead binder\n");
6564 		break;
6565 	}
6566 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
6567 		seq_puts(m, "  has delivered freeze binder\n");
6568 		break;
6569 	}
6570 	binder_inner_proc_unlock(proc);
6571 	if (!print_all && m->count == header_pos)
6572 		m->count = start_pos;
6573 }
6574 
6575 static const char * const binder_return_strings[] = {
6576 	"BR_ERROR",
6577 	"BR_OK",
6578 	"BR_TRANSACTION",
6579 	"BR_REPLY",
6580 	"BR_ACQUIRE_RESULT",
6581 	"BR_DEAD_REPLY",
6582 	"BR_TRANSACTION_COMPLETE",
6583 	"BR_INCREFS",
6584 	"BR_ACQUIRE",
6585 	"BR_RELEASE",
6586 	"BR_DECREFS",
6587 	"BR_ATTEMPT_ACQUIRE",
6588 	"BR_NOOP",
6589 	"BR_SPAWN_LOOPER",
6590 	"BR_FINISHED",
6591 	"BR_DEAD_BINDER",
6592 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6593 	"BR_FAILED_REPLY",
6594 	"BR_FROZEN_REPLY",
6595 	"BR_ONEWAY_SPAM_SUSPECT",
6596 	"BR_TRANSACTION_PENDING_FROZEN",
6597 	"BR_FROZEN_BINDER",
6598 	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6599 };
6600 
6601 static const char * const binder_command_strings[] = {
6602 	"BC_TRANSACTION",
6603 	"BC_REPLY",
6604 	"BC_ACQUIRE_RESULT",
6605 	"BC_FREE_BUFFER",
6606 	"BC_INCREFS",
6607 	"BC_ACQUIRE",
6608 	"BC_RELEASE",
6609 	"BC_DECREFS",
6610 	"BC_INCREFS_DONE",
6611 	"BC_ACQUIRE_DONE",
6612 	"BC_ATTEMPT_ACQUIRE",
6613 	"BC_REGISTER_LOOPER",
6614 	"BC_ENTER_LOOPER",
6615 	"BC_EXIT_LOOPER",
6616 	"BC_REQUEST_DEATH_NOTIFICATION",
6617 	"BC_CLEAR_DEATH_NOTIFICATION",
6618 	"BC_DEAD_BINDER_DONE",
6619 	"BC_TRANSACTION_SG",
6620 	"BC_REPLY_SG",
6621 	"BC_REQUEST_FREEZE_NOTIFICATION",
6622 	"BC_CLEAR_FREEZE_NOTIFICATION",
6623 	"BC_FREEZE_NOTIFICATION_DONE",
6624 };
6625 
6626 static const char * const binder_objstat_strings[] = {
6627 	"proc",
6628 	"thread",
6629 	"node",
6630 	"ref",
6631 	"death",
6632 	"transaction",
6633 	"transaction_complete",
6634 	"freeze",
6635 };
6636 
6637 static void print_binder_stats(struct seq_file *m, const char *prefix,
6638 			       struct binder_stats *stats)
6639 {
6640 	int i;
6641 
6642 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6643 		     ARRAY_SIZE(binder_command_strings));
6644 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6645 		int temp = atomic_read(&stats->bc[i]);
6646 
6647 		if (temp)
6648 			seq_printf(m, "%s%s: %d\n", prefix,
6649 				   binder_command_strings[i], temp);
6650 	}
6651 
6652 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6653 		     ARRAY_SIZE(binder_return_strings));
6654 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6655 		int temp = atomic_read(&stats->br[i]);
6656 
6657 		if (temp)
6658 			seq_printf(m, "%s%s: %d\n", prefix,
6659 				   binder_return_strings[i], temp);
6660 	}
6661 
6662 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6663 		     ARRAY_SIZE(binder_objstat_strings));
6664 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6665 		     ARRAY_SIZE(stats->obj_deleted));
6666 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6667 		int created = atomic_read(&stats->obj_created[i]);
6668 		int deleted = atomic_read(&stats->obj_deleted[i]);
6669 
6670 		if (created || deleted)
6671 			seq_printf(m, "%s%s: active %d total %d\n",
6672 				prefix,
6673 				binder_objstat_strings[i],
6674 				created - deleted,
6675 				created);
6676 	}
6677 }
6678 
6679 static void print_binder_proc_stats(struct seq_file *m,
6680 				    struct binder_proc *proc)
6681 {
6682 	struct binder_work *w;
6683 	struct binder_thread *thread;
6684 	struct rb_node *n;
6685 	int count, strong, weak, ready_threads;
6686 	size_t free_async_space =
6687 		binder_alloc_get_free_async_space(&proc->alloc);
6688 
6689 	seq_printf(m, "proc %d\n", proc->pid);
6690 	seq_printf(m, "context %s\n", proc->context->name);
6691 	count = 0;
6692 	ready_threads = 0;
6693 	binder_inner_proc_lock(proc);
6694 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6695 		count++;
6696 
6697 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6698 		ready_threads++;
6699 
6700 	seq_printf(m, "  threads: %d\n", count);
6701 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6702 			"  ready threads %d\n"
6703 			"  free async space %zd\n", proc->requested_threads,
6704 			proc->requested_threads_started, proc->max_threads,
6705 			ready_threads,
6706 			free_async_space);
6707 	count = 0;
6708 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6709 		count++;
6710 	binder_inner_proc_unlock(proc);
6711 	seq_printf(m, "  nodes: %d\n", count);
6712 	count = 0;
6713 	strong = 0;
6714 	weak = 0;
6715 	binder_proc_lock(proc);
6716 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6717 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6718 						  rb_node_desc);
6719 		count++;
6720 		strong += ref->data.strong;
6721 		weak += ref->data.weak;
6722 	}
6723 	binder_proc_unlock(proc);
6724 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6725 
6726 	count = binder_alloc_get_allocated_count(&proc->alloc);
6727 	seq_printf(m, "  buffers: %d\n", count);
6728 
6729 	binder_alloc_print_pages(m, &proc->alloc);
6730 
6731 	count = 0;
6732 	binder_inner_proc_lock(proc);
6733 	list_for_each_entry(w, &proc->todo, entry) {
6734 		if (w->type == BINDER_WORK_TRANSACTION)
6735 			count++;
6736 	}
6737 	binder_inner_proc_unlock(proc);
6738 	seq_printf(m, "  pending transactions: %d\n", count);
6739 
6740 	print_binder_stats(m, "  ", &proc->stats);
6741 }
6742 
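/*
 * debugfs "state" file: print the dead-nodes list (nodes that have
 * outlived their owning process) followed by the full state of every
 * live process. binder_dead_nodes_lock is dropped around each print;
 * the node's tmp_refs and the last_node cursor keep the current list
 * position valid until the lock is retaken.
 */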
6743 static int state_show(struct seq_file *m, void *unused)
6744 {
6745 	struct binder_proc *proc;
6746 	struct binder_node *node;
6747 	struct binder_node *last_node = NULL;
6748 
6749 	seq_puts(m, "binder state:\n");
6750 
6751 	spin_lock(&binder_dead_nodes_lock);
6752 	if (!hlist_empty(&binder_dead_nodes))
6753 		seq_puts(m, "dead nodes:\n");
6754 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6755 		/*
6756 		 * take a temporary reference on the node so it
6757 		 * survives and isn't removed from the list
6758 		 * while we print it.
6759 		 */
6760 		node->tmp_refs++;
6761 		spin_unlock(&binder_dead_nodes_lock);
6762 		if (last_node)
6763 			binder_put_node(last_node);
6764 		binder_node_lock(node);
6765 		print_binder_node_nilocked(m, node);
6766 		binder_node_unlock(node);
6767 		last_node = node;
6768 		spin_lock(&binder_dead_nodes_lock);
6769 	}
6770 	spin_unlock(&binder_dead_nodes_lock);
6771 	if (last_node)
6772 		binder_put_node(last_node);
6773 
6774 	mutex_lock(&binder_procs_lock);
6775 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6776 		print_binder_proc(m, proc, 1);
6777 	mutex_unlock(&binder_procs_lock);
6778 
6779 	return 0;
6780 }
6781 
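/* debugfs "stats" file: global counters, then one block per process */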
6782 static int stats_show(struct seq_file *m, void *unused)
6783 {
6784 	struct binder_proc *proc;
6785 
6786 	seq_puts(m, "binder stats:\n");
6787 
6788 	print_binder_stats(m, "", &binder_stats);
6789 
6790 	mutex_lock(&binder_procs_lock);
6791 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6792 		print_binder_proc_stats(m, proc);
6793 	mutex_unlock(&binder_procs_lock);
6794 
6795 	return 0;
6796 }
6797 
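/* debugfs "transactions" file: in-flight transactions of every process */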
6798 static int transactions_show(struct seq_file *m, void *unused)
6799 {
6800 	struct binder_proc *proc;
6801 
6802 	seq_puts(m, "binder transactions:\n");
6803 	mutex_lock(&binder_procs_lock);
6804 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6805 		print_binder_proc(m, proc, 0);
6806 	mutex_unlock(&binder_procs_lock);
6807 
6808 	return 0;
6809 }
6810 
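/*
 * debugfs "proc/<pid>" file: dump every binder_proc whose pid matches
 * m->private. A task gets one binder_proc per binder device it has
 * open, so a single pid can match more than one entry.
 */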
6811 static int proc_show(struct seq_file *m, void *unused)
6812 {
6813 	struct binder_proc *itr;
6814 	int pid = (unsigned long)m->private;
6815 
6816 	mutex_lock(&binder_procs_lock);
6817 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6818 		if (itr->pid == pid) {
6819 			seq_puts(m, "binder proc state:\n");
6820 			print_binder_proc(m, itr, 1);
6821 		}
6822 	}
6823 	mutex_unlock(&binder_procs_lock);
6824 
6825 	return 0;
6826 }
6827 
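/*
 * Print one transaction log entry without taking any lock. The writer
 * side publishes e->debug_id_done last, so the entry is reported
 * "(incomplete)" if debug_id_done is still zero or changed while the
 * fields were being read. call_type is 0 for a synchronous call, 1
 * for async and 2 for a reply.
 */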
6828 static void print_binder_transaction_log_entry(struct seq_file *m,
6829 					struct binder_transaction_log_entry *e)
6830 {
6831 	int debug_id = READ_ONCE(e->debug_id_done);
6832 	/*
6833 	 * read barrier to guarantee debug_id_done is read before
6834 	 * we print the log values
6835 	 */
6836 	smp_rmb();
6837 	seq_printf(m,
6838 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6839 		   e->debug_id, (e->call_type == 2) ? "reply" :
6840 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6841 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6842 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6843 		   e->return_error, e->return_error_param,
6844 		   e->return_error_line);
6845 	/*
6846 	 * read barrier to guarantee debug_id_done is not re-read
6847 	 * until we are done printing the fields of the entry
6848 	 */
6849 	smp_rmb();
6850 	seq_puts(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6851 			"\n" : " (incomplete)\n");
6852 }
6853 
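/*
 * Dump a transaction log ring buffer, oldest entry first. m->private
 * selects binder_transaction_log or binder_transaction_log_failed.
 * log->cur is a free-running index: before the first wrap, entries
 * 0..cur are printed; once the log is full, all entries are printed
 * starting at (cur + 1) % size, which is the oldest slot.
 */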
6854 static int transaction_log_show(struct seq_file *m, void *unused)
6855 {
6856 	struct binder_transaction_log *log = m->private;
6857 	unsigned int log_cur = atomic_read(&log->cur);
6858 	unsigned int count;
6859 	unsigned int cur;
6860 	int i;
6861 
6862 	count = log_cur + 1;
6863 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6864 		0 : count % ARRAY_SIZE(log->entry);
6865 	if (count > ARRAY_SIZE(log->entry) || log->full)
6866 		count = ARRAY_SIZE(log->entry);
6867 	for (i = 0; i < count; i++) {
6868 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6869 
6870 		print_binder_transaction_log_entry(m, &log->entry[index]);
6871 	}
6872 	return 0;
6873 }
6874 
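/* Entry points shared by the misc /dev/binder* nodes and binderfs. */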
6875 const struct file_operations binder_fops = {
6876 	.owner = THIS_MODULE,
6877 	.poll = binder_poll,
6878 	.unlocked_ioctl = binder_ioctl,
6879 	.compat_ioctl = compat_ptr_ioctl,
6880 	.mmap = binder_mmap,
6881 	.open = binder_open,
6882 	.flush = binder_flush,
6883 	.release = binder_release,
6884 };
6885 
6886 DEFINE_SHOW_ATTRIBUTE(state);
6887 DEFINE_SHOW_ATTRIBUTE(stats);
6888 DEFINE_SHOW_ATTRIBUTE(transactions);
6889 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6890 
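/*
 * Table of files created under the driver's debugfs directory,
 * typically /sys/kernel/debug/binder/; binderfs reuses the same table
 * for its log files. A NULL .data means the show function takes no
 * argument. Terminated by an empty entry.
 */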
6891 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6892 	{
6893 		.name = "state",
6894 		.mode = 0444,
6895 		.fops = &state_fops,
6896 		.data = NULL,
6897 	},
6898 	{
6899 		.name = "stats",
6900 		.mode = 0444,
6901 		.fops = &stats_fops,
6902 		.data = NULL,
6903 	},
6904 	{
6905 		.name = "transactions",
6906 		.mode = 0444,
6907 		.fops = &transactions_fops,
6908 		.data = NULL,
6909 	},
6910 	{
6911 		.name = "transaction_log",
6912 		.mode = 0444,
6913 		.fops = &transaction_log_fops,
6914 		.data = &binder_transaction_log,
6915 	},
6916 	{
6917 		.name = "failed_transaction_log",
6918 		.mode = 0444,
6919 		.fops = &transaction_log_fops,
6920 		.data = &binder_transaction_log_failed,
6921 	},
6922 	{} /* terminator */
6923 };
6924 
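/*
 * Add a dynamically created device (e.g. a binderfs node) to the
 * global binder_devices list.
 */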
6925 void binder_add_device(struct binder_device *device)
6926 {
6927 	hlist_add_head(&device->hlist, &binder_devices);
6928 }
6929 
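/**
 * init_binder_device() - allocate and register one binder misc device
 * @name: device node name from the binder_devices_param list
 *
 * Return: 0 on success, -ENOMEM or the error from misc_register()
 * on failure.
 */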
6930 static int __init init_binder_device(const char *name)
6931 {
6932 	int ret;
6933 	struct binder_device *binder_device;
6934 
6935 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6936 	if (!binder_device)
6937 		return -ENOMEM;
6938 
6939 	binder_device->miscdev.fops = &binder_fops;
6940 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6941 	binder_device->miscdev.name = name;
6942 
6943 	refcount_set(&binder_device->ref, 1);
6944 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6945 	binder_device->context.name = name;
6946 	mutex_init(&binder_device->context.context_mgr_node_lock);
6947 
6948 	ret = misc_register(&binder_device->miscdev);
6949 	if (ret < 0) {
6950 		kfree(binder_device);
6951 		return ret;
6952 	}
6953 
6954 	hlist_add_head(&binder_device->hlist, &binder_devices);
6955 
6956 	return ret;
6957 }
6958 
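/*
 * Module init: set both transaction log cursors to ~0U so the first
 * logged entry wraps to index 0, create the debugfs tree, register a
 * misc device per name in binder_devices_param (only when binderfs is
 * not providing the device nodes), then initialize binderfs. Each
 * error label unwinds everything set up before the failure.
 */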
6959 static int __init binder_init(void)
6960 {
6961 	int ret;
6962 	char *device_name, *device_tmp;
6963 	struct binder_device *device;
6964 	struct hlist_node *tmp;
6965 	char *device_names = NULL;
6966 	const struct binder_debugfs_entry *db_entry;
6967 
6968 	ret = binder_alloc_shrinker_init();
6969 	if (ret)
6970 		return ret;
6971 
6972 	atomic_set(&binder_transaction_log.cur, ~0U);
6973 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6974 
6975 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6976 
6977 	binder_for_each_debugfs_entry(db_entry)
6978 		debugfs_create_file(db_entry->name,
6979 					db_entry->mode,
6980 					binder_debugfs_dir_entry_root,
6981 					db_entry->data,
6982 					db_entry->fops);
6983 
6984 	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6985 						binder_debugfs_dir_entry_root);
6986 
6987 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6988 	    strcmp(binder_devices_param, "") != 0) {
6989 		/*
6990 		 * Copy the module parameter string because we don't want to
6991 		 * tokenize it in place.
6992 		 */
6993 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6994 		if (!device_names) {
6995 			ret = -ENOMEM;
6996 			goto err_alloc_device_names_failed;
6997 		}
6998 
6999 		device_tmp = device_names;
7000 		while ((device_name = strsep(&device_tmp, ","))) {
7001 			ret = init_binder_device(device_name);
7002 			if (ret)
7003 				goto err_init_binder_device_failed;
7004 		}
7005 	}
7006 
7007 	ret = init_binderfs();
7008 	if (ret)
7009 		goto err_init_binder_device_failed;
7010 
7011 	return ret;
7012 
7013 err_init_binder_device_failed:
7014 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7015 		misc_deregister(&device->miscdev);
7016 		hlist_del(&device->hlist);
7017 		kfree(device);
7018 	}
7019 
7020 	kfree(device_names);
7021 
7022 err_alloc_device_names_failed:
7023 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7024 	binder_alloc_shrinker_exit();
7025 
7026 	return ret;
7027 }
7028 
7029 device_initcall(binder_init);
7030 
7031 #define CREATE_TRACE_POINTS
7032 #include "binder_trace.h"
7033 
7034 MODULE_LICENSE("GPL v2");
7035