1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate which lock
33  * in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
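
/*
 * Illustrative sketch (not part of the driver): a path needing all three
 * locks must take them in the order above. Assuming a proc P that owns a
 * node N, a hypothetical helper would nest them as:
 *
 *	binder_proc_lock(P);		// 1) P->outer_lock
 *	binder_node_lock(N);		// 2) N->lock
 *	binder_inner_proc_lock(P);	// 3) P->inner_lock
 *	...				// e.g. walk P->todo or N->async_todo
 *	binder_inner_proc_unlock(P);
 *	binder_node_unlock(N);
 *	binder_proc_unlock(P);
 *
 * A helper named foo_nilocked() would be called from inside such a region,
 * once locks 2) and 3) are already held.
 */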
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70 
71 #include <uapi/linux/android/binder.h>
72 
73 #include <linux/cacheflush.h>
74 
75 #include "binder_internal.h"
76 #include "binder_trace.h"
77 
78 static HLIST_HEAD(binder_deferred_list);
79 static DEFINE_MUTEX(binder_deferred_lock);
80 
81 static HLIST_HEAD(binder_devices);
82 static HLIST_HEAD(binder_procs);
83 static DEFINE_MUTEX(binder_procs_lock);
84 
85 static HLIST_HEAD(binder_dead_nodes);
86 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87 
88 static struct dentry *binder_debugfs_dir_entry_root;
89 static struct dentry *binder_debugfs_dir_entry_proc;
90 static atomic_t binder_last_id;
91 
92 static int proc_show(struct seq_file *m, void *unused);
93 DEFINE_SHOW_ATTRIBUTE(proc);
94 
95 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
96 
97 enum {
98 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
99 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
100 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
101 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
102 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
103 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
104 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
105 	BINDER_DEBUG_USER_REFS              = 1U << 7,
106 	BINDER_DEBUG_THREADS                = 1U << 8,
107 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
108 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
109 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
110 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
111 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
112 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
113 };
114 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
115 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
116 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
117 
118 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
119 module_param_named(devices, binder_devices_param, charp, 0444);
120 
121 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
122 static int binder_stop_on_user_error;
123 
124 static int binder_set_stop_on_user_error(const char *val,
125 					 const struct kernel_param *kp)
126 {
127 	int ret;
128 
129 	ret = param_set_int(val, kp);
130 	if (binder_stop_on_user_error < 2)
131 		wake_up(&binder_user_error_wait);
132 	return ret;
133 }
134 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
135 	param_get_int, &binder_stop_on_user_error, 0644);
136 
137 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
138 {
139 	struct va_format vaf;
140 	va_list args;
141 
142 	if (binder_debug_mask & mask) {
143 		va_start(args, format);
144 		vaf.va = &args;
145 		vaf.fmt = format;
146 		pr_info_ratelimited("%pV", &vaf);
147 		va_end(args);
148 	}
149 }
150 
151 #define binder_txn_error(x...) \
152 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
153 
154 static __printf(1, 2) void binder_user_error(const char *format, ...)
155 {
156 	struct va_format vaf;
157 	va_list args;
158 
159 	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
160 		va_start(args, format);
161 		vaf.va = &args;
162 		vaf.fmt = format;
163 		pr_info_ratelimited("%pV", &vaf);
164 		va_end(args);
165 	}
166 
167 	if (binder_stop_on_user_error)
168 		binder_stop_on_user_error = 2;
169 }
170 
171 #define binder_set_extended_error(ee, _id, _command, _param) \
172 	do { \
173 		(ee)->id = _id; \
174 		(ee)->command = _command; \
175 		(ee)->param = _param; \
176 	} while (0)
177 
178 #define to_flat_binder_object(hdr) \
179 	container_of(hdr, struct flat_binder_object, hdr)
180 
181 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
182 
183 #define to_binder_buffer_object(hdr) \
184 	container_of(hdr, struct binder_buffer_object, hdr)
185 
186 #define to_binder_fd_array_object(hdr) \
187 	container_of(hdr, struct binder_fd_array_object, hdr)
188 
189 static struct binder_stats binder_stats;
190 
191 static inline void binder_stats_deleted(enum binder_stat_types type)
192 {
193 	atomic_inc(&binder_stats.obj_deleted[type]);
194 }
195 
196 static inline void binder_stats_created(enum binder_stat_types type)
197 {
198 	atomic_inc(&binder_stats.obj_created[type]);
199 }
200 
201 struct binder_transaction_log_entry {
202 	int debug_id;
203 	int debug_id_done;
204 	int call_type;
205 	int from_proc;
206 	int from_thread;
207 	int target_handle;
208 	int to_proc;
209 	int to_thread;
210 	int to_node;
211 	int data_size;
212 	int offsets_size;
213 	int return_error_line;
214 	uint32_t return_error;
215 	uint32_t return_error_param;
216 	char context_name[BINDERFS_MAX_NAME + 1];
217 };
218 
219 struct binder_transaction_log {
220 	atomic_t cur;
221 	bool full;
222 	struct binder_transaction_log_entry entry[32];
223 };
224 
225 static struct binder_transaction_log binder_transaction_log;
226 static struct binder_transaction_log binder_transaction_log_failed;
227 
228 static struct binder_transaction_log_entry *binder_transaction_log_add(
229 	struct binder_transaction_log *log)
230 {
231 	struct binder_transaction_log_entry *e;
232 	unsigned int cur = atomic_inc_return(&log->cur);
233 
234 	if (cur >= ARRAY_SIZE(log->entry))
235 		log->full = true;
236 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
237 	WRITE_ONCE(e->debug_id_done, 0);
238 	/*
239 	 * write-barrier to synchronize access to e->debug_id_done.
240 	 * We make sure the initialized 0 value is seen before
241 	 * the other fields are zeroed by memset().
242 	 */
243 	smp_wmb();
244 	memset(e, 0, sizeof(*e));
245 	return e;
246 }
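
/*
 * Sketch of the matching read side (the real consumer is the debugfs log
 * printer, outside this excerpt): the smp_wmb() above pairs with a read
 * barrier taken after loading debug_id_done, so a reader that sees a
 * non-zero debug_id_done also sees the fully written entry:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	if (done)
 *		...	// entry fields are stable enough to print
 */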
247 
248 enum binder_deferred_state {
249 	BINDER_DEFERRED_FLUSH        = 0x01,
250 	BINDER_DEFERRED_RELEASE      = 0x02,
251 };
252 
253 enum {
254 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
255 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
256 	BINDER_LOOPER_STATE_EXITED      = 0x04,
257 	BINDER_LOOPER_STATE_INVALID     = 0x08,
258 	BINDER_LOOPER_STATE_WAITING     = 0x10,
259 	BINDER_LOOPER_STATE_POLL        = 0x20,
260 };
261 
262 /**
263  * binder_proc_lock() - Acquire outer lock for given binder_proc
264  * @proc:         struct binder_proc to acquire
265  *
266  * Acquires proc->outer_lock. Used to protect binder_ref
267  * structures associated with the given proc.
268  */
269 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
270 static void
271 _binder_proc_lock(struct binder_proc *proc, int line)
272 	__acquires(&proc->outer_lock)
273 {
274 	binder_debug(BINDER_DEBUG_SPINLOCKS,
275 		     "%s: line=%d\n", __func__, line);
276 	spin_lock(&proc->outer_lock);
277 }
278 
279 /**
280  * binder_proc_unlock() - Release spinlock for given binder_proc
281  * @proc:                struct binder_proc to release
282  *
283  * Release lock acquired via binder_proc_lock()
284  */
285 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
286 static void
287 _binder_proc_unlock(struct binder_proc *proc, int line)
288 	__releases(&proc->outer_lock)
289 {
290 	binder_debug(BINDER_DEBUG_SPINLOCKS,
291 		     "%s: line=%d\n", __func__, line);
292 	spin_unlock(&proc->outer_lock);
293 }
294 
295 /**
296  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
297  * @proc:         struct binder_proc to acquire
298  *
299  * Acquires proc->inner_lock. Used to protect todo lists
300  */
301 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
302 static void
303 _binder_inner_proc_lock(struct binder_proc *proc, int line)
304 	__acquires(&proc->inner_lock)
305 {
306 	binder_debug(BINDER_DEBUG_SPINLOCKS,
307 		     "%s: line=%d\n", __func__, line);
308 	spin_lock(&proc->inner_lock);
309 }
310 
311 /**
312  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
313  * @proc:         struct binder_proc to release
314  *
315  * Release lock acquired via binder_inner_proc_lock()
316  */
317 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
318 static void
319 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
320 	__releases(&proc->inner_lock)
321 {
322 	binder_debug(BINDER_DEBUG_SPINLOCKS,
323 		     "%s: line=%d\n", __func__, line);
324 	spin_unlock(&proc->inner_lock);
325 }
326 
327 /**
328  * binder_node_lock() - Acquire spinlock for given binder_node
329  * @node:         struct binder_node to acquire
330  *
331  * Acquires node->lock. Used to protect binder_node fields
332  */
333 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
334 static void
335 _binder_node_lock(struct binder_node *node, int line)
336 	__acquires(&node->lock)
337 {
338 	binder_debug(BINDER_DEBUG_SPINLOCKS,
339 		     "%s: line=%d\n", __func__, line);
340 	spin_lock(&node->lock);
341 }
342 
343 /**
344  * binder_node_unlock() - Release spinlock for given binder_node
345  * @node:         struct binder_node to release
346  *
347  * Release lock acquired via binder_node_lock()
348  */
349 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
350 static void
351 _binder_node_unlock(struct binder_node *node, int line)
352 	__releases(&node->lock)
353 {
354 	binder_debug(BINDER_DEBUG_SPINLOCKS,
355 		     "%s: line=%d\n", __func__, line);
356 	spin_unlock(&node->lock);
357 }
358 
359 /**
360  * binder_node_inner_lock() - Acquire node and inner locks
361  * @node:         struct binder_node to acquire
362  *
363  * Acquires node->lock. If node->proc is non-NULL, also acquires
364  * proc->inner_lock. Used to protect binder_node fields
365  */
366 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
367 static void
368 _binder_node_inner_lock(struct binder_node *node, int line)
369 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
370 {
371 	binder_debug(BINDER_DEBUG_SPINLOCKS,
372 		     "%s: line=%d\n", __func__, line);
373 	spin_lock(&node->lock);
374 	if (node->proc)
375 		binder_inner_proc_lock(node->proc);
376 	else
377 		/* annotation for sparse */
378 		__acquire(&node->proc->inner_lock);
379 }
380 
381 /**
382  * binder_node_inner_unlock() - Release node and inner locks
383  * @node:         struct binder_node to release
384  *
385  * Release locks acquired via binder_node_inner_lock()
386  */
387 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
388 static void
389 _binder_node_inner_unlock(struct binder_node *node, int line)
390 	__releases(&node->lock) __releases(&node->proc->inner_lock)
391 {
392 	struct binder_proc *proc = node->proc;
393 
394 	binder_debug(BINDER_DEBUG_SPINLOCKS,
395 		     "%s: line=%d\n", __func__, line);
396 	if (proc)
397 		binder_inner_proc_unlock(proc);
398 	else
399 		/* annotation for sparse */
400 		__release(&node->proc->inner_lock);
401 	spin_unlock(&node->lock);
402 }
403 
404 static bool binder_worklist_empty_ilocked(struct list_head *list)
405 {
406 	return list_empty(list);
407 }
408 
409 /**
410  * binder_worklist_empty() - Check if no items on the work list
411  * @proc:       binder_proc associated with list
412  * @list:	list to check
413  *
414  * Return: true if there are no items on list, else false
415  */
416 static bool binder_worklist_empty(struct binder_proc *proc,
417 				  struct list_head *list)
418 {
419 	bool ret;
420 
421 	binder_inner_proc_lock(proc);
422 	ret = binder_worklist_empty_ilocked(list);
423 	binder_inner_proc_unlock(proc);
424 	return ret;
425 }
426 
427 /**
428  * binder_enqueue_work_ilocked() - Add an item to the work list
429  * @work:         struct binder_work to add to list
430  * @target_list:  list to add work to
431  *
432  * Adds the work to the specified list. Asserts that work
433  * is not already on a list.
434  *
435  * Requires the proc->inner_lock to be held.
436  */
437 static void
438 binder_enqueue_work_ilocked(struct binder_work *work,
439 			   struct list_head *target_list)
440 {
441 	BUG_ON(target_list == NULL);
442 	BUG_ON(work->entry.next && !list_empty(&work->entry));
443 	list_add_tail(&work->entry, target_list);
444 }
445 
446 /**
447  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
448  * @thread:       thread to queue work to
449  * @work:         struct binder_work to add to list
450  *
451  * Adds the work to the todo list of the thread. Doesn't set the process_todo
452  * flag, which means that (if it wasn't already set) the thread will go to
453  * sleep without handling this work when it calls read.
454  *
455  * Requires the proc->inner_lock to be held.
456  */
457 static void
458 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
459 					    struct binder_work *work)
460 {
461 	WARN_ON(!list_empty(&thread->waiting_thread_node));
462 	binder_enqueue_work_ilocked(work, &thread->todo);
463 }
464 
465 /**
466  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
467  * @thread:       thread to queue work to
468  * @work:         struct binder_work to add to list
469  *
470  * Adds the work to the todo list of the thread, and enables processing
471  * of the todo queue.
472  *
473  * Requires the proc->inner_lock to be held.
474  */
475 static void
476 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
477 				   struct binder_work *work)
478 {
479 	WARN_ON(!list_empty(&thread->waiting_thread_node));
480 	binder_enqueue_work_ilocked(work, &thread->todo);
481 
482 	/* (e)poll-based threads require an explicit wakeup signal when
483 	 * queuing their own work; they rely on these events to consume
484 	 * messages without blocking on I/O. Without it, threads risk waiting
485 	 * indefinitely without handling the work.
486 	 */
487 	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
488 	    thread->pid == current->pid && !thread->process_todo)
489 		wake_up_interruptible_sync(&thread->wait);
490 
491 	thread->process_todo = true;
492 }
493 
494 /**
495  * binder_enqueue_thread_work() - Add an item to the thread work list
496  * @thread:       thread to queue work to
497  * @work:         struct binder_work to add to list
498  *
499  * Adds the work to the todo list of the thread, and enables processing
500  * of the todo queue.
501  */
502 static void
503 binder_enqueue_thread_work(struct binder_thread *thread,
504 			   struct binder_work *work)
505 {
506 	binder_inner_proc_lock(thread->proc);
507 	binder_enqueue_thread_work_ilocked(thread, work);
508 	binder_inner_proc_unlock(thread->proc);
509 }
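
/*
 * Usage sketch (hypothetical caller, assuming a struct binder_work *w
 * destined for @thread): queue the work, then wake the looper so it is
 * handled promptly:
 *
 *	binder_enqueue_thread_work(thread, w);	// queue + set process_todo
 *	wake_up_interruptible(&thread->wait);
 *
 * Callers already holding proc->inner_lock use the _ilocked variant and
 * issue their own wakeup, e.g. via binder_wakeup_thread_ilocked() below.
 */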
510 
511 static void
512 binder_dequeue_work_ilocked(struct binder_work *work)
513 {
514 	list_del_init(&work->entry);
515 }
516 
517 /**
518  * binder_dequeue_work() - Removes an item from the work list
519  * @proc:         binder_proc associated with list
520  * @work:         struct binder_work to remove from list
521  *
522  * Removes the specified work item from whatever list it is on.
523  * Can safely be called if work is not on any list.
524  */
525 static void
526 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
527 {
528 	binder_inner_proc_lock(proc);
529 	binder_dequeue_work_ilocked(work);
530 	binder_inner_proc_unlock(proc);
531 }
532 
533 static struct binder_work *binder_dequeue_work_head_ilocked(
534 					struct list_head *list)
535 {
536 	struct binder_work *w;
537 
538 	w = list_first_entry_or_null(list, struct binder_work, entry);
539 	if (w)
540 		list_del_init(&w->entry);
541 	return w;
542 }
543 
544 static void
545 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
546 static void binder_free_thread(struct binder_thread *thread);
547 static void binder_free_proc(struct binder_proc *proc);
548 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
549 
550 static bool binder_has_work_ilocked(struct binder_thread *thread,
551 				    bool do_proc_work)
552 {
553 	return thread->process_todo ||
554 		thread->looper_need_return ||
555 		(do_proc_work &&
556 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
557 }
558 
559 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
560 {
561 	bool has_work;
562 
563 	binder_inner_proc_lock(thread->proc);
564 	has_work = binder_has_work_ilocked(thread, do_proc_work);
565 	binder_inner_proc_unlock(thread->proc);
566 
567 	return has_work;
568 }
569 
570 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
571 {
572 	return !thread->transaction_stack &&
573 		binder_worklist_empty_ilocked(&thread->todo) &&
574 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
575 				   BINDER_LOOPER_STATE_REGISTERED));
576 }
577 
578 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
579 					       bool sync)
580 {
581 	struct rb_node *n;
582 	struct binder_thread *thread;
583 
584 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
585 		thread = rb_entry(n, struct binder_thread, rb_node);
586 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
587 		    binder_available_for_proc_work_ilocked(thread)) {
588 			if (sync)
589 				wake_up_interruptible_sync(&thread->wait);
590 			else
591 				wake_up_interruptible(&thread->wait);
592 		}
593 	}
594 }
595 
596 /**
597  * binder_select_thread_ilocked() - selects a thread for doing proc work.
598  * @proc:	process to select a thread from
599  *
600  * Note that calling this function moves the thread off the waiting_threads
601  * list, so it can only be woken up by the caller of this function, or a
602  * signal. Therefore, callers *should* always wake up the thread this function
603  * returns.
604  *
605  * Return:	If there's a thread currently waiting for process work,
606  *		returns that thread. Otherwise returns NULL.
607  */
608 static struct binder_thread *
609 binder_select_thread_ilocked(struct binder_proc *proc)
610 {
611 	struct binder_thread *thread;
612 
613 	assert_spin_locked(&proc->inner_lock);
614 	thread = list_first_entry_or_null(&proc->waiting_threads,
615 					  struct binder_thread,
616 					  waiting_thread_node);
617 
618 	if (thread)
619 		list_del_init(&thread->waiting_thread_node);
620 
621 	return thread;
622 }
623 
624 /**
625  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
626  * @proc:	process to wake up a thread in
627  * @thread:	specific thread to wake-up (may be NULL)
628  * @sync:	whether to do a synchronous wake-up
629  *
630  * This function wakes up a thread in the @proc process.
631  * The caller may provide a specific thread to wake-up in
632  * the @thread parameter. If @thread is NULL, this function
633  * will wake up threads that have called poll().
634  *
635  * Note that for this function to work as expected, callers
636  * should first call binder_select_thread() to find a thread
637  * to handle the work (if they don't have a thread already),
638  * and pass the result into the @thread parameter.
639  */
640 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
641 					 struct binder_thread *thread,
642 					 bool sync)
643 {
644 	assert_spin_locked(&proc->inner_lock);
645 
646 	if (thread) {
647 		if (sync)
648 			wake_up_interruptible_sync(&thread->wait);
649 		else
650 			wake_up_interruptible(&thread->wait);
651 		return;
652 	}
653 
654 	/* Didn't find a thread waiting for proc work; this can happen
655 	 * in two scenarios:
656 	 * 1. All threads are busy handling transactions
657 	 *    In that case, one of those threads should call back into
658 	 *    the kernel driver soon and pick up this work.
659 	 * 2. Threads are using the (e)poll interface, in which case
660 	 *    they may be blocked on the waitqueue without having been
661 	 *    added to waiting_threads. For this case, we just iterate
662 	 *    over all threads not handling transaction work, and
663 	 *    wake them all up. We wake all because we don't know whether
664 	 *    a thread that called into (e)poll is handling non-binder
665 	 *    work currently.
666 	 */
667 	binder_wakeup_poll_threads_ilocked(proc, sync);
668 }
669 
670 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
671 {
672 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
673 
674 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
675 }
676 
677 static void binder_set_nice(long nice)
678 {
679 	long min_nice;
680 
681 	if (can_nice(current, nice)) {
682 		set_user_nice(current, nice);
683 		return;
684 	}
685 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
686 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
687 		     "%d: nice value %ld not allowed, using %ld instead\n",
688 		      current->pid, nice, min_nice);
689 	set_user_nice(current, min_nice);
690 	if (min_nice <= MAX_NICE)
691 		return;
692 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
693 }
694 
695 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
696 						   binder_uintptr_t ptr)
697 {
698 	struct rb_node *n = proc->nodes.rb_node;
699 	struct binder_node *node;
700 
701 	assert_spin_locked(&proc->inner_lock);
702 
703 	while (n) {
704 		node = rb_entry(n, struct binder_node, rb_node);
705 
706 		if (ptr < node->ptr)
707 			n = n->rb_left;
708 		else if (ptr > node->ptr)
709 			n = n->rb_right;
710 		else {
711 			/*
712 			 * take an implicit weak reference
713 			 * to ensure node stays alive until
714 			 * call to binder_put_node()
715 			 */
716 			binder_inc_node_tmpref_ilocked(node);
717 			return node;
718 		}
719 	}
720 	return NULL;
721 }
722 
723 static struct binder_node *binder_get_node(struct binder_proc *proc,
724 					   binder_uintptr_t ptr)
725 {
726 	struct binder_node *node;
727 
728 	binder_inner_proc_lock(proc);
729 	node = binder_get_node_ilocked(proc, ptr);
730 	binder_inner_proc_unlock(proc);
731 	return node;
732 }
733 
734 static struct binder_node *binder_init_node_ilocked(
735 						struct binder_proc *proc,
736 						struct binder_node *new_node,
737 						struct flat_binder_object *fp)
738 {
739 	struct rb_node **p = &proc->nodes.rb_node;
740 	struct rb_node *parent = NULL;
741 	struct binder_node *node;
742 	binder_uintptr_t ptr = fp ? fp->binder : 0;
743 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
744 	__u32 flags = fp ? fp->flags : 0;
745 
746 	assert_spin_locked(&proc->inner_lock);
747 
748 	while (*p) {
749 
750 		parent = *p;
751 		node = rb_entry(parent, struct binder_node, rb_node);
752 
753 		if (ptr < node->ptr)
754 			p = &(*p)->rb_left;
755 		else if (ptr > node->ptr)
756 			p = &(*p)->rb_right;
757 		else {
758 			/*
759 			 * A matching node is already in
760 			 * the rb tree. Abandon the init
761 			 * and return it.
762 			 */
763 			binder_inc_node_tmpref_ilocked(node);
764 			return node;
765 		}
766 	}
767 	node = new_node;
768 	binder_stats_created(BINDER_STAT_NODE);
769 	node->tmp_refs++;
770 	rb_link_node(&node->rb_node, parent, p);
771 	rb_insert_color(&node->rb_node, &proc->nodes);
772 	node->debug_id = atomic_inc_return(&binder_last_id);
773 	node->proc = proc;
774 	node->ptr = ptr;
775 	node->cookie = cookie;
776 	node->work.type = BINDER_WORK_NODE;
777 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
778 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
779 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
780 	spin_lock_init(&node->lock);
781 	INIT_LIST_HEAD(&node->work.entry);
782 	INIT_LIST_HEAD(&node->async_todo);
783 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
784 		     "%d:%d node %d u%016llx c%016llx created\n",
785 		     proc->pid, current->pid, node->debug_id,
786 		     (u64)node->ptr, (u64)node->cookie);
787 
788 	return node;
789 }
790 
791 static struct binder_node *binder_new_node(struct binder_proc *proc,
792 					   struct flat_binder_object *fp)
793 {
794 	struct binder_node *node;
795 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
796 
797 	if (!new_node)
798 		return NULL;
799 	binder_inner_proc_lock(proc);
800 	node = binder_init_node_ilocked(proc, new_node, fp);
801 	binder_inner_proc_unlock(proc);
802 	if (node != new_node)
803 		/*
804 		 * The node was already added by another thread
805 		 */
806 		kfree(new_node);
807 
808 	return node;
809 }
810 
811 static void binder_free_node(struct binder_node *node)
812 {
813 	kfree(node);
814 	binder_stats_deleted(BINDER_STAT_NODE);
815 }
816 
817 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
818 				    int internal,
819 				    struct list_head *target_list)
820 {
821 	struct binder_proc *proc = node->proc;
822 
823 	assert_spin_locked(&node->lock);
824 	if (proc)
825 		assert_spin_locked(&proc->inner_lock);
826 	if (strong) {
827 		if (internal) {
828 			if (target_list == NULL &&
829 			    node->internal_strong_refs == 0 &&
830 			    !(node->proc &&
831 			      node == node->proc->context->binder_context_mgr_node &&
832 			      node->has_strong_ref)) {
833 				pr_err("invalid inc strong node for %d\n",
834 					node->debug_id);
835 				return -EINVAL;
836 			}
837 			node->internal_strong_refs++;
838 		} else
839 			node->local_strong_refs++;
840 		if (!node->has_strong_ref && target_list) {
841 			struct binder_thread *thread = container_of(target_list,
842 						    struct binder_thread, todo);
843 			binder_dequeue_work_ilocked(&node->work);
844 			BUG_ON(&thread->todo != target_list);
845 			binder_enqueue_deferred_thread_work_ilocked(thread,
846 								   &node->work);
847 		}
848 	} else {
849 		if (!internal)
850 			node->local_weak_refs++;
851 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
852 			if (target_list == NULL) {
853 				pr_err("invalid inc weak node for %d\n",
854 					node->debug_id);
855 				return -EINVAL;
856 			}
857 			/*
858 			 * See the strong-ref case above: queue node work
859 			 */
860 			binder_enqueue_work_ilocked(&node->work, target_list);
861 		}
862 	}
863 	return 0;
864 }
865 
866 static int binder_inc_node(struct binder_node *node, int strong, int internal,
867 			   struct list_head *target_list)
868 {
869 	int ret;
870 
871 	binder_node_inner_lock(node);
872 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
873 	binder_node_inner_unlock(node);
874 
875 	return ret;
876 }
877 
878 static bool binder_dec_node_nilocked(struct binder_node *node,
879 				     int strong, int internal)
880 {
881 	struct binder_proc *proc = node->proc;
882 
883 	assert_spin_locked(&node->lock);
884 	if (proc)
885 		assert_spin_locked(&proc->inner_lock);
886 	if (strong) {
887 		if (internal)
888 			node->internal_strong_refs--;
889 		else
890 			node->local_strong_refs--;
891 		if (node->local_strong_refs || node->internal_strong_refs)
892 			return false;
893 	} else {
894 		if (!internal)
895 			node->local_weak_refs--;
896 		if (node->local_weak_refs || node->tmp_refs ||
897 				!hlist_empty(&node->refs))
898 			return false;
899 	}
900 
901 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
902 		if (list_empty(&node->work.entry)) {
903 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
904 			binder_wakeup_proc_ilocked(proc);
905 		}
906 	} else {
907 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
908 		    !node->local_weak_refs && !node->tmp_refs) {
909 			if (proc) {
910 				binder_dequeue_work_ilocked(&node->work);
911 				rb_erase(&node->rb_node, &proc->nodes);
912 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
913 					     "refless node %d deleted\n",
914 					     node->debug_id);
915 			} else {
916 				BUG_ON(!list_empty(&node->work.entry));
917 				spin_lock(&binder_dead_nodes_lock);
918 				/*
919 				 * tmp_refs could have changed so
920 				 * check it again
921 				 */
922 				if (node->tmp_refs) {
923 					spin_unlock(&binder_dead_nodes_lock);
924 					return false;
925 				}
926 				hlist_del(&node->dead_node);
927 				spin_unlock(&binder_dead_nodes_lock);
928 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
929 					     "dead node %d deleted\n",
930 					     node->debug_id);
931 			}
932 			return true;
933 		}
934 	}
935 	return false;
936 }
937 
938 static void binder_dec_node(struct binder_node *node, int strong, int internal)
939 {
940 	bool free_node;
941 
942 	binder_node_inner_lock(node);
943 	free_node = binder_dec_node_nilocked(node, strong, internal);
944 	binder_node_inner_unlock(node);
945 	if (free_node)
946 		binder_free_node(node);
947 }
948 
949 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
950 {
951 	/*
952 	 * No call to binder_inc_node() is needed since we
953 	 * don't need to inform userspace of any changes to
954 	 * tmp_refs
955 	 */
956 	node->tmp_refs++;
957 }
958 
959 /**
960  * binder_inc_node_tmpref() - take a temporary reference on node
961  * @node:	node to reference
962  *
963  * Take reference on node to prevent the node from being freed
964  * while referenced only by a local variable. The inner lock is
965  * needed to serialize with the node work on the queue (which
966  * isn't needed after the node is dead). If the node is dead
967  * (node->proc is NULL), use binder_dead_nodes_lock to protect
968  * node->tmp_refs against dead-node-only cases where the node
969  * lock cannot be acquired (e.g. traversing the dead node list to
970  * print nodes)
971  */
972 static void binder_inc_node_tmpref(struct binder_node *node)
973 {
974 	binder_node_lock(node);
975 	if (node->proc)
976 		binder_inner_proc_lock(node->proc);
977 	else
978 		spin_lock(&binder_dead_nodes_lock);
979 	binder_inc_node_tmpref_ilocked(node);
980 	if (node->proc)
981 		binder_inner_proc_unlock(node->proc);
982 	else
983 		spin_unlock(&binder_dead_nodes_lock);
984 	binder_node_unlock(node);
985 }
986 
987 /**
988  * binder_dec_node_tmpref() - remove a temporary reference on node
989  * @node:	node to reference
990  *
991  * Release temporary reference on node taken via binder_inc_node_tmpref()
992  */
993 static void binder_dec_node_tmpref(struct binder_node *node)
994 {
995 	bool free_node;
996 
997 	binder_node_inner_lock(node);
998 	if (!node->proc)
999 		spin_lock(&binder_dead_nodes_lock);
1000 	else
1001 		__acquire(&binder_dead_nodes_lock);
1002 	node->tmp_refs--;
1003 	BUG_ON(node->tmp_refs < 0);
1004 	if (!node->proc)
1005 		spin_unlock(&binder_dead_nodes_lock);
1006 	else
1007 		__release(&binder_dead_nodes_lock);
1008 	/*
1009 	 * Call binder_dec_node() to check if all refcounts are 0
1010 	 * and cleanup is needed. Calling with strong=0 and internal=1
1011 	 * causes no actual reference to be released in binder_dec_node().
1012 	 * If that changes, a change is needed here too.
1013 	 */
1014 	free_node = binder_dec_node_nilocked(node, 0, 1);
1015 	binder_node_inner_unlock(node);
1016 	if (free_node)
1017 		binder_free_node(node);
1018 }
1019 
1020 static void binder_put_node(struct binder_node *node)
1021 {
1022 	binder_dec_node_tmpref(node);
1023 }
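
/*
 * Lifecycle sketch (hypothetical caller): every successful lookup takes an
 * implicit tmp_ref, so each binder_get_node() must be balanced with a
 * binder_put_node() once the local pointer is no longer needed:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		...			// node cannot be freed here
 *		binder_put_node(node);	// drops the implicit tmp_ref
 *	}
 */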
1024 
1025 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1026 						 u32 desc, bool need_strong_ref)
1027 {
1028 	struct rb_node *n = proc->refs_by_desc.rb_node;
1029 	struct binder_ref *ref;
1030 
1031 	while (n) {
1032 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1033 
1034 		if (desc < ref->data.desc) {
1035 			n = n->rb_left;
1036 		} else if (desc > ref->data.desc) {
1037 			n = n->rb_right;
1038 		} else if (need_strong_ref && !ref->data.strong) {
1039 			binder_user_error("tried to use weak ref as strong ref\n");
1040 			return NULL;
1041 		} else {
1042 			return ref;
1043 		}
1044 	}
1045 	return NULL;
1046 }
1047 
1048 /* Find the smallest unused descriptor the "slow way" */
1049 static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
1050 {
1051 	struct binder_ref *ref;
1052 	struct rb_node *n;
1053 	u32 desc;
1054 
1055 	desc = 1;
1056 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1057 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1058 		if (ref->data.desc > desc)
1059 			break;
1060 		desc = ref->data.desc + 1;
1061 	}
1062 
1063 	return desc;
1064 }
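
/*
 * Worked example: with descriptors 0 (context manager), 1, 2 and 5 in use,
 * the loop above leaves desc at 1 after the ref with desc 0, advances it to
 * 2 and then 3, and stops at the ref with desc 5 since 5 > 3; it returns 3,
 * the smallest unused descriptor.
 */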
1065 
1066 /*
1067  * Find an available reference descriptor ID. The proc->outer_lock might
1068  * be released in the process, in which case -EAGAIN is returned and
1069  * @desc should be considered invalid.
1070  */
1071 static int get_ref_desc_olocked(struct binder_proc *proc,
1072 				struct binder_node *node,
1073 				u32 *desc)
1074 {
1075 	struct dbitmap *dmap = &proc->dmap;
1076 	unsigned long *new, bit;
1077 	unsigned int nbits;
1078 
1079 	/* 0 is reserved for the context manager */
1080 	if (node == proc->context->binder_context_mgr_node) {
1081 		*desc = 0;
1082 		return 0;
1083 	}
1084 
1085 	if (!dbitmap_enabled(dmap)) {
1086 		*desc = slow_desc_lookup_olocked(proc);
1087 		return 0;
1088 	}
1089 
1090 	if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) {
1091 		*desc = bit;
1092 		return 0;
1093 	}
1094 
1095 	/*
1096 	 * The dbitmap is full and needs to grow. The proc->outer_lock
1097 	 * is briefly released to allocate the new bitmap safely.
1098 	 */
1099 	nbits = dbitmap_grow_nbits(dmap);
1100 	binder_proc_unlock(proc);
1101 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1102 	binder_proc_lock(proc);
1103 	dbitmap_grow(dmap, new, nbits);
1104 
1105 	return -EAGAIN;
1106 }
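
/*
 * Caller-side sketch of the -EAGAIN contract (the real retry loop is in
 * binder_get_ref_for_node_olocked() below): since the outer lock was
 * dropped while growing the bitmap, any rb-tree position computed earlier
 * is stale and the whole lookup must be redone:
 *
 *	retry:
 *		...	// search refs_by_node under proc->outer_lock
 *		if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
 *			goto retry;
 */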
1107 
1108 /**
1109  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1110  * @proc:	binder_proc that owns the ref
1111  * @node:	binder_node of target
1112  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1113  *
1114  * Look up the ref for the given node and return it if it exists
1115  *
1116  * If it doesn't exist and the caller provides a newly allocated
1117  * ref, initialize the fields of the newly allocated ref and insert
1118  * into the given proc rb_trees and node refs list.
1119  *
1120  * Return:	the ref for node. It is possible that another thread
1121  *		allocated/initialized the ref first in which case the
1122  *		returned ref would be different from the passed-in
1123  *		new_ref. new_ref must be kfree'd by the caller in
1124  *		this case.
1125  */
1126 static struct binder_ref *binder_get_ref_for_node_olocked(
1127 					struct binder_proc *proc,
1128 					struct binder_node *node,
1129 					struct binder_ref *new_ref)
1130 {
1131 	struct binder_ref *ref;
1132 	struct rb_node *parent;
1133 	struct rb_node **p;
1134 	u32 desc;
1135 
1136 retry:
1137 	p = &proc->refs_by_node.rb_node;
1138 	parent = NULL;
1139 	while (*p) {
1140 		parent = *p;
1141 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1142 
1143 		if (node < ref->node)
1144 			p = &(*p)->rb_left;
1145 		else if (node > ref->node)
1146 			p = &(*p)->rb_right;
1147 		else
1148 			return ref;
1149 	}
1150 	if (!new_ref)
1151 		return NULL;
1152 
1153 	/* might release the proc->outer_lock */
1154 	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1155 		goto retry;
1156 
1157 	binder_stats_created(BINDER_STAT_REF);
1158 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1159 	new_ref->proc = proc;
1160 	new_ref->node = node;
1161 	rb_link_node(&new_ref->rb_node_node, parent, p);
1162 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1163 
1164 	new_ref->data.desc = desc;
1165 	p = &proc->refs_by_desc.rb_node;
1166 	while (*p) {
1167 		parent = *p;
1168 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1169 
1170 		if (new_ref->data.desc < ref->data.desc)
1171 			p = &(*p)->rb_left;
1172 		else if (new_ref->data.desc > ref->data.desc)
1173 			p = &(*p)->rb_right;
1174 		else
1175 			BUG();
1176 	}
1177 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1178 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1179 
1180 	binder_node_lock(node);
1181 	hlist_add_head(&new_ref->node_entry, &node->refs);
1182 
1183 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1184 		     "%d new ref %d desc %d for node %d\n",
1185 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1186 		      node->debug_id);
1187 	binder_node_unlock(node);
1188 	return new_ref;
1189 }
1190 
1191 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1192 {
1193 	struct dbitmap *dmap = &ref->proc->dmap;
1194 	bool delete_node = false;
1195 
1196 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1197 		     "%d delete ref %d desc %d for node %d\n",
1198 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1199 		      ref->node->debug_id);
1200 
1201 	if (dbitmap_enabled(dmap))
1202 		dbitmap_clear_bit(dmap, ref->data.desc);
1203 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1204 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1205 
1206 	binder_node_inner_lock(ref->node);
1207 	if (ref->data.strong)
1208 		binder_dec_node_nilocked(ref->node, 1, 1);
1209 
1210 	hlist_del(&ref->node_entry);
1211 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1212 	binder_node_inner_unlock(ref->node);
1213 	/*
1214 	 * Clear ref->node unless we want the caller to free the node
1215 	 */
1216 	if (!delete_node) {
1217 		/*
1218 		 * The caller uses ref->node to determine
1219 		 * whether the node needs to be freed. Clear
1220 		 * it since the node is still alive.
1221 		 */
1222 		ref->node = NULL;
1223 	}
1224 
1225 	if (ref->death) {
1226 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1227 			     "%d delete ref %d desc %d has death notification\n",
1228 			      ref->proc->pid, ref->data.debug_id,
1229 			      ref->data.desc);
1230 		binder_dequeue_work(ref->proc, &ref->death->work);
1231 		binder_stats_deleted(BINDER_STAT_DEATH);
1232 	}
1233 	binder_stats_deleted(BINDER_STAT_REF);
1234 }
1235 
1236 /**
1237  * binder_inc_ref_olocked() - increment the ref for given handle
1238  * @ref:         ref to be incremented
1239  * @strong:      if true, strong increment, else weak
1240  * @target_list: list to queue node work on
1241  *
1242  * Increment the ref. @ref->proc->outer_lock must be held on entry
1243  *
1244  * Return: 0, if successful, else errno
1245  */
1246 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1247 				  struct list_head *target_list)
1248 {
1249 	int ret;
1250 
1251 	if (strong) {
1252 		if (ref->data.strong == 0) {
1253 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1254 			if (ret)
1255 				return ret;
1256 		}
1257 		ref->data.strong++;
1258 	} else {
1259 		if (ref->data.weak == 0) {
1260 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1261 			if (ret)
1262 				return ret;
1263 		}
1264 		ref->data.weak++;
1265 	}
1266 	return 0;
1267 }
1268 
1269 /**
1270  * binder_dec_ref_olocked() - dec the ref for given handle
1271  * @ref:	ref to be decremented
1272  * @strong:	if true, strong decrement, else weak
1273  *
1274  * Decrement the ref.
1275  *
1276  * Return: %true if ref is cleaned up and ready to be freed.
1277  */
1278 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1279 {
1280 	if (strong) {
1281 		if (ref->data.strong == 0) {
1282 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1283 					  ref->proc->pid, ref->data.debug_id,
1284 					  ref->data.desc, ref->data.strong,
1285 					  ref->data.weak);
1286 			return false;
1287 		}
1288 		ref->data.strong--;
1289 		if (ref->data.strong == 0)
1290 			binder_dec_node(ref->node, strong, 1);
1291 	} else {
1292 		if (ref->data.weak == 0) {
1293 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1294 					  ref->proc->pid, ref->data.debug_id,
1295 					  ref->data.desc, ref->data.strong,
1296 					  ref->data.weak);
1297 			return false;
1298 		}
1299 		ref->data.weak--;
1300 	}
1301 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1302 		binder_cleanup_ref_olocked(ref);
1303 		return true;
1304 	}
1305 	return false;
1306 }
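
/*
 * The boolean return transfers cleanup duty to the caller; a sketch of the
 * contract (see binder_update_ref_for_handle() below for the real user):
 *
 *	if (binder_dec_ref_olocked(ref, strong)) {
 *		...			// drop proc->outer_lock first
 *		binder_free_ref(ref);	// ref is fully released
 *	}
 */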
1307 
1308 /**
1309  * binder_get_node_from_ref() - get the node from the given proc/desc
1310  * @proc:	proc containing the ref
1311  * @desc:	the handle associated with the ref
1312  * @need_strong_ref: if true, only return node if ref is strong
1313  * @rdata:	the id/refcount data for the ref
1314  *
1315  * Given a proc and ref handle, return the associated binder_node
1316  *
1317  * Return: a binder_node, or NULL if not found or if the ref is only weak when a strong ref is required
1318  */
1319 static struct binder_node *binder_get_node_from_ref(
1320 		struct binder_proc *proc,
1321 		u32 desc, bool need_strong_ref,
1322 		struct binder_ref_data *rdata)
1323 {
1324 	struct binder_node *node;
1325 	struct binder_ref *ref;
1326 
1327 	binder_proc_lock(proc);
1328 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1329 	if (!ref)
1330 		goto err_no_ref;
1331 	node = ref->node;
1332 	/*
1333 	 * Take an implicit reference on the node to ensure
1334 	 * it stays alive until the call to binder_put_node()
1335 	 */
1336 	binder_inc_node_tmpref(node);
1337 	if (rdata)
1338 		*rdata = ref->data;
1339 	binder_proc_unlock(proc);
1340 
1341 	return node;
1342 
1343 err_no_ref:
1344 	binder_proc_unlock(proc);
1345 	return NULL;
1346 }
1347 
1348 /**
1349  * binder_free_ref() - free the binder_ref
1350  * @ref:	ref to free
1351  *
1352  * Free the binder_ref. Free the binder_node indicated by ref->node
1353  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1354  */
1355 static void binder_free_ref(struct binder_ref *ref)
1356 {
1357 	if (ref->node)
1358 		binder_free_node(ref->node);
1359 	kfree(ref->death);
1360 	kfree(ref);
1361 }
1362 
1363 /* shrink descriptor bitmap if needed */
1364 static void try_shrink_dmap(struct binder_proc *proc)
1365 {
1366 	unsigned long *new;
1367 	int nbits;
1368 
1369 	binder_proc_lock(proc);
1370 	nbits = dbitmap_shrink_nbits(&proc->dmap);
1371 	binder_proc_unlock(proc);
1372 
1373 	if (!nbits)
1374 		return;
1375 
1376 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1377 	binder_proc_lock(proc);
1378 	dbitmap_shrink(&proc->dmap, new, nbits);
1379 	binder_proc_unlock(proc);
1380 }
1381 
1382 /**
1383  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1384  * @proc:	proc containing the ref
1385  * @desc:	the handle associated with the ref
1386  * @increment:	true=inc reference, false=dec reference
1387  * @strong:	true=strong reference, false=weak reference
1388  * @rdata:	the id/refcount data for the ref
1389  *
1390  * Given a proc and ref handle, increment or decrement the ref
1391  * according to "increment" arg.
1392  *
1393  * Return: 0 if successful, else errno
1394  */
1395 static int binder_update_ref_for_handle(struct binder_proc *proc,
1396 		uint32_t desc, bool increment, bool strong,
1397 		struct binder_ref_data *rdata)
1398 {
1399 	int ret = 0;
1400 	struct binder_ref *ref;
1401 	bool delete_ref = false;
1402 
1403 	binder_proc_lock(proc);
1404 	ref = binder_get_ref_olocked(proc, desc, strong);
1405 	if (!ref) {
1406 		ret = -EINVAL;
1407 		goto err_no_ref;
1408 	}
1409 	if (increment)
1410 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1411 	else
1412 		delete_ref = binder_dec_ref_olocked(ref, strong);
1413 
1414 	if (rdata)
1415 		*rdata = ref->data;
1416 	binder_proc_unlock(proc);
1417 
1418 	if (delete_ref) {
1419 		binder_free_ref(ref);
1420 		try_shrink_dmap(proc);
1421 	}
1422 	return ret;
1423 
1424 err_no_ref:
1425 	binder_proc_unlock(proc);
1426 	return ret;
1427 }
1428 
1429 /**
1430  * binder_dec_ref_for_handle() - dec the ref for given handle
1431  * @proc:	proc containing the ref
1432  * @desc:	the handle associated with the ref
1433  * @strong:	true=strong reference, false=weak reference
1434  * @rdata:	the id/refcount data for the ref
1435  *
1436  * Just calls binder_update_ref_for_handle() to decrement the ref.
1437  *
1438  * Return: 0 if successful, else errno
1439  */
1440 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1441 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1442 {
1443 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1444 }
1445 
1446 
1447 /**
1448  * binder_inc_ref_for_node() - increment the ref for given proc/node
1449  * @proc:	 proc containing the ref
1450  * @node:	 target node
1451  * @strong:	 true=strong reference, false=weak reference
1452  * @target_list: worklist to use if node is incremented
1453  * @rdata:	 the id/refcount data for the ref
1454  *
1455  * Given a proc and node, increment the ref. Create the ref if it
1456  * doesn't already exist
1457  *
1458  * Return: 0 if successful, else errno
1459  */
1460 static int binder_inc_ref_for_node(struct binder_proc *proc,
1461 			struct binder_node *node,
1462 			bool strong,
1463 			struct list_head *target_list,
1464 			struct binder_ref_data *rdata)
1465 {
1466 	struct binder_ref *ref;
1467 	struct binder_ref *new_ref = NULL;
1468 	int ret = 0;
1469 
1470 	binder_proc_lock(proc);
1471 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1472 	if (!ref) {
1473 		binder_proc_unlock(proc);
1474 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1475 		if (!new_ref)
1476 			return -ENOMEM;
1477 		binder_proc_lock(proc);
1478 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1479 	}
1480 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1481 	*rdata = ref->data;
1482 	if (ret && ref == new_ref) {
1483 		/*
1484 		 * Cleanup the failed reference here as the target
1485 		 * could now be dead and have already released its
1486 		 * references by now. Cleaning up the new reference
1487 		 * with strong=0 while a tmp_ref is held will not
1488 		 * decrement the node. The new_ref gets kfree'd below.
1489 		 */
1490 		binder_cleanup_ref_olocked(new_ref);
1491 		ref = NULL;
1492 	}
1493 
1494 	binder_proc_unlock(proc);
1495 	if (new_ref && ref != new_ref)
1496 		/*
1497 		 * Another thread created the ref first so
1498 		 * free the one we allocated
1499 		 */
1500 		kfree(new_ref);
1501 	return ret;
1502 }
1503 
1504 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1505 					   struct binder_transaction *t)
1506 {
1507 	BUG_ON(!target_thread);
1508 	assert_spin_locked(&target_thread->proc->inner_lock);
1509 	BUG_ON(target_thread->transaction_stack != t);
1510 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1511 	target_thread->transaction_stack =
1512 		target_thread->transaction_stack->from_parent;
1513 	t->from = NULL;
1514 }
1515 
1516 /**
1517  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1518  * @thread:	thread to decrement
1519  *
1520  * A thread needs to be kept alive while being used to create or
1521  * handle a transaction. binder_get_txn_from() is used to safely
1522  * extract t->from from a binder_transaction and keep the thread
1523  * indicated by t->from from being freed. When done with that
1524  * binder_thread, this function is called to decrement the
1525  * tmp_ref and free if appropriate (thread has been released
1526  * and no transaction being processed by the driver)
1527  */
1528 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1529 {
1530 	/*
1531 	 * the counter is atomic, so it is safe to decrement here;
1532 	 * the thread is only freed once it is dead and tmp_ref is zero
1533 	 */
1534 	binder_inner_proc_lock(thread->proc);
1535 	atomic_dec(&thread->tmp_ref);
1536 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1537 		binder_inner_proc_unlock(thread->proc);
1538 		binder_free_thread(thread);
1539 		return;
1540 	}
1541 	binder_inner_proc_unlock(thread->proc);
1542 }
1543 
1544 /**
1545  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1546  * @proc:	proc to decrement
1547  *
1548  * A binder_proc needs to be kept alive while being used to create or
1549  * handle a transaction. proc->tmp_ref is incremented when
1550  * creating a new transaction or the binder_proc is currently in-use
1551  * by threads that are being released. When done with the binder_proc,
1552  * this function is called to decrement the counter and free the
1553  * proc if appropriate (proc has been released, all threads have
1554  * been released and not currently in use to process a transaction).
1555  */
1556 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1557 {
1558 	binder_inner_proc_lock(proc);
1559 	proc->tmp_ref--;
1560 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1561 			!proc->tmp_ref) {
1562 		binder_inner_proc_unlock(proc);
1563 		binder_free_proc(proc);
1564 		return;
1565 	}
1566 	binder_inner_proc_unlock(proc);
1567 }
1568 
1569 /**
1570  * binder_get_txn_from() - safely extract the "from" thread in transaction
1571  * @t:	binder transaction for t->from
1572  *
1573  * Atomically return the "from" thread and increment the tmp_ref
1574  * count for the thread to ensure it stays alive until
1575  * binder_thread_dec_tmpref() is called.
1576  *
1577  * Return: the value of t->from
1578  */
1579 static struct binder_thread *binder_get_txn_from(
1580 		struct binder_transaction *t)
1581 {
1582 	struct binder_thread *from;
1583 
1584 	spin_lock(&t->lock);
1585 	from = t->from;
1586 	if (from)
1587 		atomic_inc(&from->tmp_ref);
1588 	spin_unlock(&t->lock);
1589 	return from;
1590 }
1591 
1592 /**
1593  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1594  * @t:	binder transaction for t->from
1595  *
1596  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1597  * to guarantee that the thread cannot be released while operating on it.
1598  * The caller must call binder_inner_proc_unlock() to release the inner lock
1599  * as well as call binder_thread_dec_tmpref() to release the reference.
1600  *
1601  * Return: the value of t->from
1602  */
1603 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1604 		struct binder_transaction *t)
1605 	__acquires(&t->from->proc->inner_lock)
1606 {
1607 	struct binder_thread *from;
1608 
1609 	from = binder_get_txn_from(t);
1610 	if (!from) {
1611 		__acquire(&from->proc->inner_lock);
1612 		return NULL;
1613 	}
1614 	binder_inner_proc_lock(from->proc);
1615 	if (t->from) {
1616 		BUG_ON(from != t->from);
1617 		return from;
1618 	}
1619 	binder_inner_proc_unlock(from->proc);
1620 	__acquire(&from->proc->inner_lock);
1621 	binder_thread_dec_tmpref(from);
1622 	return NULL;
1623 }
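
/*
 * Caller sketch (see binder_send_failed_reply() below for the real user):
 * on success both the inner lock and the tmp_ref must be released:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...	// thread cannot be released in this region
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */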
1624 
1625 /**
1626  * binder_free_txn_fixups() - free unprocessed fd fixups
1627  * @t:	binder transaction whose fd fixups are to be freed
1628  *
1629  * If the transaction is being torn down prior to being
1630  * processed by the target process, free all of the
1631  * fd fixups and fput the file structs. It is safe to
1632  * call this function after the fixups have been
1633  * processed -- in that case, the list will be empty.
1634  */
1635 static void binder_free_txn_fixups(struct binder_transaction *t)
1636 {
1637 	struct binder_txn_fd_fixup *fixup, *tmp;
1638 
1639 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1640 		fput(fixup->file);
1641 		if (fixup->target_fd >= 0)
1642 			put_unused_fd(fixup->target_fd);
1643 		list_del(&fixup->fixup_entry);
1644 		kfree(fixup);
1645 	}
1646 }
1647 
1648 static void binder_txn_latency_free(struct binder_transaction *t)
1649 {
1650 	int from_proc, from_thread, to_proc, to_thread;
1651 
1652 	spin_lock(&t->lock);
1653 	from_proc = t->from ? t->from->proc->pid : 0;
1654 	from_thread = t->from ? t->from->pid : 0;
1655 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1656 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1657 	spin_unlock(&t->lock);
1658 
1659 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1660 }
1661 
1662 static void binder_free_transaction(struct binder_transaction *t)
1663 {
1664 	struct binder_proc *target_proc = t->to_proc;
1665 
1666 	if (target_proc) {
1667 		binder_inner_proc_lock(target_proc);
1668 		target_proc->outstanding_txns--;
1669 		if (target_proc->outstanding_txns < 0)
1670 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1671 				__func__, target_proc->outstanding_txns);
1672 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1673 			wake_up_interruptible_all(&target_proc->freeze_wait);
1674 		if (t->buffer)
1675 			t->buffer->transaction = NULL;
1676 		binder_inner_proc_unlock(target_proc);
1677 	}
1678 	if (trace_binder_txn_latency_free_enabled())
1679 		binder_txn_latency_free(t);
1680 	/*
1681 	 * If the transaction has no target_proc, then
1682 	 * t->buffer->transaction has already been cleared.
1683 	 */
1684 	binder_free_txn_fixups(t);
1685 	kfree(t);
1686 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1687 }
1688 
1689 static void binder_send_failed_reply(struct binder_transaction *t,
1690 				     uint32_t error_code)
1691 {
1692 	struct binder_thread *target_thread;
1693 	struct binder_transaction *next;
1694 
1695 	BUG_ON(t->flags & TF_ONE_WAY);
1696 	while (1) {
1697 		target_thread = binder_get_txn_from_and_acq_inner(t);
1698 		if (target_thread) {
1699 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1700 				     "send failed reply for transaction %d to %d:%d\n",
1701 				      t->debug_id,
1702 				      target_thread->proc->pid,
1703 				      target_thread->pid);
1704 
1705 			binder_pop_transaction_ilocked(target_thread, t);
1706 			if (target_thread->reply_error.cmd == BR_OK) {
1707 				target_thread->reply_error.cmd = error_code;
1708 				binder_enqueue_thread_work_ilocked(
1709 					target_thread,
1710 					&target_thread->reply_error.work);
1711 				wake_up_interruptible(&target_thread->wait);
1712 			} else {
1713 				/*
1714 				 * Cannot get here for normal operation, but
1715 				 * we can if multiple synchronous transactions
1716 				 * are sent without blocking for responses.
1717 				 * Just ignore the 2nd error in this case.
1718 				 */
1719 				pr_warn("Unexpected reply error: %u\n",
1720 					target_thread->reply_error.cmd);
1721 			}
1722 			binder_inner_proc_unlock(target_thread->proc);
1723 			binder_thread_dec_tmpref(target_thread);
1724 			binder_free_transaction(t);
1725 			return;
1726 		}
1727 		__release(&target_thread->proc->inner_lock);
1728 		next = t->from_parent;
1729 
1730 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1731 			     "send failed reply for transaction %d, target dead\n",
1732 			     t->debug_id);
1733 
1734 		binder_free_transaction(t);
1735 		if (next == NULL) {
1736 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1737 				     "reply failed, no target thread at root\n");
1738 			return;
1739 		}
1740 		t = next;
1741 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1742 			     "reply failed, no target thread -- retry %d\n",
1743 			      t->debug_id);
1744 	}
1745 }
1746 
1747 /**
1748  * binder_cleanup_transaction() - cleans up undelivered transaction
1749  * @t:		transaction that needs to be cleaned up
1750  * @reason:	reason the transaction wasn't delivered
1751  * @error_code:	error to return to caller (if synchronous call)
1752  */
1753 static void binder_cleanup_transaction(struct binder_transaction *t,
1754 				       const char *reason,
1755 				       uint32_t error_code)
1756 {
1757 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1758 		binder_send_failed_reply(t, error_code);
1759 	} else {
1760 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1761 			"undelivered transaction %d, %s\n",
1762 			t->debug_id, reason);
1763 		binder_free_transaction(t);
1764 	}
1765 }
1766 
1767 /**
1768  * binder_get_object() - gets object and checks for valid metadata
1769  * @proc:	binder_proc owning the buffer
1770  * @u:		sender's user pointer to base of buffer
1771  * @buffer:	binder_buffer that we're parsing.
1772  * @offset:	offset in the @buffer at which to validate an object.
1773  * @object:	struct binder_object to read into
1774  *
1775  * Copy the binder object at the given offset into @object. If @u is
1776  * provided then the copy is from the sender's buffer. If not, then
1777  * it is copied from the target's @buffer.
1778  *
1779  * Return:	If there's a valid metadata object at @offset, the
1780  *		size of that object. Otherwise, it returns zero. The object
1781  *		is read into the struct binder_object pointed to by @object.
1782  */
1783 static size_t binder_get_object(struct binder_proc *proc,
1784 				const void __user *u,
1785 				struct binder_buffer *buffer,
1786 				unsigned long offset,
1787 				struct binder_object *object)
1788 {
1789 	size_t read_size;
1790 	struct binder_object_header *hdr;
1791 	size_t object_size = 0;
1792 
1793 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1794 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1795 	    !IS_ALIGNED(offset, sizeof(u32)))
1796 		return 0;
1797 
1798 	if (u) {
1799 		if (copy_from_user(object, u + offset, read_size))
1800 			return 0;
1801 	} else {
1802 		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1803 						  offset, read_size))
1804 			return 0;
1805 	}
1806 
1807 	/* Ok, now see if we read a complete object. */
1808 	hdr = &object->hdr;
1809 	switch (hdr->type) {
1810 	case BINDER_TYPE_BINDER:
1811 	case BINDER_TYPE_WEAK_BINDER:
1812 	case BINDER_TYPE_HANDLE:
1813 	case BINDER_TYPE_WEAK_HANDLE:
1814 		object_size = sizeof(struct flat_binder_object);
1815 		break;
1816 	case BINDER_TYPE_FD:
1817 		object_size = sizeof(struct binder_fd_object);
1818 		break;
1819 	case BINDER_TYPE_PTR:
1820 		object_size = sizeof(struct binder_buffer_object);
1821 		break;
1822 	case BINDER_TYPE_FDA:
1823 		object_size = sizeof(struct binder_fd_array_object);
1824 		break;
1825 	default:
1826 		return 0;
1827 	}
1828 	if (offset <= buffer->data_size - object_size &&
1829 	    buffer->data_size >= object_size)
1830 		return object_size;
1831 	else
1832 		return 0;
1833 }
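
/*
 * Worked bounds example for the checks above (illustrative, 64-bit
 * ABI where a flat_binder_object is 24 bytes): with data_size == 64,
 * an offset of 40 passes (40 <= 64 - 24), while an offset of 48
 * would leave only 16 bytes for a 24-byte object, so the final size
 * check fails and binder_get_object() returns 0.
 */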
1834 
1835 /**
1836  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1837  * @proc:	binder_proc owning the buffer
1838  * @b:		binder_buffer containing the object
1839  * @object:	struct binder_object to read into
1840  * @index:	index in offset array at which the binder_buffer_object is
1841  *		located
1842  * @start_offset: offset to the start of the offset array
1843  * @object_offsetp: offset of @object read from @b
1844  * @num_valid:	the number of valid offsets in the offset array
1845  *
1846  * Return:	If @index is within the valid range of the offset array
1847  *		described by @start_offset and @num_valid, and if there's a valid
1848  *		binder_buffer_object at the offset found in index @index
1849  *		of the offset array, that object is returned. Otherwise,
1850  *		%NULL is returned.
1851  *		Note that the offset found in index @index itself is not
1852  *		verified; this function assumes that @num_valid elements
1853  *		from @start_offset were previously verified to have valid offsets.
1854  *		If @object_offsetp is non-NULL, then the offset within
1855  *		@b is written to it.
1856  */
1857 static struct binder_buffer_object *binder_validate_ptr(
1858 						struct binder_proc *proc,
1859 						struct binder_buffer *b,
1860 						struct binder_object *object,
1861 						binder_size_t index,
1862 						binder_size_t start_offset,
1863 						binder_size_t *object_offsetp,
1864 						binder_size_t num_valid)
1865 {
1866 	size_t object_size;
1867 	binder_size_t object_offset;
1868 	unsigned long buffer_offset;
1869 
1870 	if (index >= num_valid)
1871 		return NULL;
1872 
1873 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1874 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1875 					  b, buffer_offset,
1876 					  sizeof(object_offset)))
1877 		return NULL;
1878 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1879 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1880 		return NULL;
1881 	if (object_offsetp)
1882 		*object_offsetp = object_offset;
1883 
1884 	return &object->bbo;
1885 }
1886 
1887 /**
1888  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1889  * @proc:		binder_proc owning the buffer
1890  * @b:			transaction buffer
1891  * @objects_start_offset: offset to start of objects buffer
1892  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1893  * @fixup_offset:	start offset in @b to fix up
1894  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1895  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1896  *
1897  * Return:		%true if a fixup in buffer @b at offset @fixup_offset
1898  *			is allowed.
1899  *
1900  * For safety reasons, we only allow fixups inside a buffer to happen
1901  * at increasing offsets; additionally, we only allow fixup on the last
1902  * buffer object that was verified, or one of its parents.
1903  *
1904  * Example of what is allowed:
1905  *
1906  * A
1907  *   B (parent = A, offset = 0)
1908  *   C (parent = A, offset = 16)
1909  *     D (parent = C, offset = 0)
1910  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1911  *
1912  * Examples of what is not allowed:
1913  *
1914  * Decreasing offsets within the same parent:
1915  * A
1916  *   C (parent = A, offset = 16)
1917  *   B (parent = A, offset = 0) // decreasing offset within A
1918  *
1919  * Referring to a parent that wasn't the last object or any of its parents:
1920  * A
1921  *   B (parent = A, offset = 0)
1923  *   C (parent = A, offset = 16)
1924  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1925  */
1926 static bool binder_validate_fixup(struct binder_proc *proc,
1927 				  struct binder_buffer *b,
1928 				  binder_size_t objects_start_offset,
1929 				  binder_size_t buffer_obj_offset,
1930 				  binder_size_t fixup_offset,
1931 				  binder_size_t last_obj_offset,
1932 				  binder_size_t last_min_offset)
1933 {
1934 	if (!last_obj_offset) {
1935 		/* No object verified yet, so no fixup is allowed */
1936 		return false;
1937 	}
1938 
1939 	while (last_obj_offset != buffer_obj_offset) {
1940 		unsigned long buffer_offset;
1941 		struct binder_object last_object;
1942 		struct binder_buffer_object *last_bbo;
1943 		size_t object_size = binder_get_object(proc, NULL, b,
1944 						       last_obj_offset,
1945 						       &last_object);
1946 		if (object_size != sizeof(*last_bbo))
1947 			return false;
1948 
1949 		last_bbo = &last_object.bbo;
1950 		/*
1951 		 * Safe to retrieve the parent of last_obj, since it
1952 		 * was already previously verified by the driver.
1953 		 */
1954 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1955 			return false;
1956 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1957 		buffer_offset = objects_start_offset +
1958 			sizeof(binder_size_t) * last_bbo->parent;
1959 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1960 						  &last_obj_offset,
1961 						  b, buffer_offset,
1962 						  sizeof(last_obj_offset)))
1963 			return false;
1964 	}
1965 	return (fixup_offset >= last_min_offset);
1966 }
1967 
1968 /**
1969  * struct binder_task_work_cb - for deferred close
1970  *
1971  * @twork:                callback_head for task work
1972  * @file:                 file to close
1973  *
1974  * Structure to pass task work to be handled after
1975  * returning from binder_ioctl() via task_work_add().
1976  */
1977 struct binder_task_work_cb {
1978 	struct callback_head twork;
1979 	struct file *file;
1980 };
1981 
1982 /**
1983  * binder_do_fd_close() - close list of file descriptors
1984  * @twork:	callback head for task work
1985  *
1986  * It is not safe to call ksys_close() during the binder_ioctl()
1987  * function if there is a chance that binder's own file descriptor
1988  * might be closed. This is to meet the requirements for using
1989  * fdget() (see comments for __fget_light()). Therefore use
1990  * task_work_add() to schedule the close operation once we have
1991  * returned from binder_ioctl(). This function is a callback
1992  * for that mechanism and does the final fput() on the
1993  * file pinned by binder_deferred_fd_close().
1994  */
1995 static void binder_do_fd_close(struct callback_head *twork)
1996 {
1997 	struct binder_task_work_cb *twcb = container_of(twork,
1998 			struct binder_task_work_cb, twork);
1999 
2000 	fput(twcb->file);
2001 	kfree(twcb);
2002 }
2003 
2004 /**
2005  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2006  * @fd:		file-descriptor to close
2007  *
2008  * See comments in binder_do_fd_close(). This function is used to schedule
2009  * a file-descriptor to be closed after returning from binder_ioctl().
2010  */
2011 static void binder_deferred_fd_close(int fd)
2012 {
2013 	struct binder_task_work_cb *twcb;
2014 
2015 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2016 	if (!twcb)
2017 		return;
2018 	init_task_work(&twcb->twork, binder_do_fd_close);
2019 	twcb->file = file_close_fd(fd);
2020 	if (twcb->file) {
2021 		// pin it until binder_do_fd_close(); see comments there
2022 		get_file(twcb->file);
2023 		filp_close(twcb->file, current->files);
2024 		task_work_add(current, &twcb->twork, TWA_RESUME);
2025 	} else {
2026 		kfree(twcb);
2027 	}
2028 }
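
/*
 * Illustrative timeline for the two functions above (a sketch, not
 * additional driver code): the fd number is freed immediately, but
 * the final reference on the struct file is only dropped once the
 * thread is on its way back to user space.
 *
 *	binder_deferred_fd_close(fd)
 *	    file_close_fd(fd)              fd table entry removed now
 *	    get_file(file)                 extra ref pins the file
 *	    filp_close(file, files)        flush + drop the close's ref
 *	    task_work_add(..., TWA_RESUME)
 *	...binder_ioctl() returns...
 *	binder_do_fd_close()
 *	    fput(file)                     extra ref dropped here
 */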
2029 
2030 static void binder_transaction_buffer_release(struct binder_proc *proc,
2031 					      struct binder_thread *thread,
2032 					      struct binder_buffer *buffer,
2033 					      binder_size_t off_end_offset,
2034 					      bool is_failure)
2035 {
2036 	int debug_id = buffer->debug_id;
2037 	binder_size_t off_start_offset, buffer_offset;
2038 
2039 	binder_debug(BINDER_DEBUG_TRANSACTION,
2040 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2041 		     proc->pid, buffer->debug_id,
2042 		     buffer->data_size, buffer->offsets_size,
2043 		     (unsigned long long)off_end_offset);
2044 
2045 	if (buffer->target_node)
2046 		binder_dec_node(buffer->target_node, 1, 0);
2047 
2048 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2049 
2050 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2051 	     buffer_offset += sizeof(binder_size_t)) {
2052 		struct binder_object_header *hdr;
2053 		size_t object_size = 0;
2054 		struct binder_object object;
2055 		binder_size_t object_offset;
2056 
2057 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2058 						   buffer, buffer_offset,
2059 						   sizeof(object_offset)))
2060 			object_size = binder_get_object(proc, NULL, buffer,
2061 							object_offset, &object);
2062 		if (object_size == 0) {
2063 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2064 			       debug_id, (u64)object_offset, buffer->data_size);
2065 			continue;
2066 		}
2067 		hdr = &object.hdr;
2068 		switch (hdr->type) {
2069 		case BINDER_TYPE_BINDER:
2070 		case BINDER_TYPE_WEAK_BINDER: {
2071 			struct flat_binder_object *fp;
2072 			struct binder_node *node;
2073 
2074 			fp = to_flat_binder_object(hdr);
2075 			node = binder_get_node(proc, fp->binder);
2076 			if (node == NULL) {
2077 				pr_err("transaction release %d bad node %016llx\n",
2078 				       debug_id, (u64)fp->binder);
2079 				break;
2080 			}
2081 			binder_debug(BINDER_DEBUG_TRANSACTION,
2082 				     "        node %d u%016llx\n",
2083 				     node->debug_id, (u64)node->ptr);
2084 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2085 					0);
2086 			binder_put_node(node);
2087 		} break;
2088 		case BINDER_TYPE_HANDLE:
2089 		case BINDER_TYPE_WEAK_HANDLE: {
2090 			struct flat_binder_object *fp;
2091 			struct binder_ref_data rdata;
2092 			int ret;
2093 
2094 			fp = to_flat_binder_object(hdr);
2095 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2096 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2097 
2098 			if (ret) {
2099 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2100 				 debug_id, fp->handle, ret);
2101 				break;
2102 			}
2103 			binder_debug(BINDER_DEBUG_TRANSACTION,
2104 				     "        ref %d desc %d\n",
2105 				     rdata.debug_id, rdata.desc);
2106 		} break;
2107 
2108 		case BINDER_TYPE_FD: {
2109 			/*
2110 			 * No need to close the file here since user-space
2111 			 * closes it for successfully delivered
2112 			 * transactions. For transactions that weren't
2113 			 * delivered, the new fd was never allocated so
2114 			 * there is no need to close and the fput on the
2115 			 * file is done when the transaction is torn
2116 			 * down.
2117 			 */
2118 		} break;
2119 		case BINDER_TYPE_PTR:
2120 			/*
2121 			 * Nothing to do here; this will get cleaned up when the
2122 			 * transaction buffer gets freed.
2123 			 */
2124 			break;
2125 		case BINDER_TYPE_FDA: {
2126 			struct binder_fd_array_object *fda;
2127 			struct binder_buffer_object *parent;
2128 			struct binder_object ptr_object;
2129 			binder_size_t fda_offset;
2130 			size_t fd_index;
2131 			binder_size_t fd_buf_size;
2132 			binder_size_t num_valid;
2133 
2134 			if (is_failure) {
2135 				/*
2136 				 * The fd fixups have not been applied so no
2137 				 * fds need to be closed.
2138 				 */
2139 				continue;
2140 			}
2141 
2142 			num_valid = (buffer_offset - off_start_offset) /
2143 						sizeof(binder_size_t);
2144 			fda = to_binder_fd_array_object(hdr);
2145 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2146 						     fda->parent,
2147 						     off_start_offset,
2148 						     NULL,
2149 						     num_valid);
2150 			if (!parent) {
2151 				pr_err("transaction release %d bad parent offset\n",
2152 				       debug_id);
2153 				continue;
2154 			}
2155 			fd_buf_size = sizeof(u32) * fda->num_fds;
2156 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2157 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2158 				       debug_id, (u64)fda->num_fds);
2159 				continue;
2160 			}
2161 			if (fd_buf_size > parent->length ||
2162 			    fda->parent_offset > parent->length - fd_buf_size) {
2163 				/* No space for all file descriptors here. */
2164 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2165 				       debug_id, (u64)fda->num_fds);
2166 				continue;
2167 			}
2168 			/*
2169 			 * the source data for binder_buffer_object is visible
2170 			 * to user-space and the @buffer element is the user
2171 			 * pointer to the buffer_object containing the fd_array.
2172 			 * Convert the address to an offset relative to
2173 			 * the base of the transaction buffer.
2174 			 */
2175 			fda_offset = parent->buffer - buffer->user_data +
2176 				fda->parent_offset;
2177 			for (fd_index = 0; fd_index < fda->num_fds;
2178 			     fd_index++) {
2179 				u32 fd;
2180 				int err;
2181 				binder_size_t offset = fda_offset +
2182 					fd_index * sizeof(fd);
2183 
2184 				err = binder_alloc_copy_from_buffer(
2185 						&proc->alloc, &fd, buffer,
2186 						offset, sizeof(fd));
2187 				WARN_ON(err);
2188 				if (!err) {
2189 					binder_deferred_fd_close(fd);
2190 					/*
2191 					 * Need to make sure the thread goes
2192 					 * back to userspace to complete the
2193 					 * deferred close
2194 					 */
2195 					if (thread)
2196 						thread->looper_need_return = true;
2197 				}
2198 			}
2199 		} break;
2200 		default:
2201 			pr_err("transaction release %d bad object type %x\n",
2202 				debug_id, hdr->type);
2203 			break;
2204 		}
2205 	}
2206 }
2207 
2208 /* Clean up all the objects in the buffer */
2209 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2210 						struct binder_thread *thread,
2211 						struct binder_buffer *buffer,
2212 						bool is_failure)
2213 {
2214 	binder_size_t off_end_offset;
2215 
2216 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2217 	off_end_offset += buffer->offsets_size;
2218 
2219 	binder_transaction_buffer_release(proc, thread, buffer,
2220 					  off_end_offset, is_failure);
2221 }
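
/*
 * Transaction buffer layout assumed by the two release functions
 * above (illustrative, offsets relative to buffer->user_data):
 *
 *	0                 payload, data_size bytes
 *	off_start_offset  ALIGN(data_size, sizeof(void *)): the offset
 *	                  array, one binder_size_t per binder object
 *	off_end_offset    off_start_offset + offsets_size: end of the
 *	                  offset array; any sg/extra buffers follow
 */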
2222 
2223 static int binder_translate_binder(struct flat_binder_object *fp,
2224 				   struct binder_transaction *t,
2225 				   struct binder_thread *thread)
2226 {
2227 	struct binder_node *node;
2228 	struct binder_proc *proc = thread->proc;
2229 	struct binder_proc *target_proc = t->to_proc;
2230 	struct binder_ref_data rdata;
2231 	int ret = 0;
2232 
2233 	node = binder_get_node(proc, fp->binder);
2234 	if (!node) {
2235 		node = binder_new_node(proc, fp);
2236 		if (!node)
2237 			return -ENOMEM;
2238 	}
2239 	if (fp->cookie != node->cookie) {
2240 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2241 				  proc->pid, thread->pid, (u64)fp->binder,
2242 				  node->debug_id, (u64)fp->cookie,
2243 				  (u64)node->cookie);
2244 		ret = -EINVAL;
2245 		goto done;
2246 	}
2247 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2248 		ret = -EPERM;
2249 		goto done;
2250 	}
2251 
2252 	ret = binder_inc_ref_for_node(target_proc, node,
2253 			fp->hdr.type == BINDER_TYPE_BINDER,
2254 			&thread->todo, &rdata);
2255 	if (ret)
2256 		goto done;
2257 
2258 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2259 		fp->hdr.type = BINDER_TYPE_HANDLE;
2260 	else
2261 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2262 	fp->binder = 0;
2263 	fp->handle = rdata.desc;
2264 	fp->cookie = 0;
2265 
2266 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2267 	binder_debug(BINDER_DEBUG_TRANSACTION,
2268 		     "        node %d u%016llx -> ref %d desc %d\n",
2269 		     node->debug_id, (u64)node->ptr,
2270 		     rdata.debug_id, rdata.desc);
2271 done:
2272 	binder_put_node(node);
2273 	return ret;
2274 }
2275 
2276 static int binder_translate_handle(struct flat_binder_object *fp,
2277 				   struct binder_transaction *t,
2278 				   struct binder_thread *thread)
2279 {
2280 	struct binder_proc *proc = thread->proc;
2281 	struct binder_proc *target_proc = t->to_proc;
2282 	struct binder_node *node;
2283 	struct binder_ref_data src_rdata;
2284 	int ret = 0;
2285 
2286 	node = binder_get_node_from_ref(proc, fp->handle,
2287 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2288 	if (!node) {
2289 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2290 				  proc->pid, thread->pid, fp->handle);
2291 		return -EINVAL;
2292 	}
2293 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2294 		ret = -EPERM;
2295 		goto done;
2296 	}
2297 
2298 	binder_node_lock(node);
2299 	if (node->proc == target_proc) {
2300 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2301 			fp->hdr.type = BINDER_TYPE_BINDER;
2302 		else
2303 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2304 		fp->binder = node->ptr;
2305 		fp->cookie = node->cookie;
2306 		if (node->proc)
2307 			binder_inner_proc_lock(node->proc);
2308 		else
2309 			__acquire(&node->proc->inner_lock);
2310 		binder_inc_node_nilocked(node,
2311 					 fp->hdr.type == BINDER_TYPE_BINDER,
2312 					 0, NULL);
2313 		if (node->proc)
2314 			binder_inner_proc_unlock(node->proc);
2315 		else
2316 			__release(&node->proc->inner_lock);
2317 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2318 		binder_debug(BINDER_DEBUG_TRANSACTION,
2319 			     "        ref %d desc %d -> node %d u%016llx\n",
2320 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2321 			     (u64)node->ptr);
2322 		binder_node_unlock(node);
2323 	} else {
2324 		struct binder_ref_data dest_rdata;
2325 
2326 		binder_node_unlock(node);
2327 		ret = binder_inc_ref_for_node(target_proc, node,
2328 				fp->hdr.type == BINDER_TYPE_HANDLE,
2329 				NULL, &dest_rdata);
2330 		if (ret)
2331 			goto done;
2332 
2333 		fp->binder = 0;
2334 		fp->handle = dest_rdata.desc;
2335 		fp->cookie = 0;
2336 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2337 						    &dest_rdata);
2338 		binder_debug(BINDER_DEBUG_TRANSACTION,
2339 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2340 			     src_rdata.debug_id, src_rdata.desc,
2341 			     dest_rdata.debug_id, dest_rdata.desc,
2342 			     node->debug_id);
2343 	}
2344 done:
2345 	binder_put_node(node);
2346 	return ret;
2347 }
2348 
2349 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2350 			       struct binder_transaction *t,
2351 			       struct binder_thread *thread,
2352 			       struct binder_transaction *in_reply_to)
2353 {
2354 	struct binder_proc *proc = thread->proc;
2355 	struct binder_proc *target_proc = t->to_proc;
2356 	struct binder_txn_fd_fixup *fixup;
2357 	struct file *file;
2358 	int ret = 0;
2359 	bool target_allows_fd;
2360 
2361 	if (in_reply_to)
2362 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2363 	else
2364 		target_allows_fd = t->buffer->target_node->accept_fds;
2365 	if (!target_allows_fd) {
2366 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2367 				  proc->pid, thread->pid,
2368 				  in_reply_to ? "reply" : "transaction",
2369 				  fd);
2370 		ret = -EPERM;
2371 		goto err_fd_not_accepted;
2372 	}
2373 
2374 	file = fget(fd);
2375 	if (!file) {
2376 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2377 				  proc->pid, thread->pid, fd);
2378 		ret = -EBADF;
2379 		goto err_fget;
2380 	}
2381 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2382 	if (ret < 0) {
2383 		ret = -EPERM;
2384 		goto err_security;
2385 	}
2386 
2387 	/*
2388 	 * Add fixup record for this transaction. The allocation
2389 	 * of the fd in the target needs to be done from a
2390 	 * target thread.
2391 	 */
2392 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2393 	if (!fixup) {
2394 		ret = -ENOMEM;
2395 		goto err_alloc;
2396 	}
2397 	fixup->file = file;
2398 	fixup->offset = fd_offset;
2399 	fixup->target_fd = -1;
2400 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2401 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2402 
2403 	return ret;
2404 
2405 err_alloc:
2406 err_security:
2407 	fput(file);
2408 err_fget:
2409 err_fd_not_accepted:
2410 	return ret;
2411 }
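
/*
 * Sketch of what happens to the fixup queued above: the target fd
 * stays -1 here because fd numbers can only be allocated in the
 * target's context. Later, binder_apply_fd_fixups() consumes the
 * list and does roughly (illustrative shape only):
 *
 *	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
 *		int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *		fd_install(fd, fixup->file);
 *		...write fd into t->buffer at fixup->offset...
 *	}
 */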
2412 
2413 /**
2414  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2415  * @offset:	offset in target buffer to fixup
2416  * @skip_size:	bytes to skip in copy (fixup will be written later)
2417  * @fixup_data:	data to write at fixup offset
2418  * @node:	list node
2419  *
2420  * This is used for the pointer fixup list (pf) which is created and consumed
2421  * during binder_transaction() and is only accessed locally. No
2422  * locking is necessary.
2423  *
2424  * The list is ordered by @offset.
2425  */
2426 struct binder_ptr_fixup {
2427 	binder_size_t offset;
2428 	size_t skip_size;
2429 	binder_uintptr_t fixup_data;
2430 	struct list_head node;
2431 };
2432 
2433 /**
2434  * struct binder_sg_copy - scatter-gather data to be copied
2435  * @offset:		offset in target buffer
2436  * @sender_uaddr:	user address in source buffer
2437  * @length:		bytes to copy
2438  * @node:		list node
2439  *
2440  * This is used for the sg copy list (sgc) which is created and consumed
2441  * during binder_transaction() and is only accessed locally. No
2442  * locking is necessary.
2443  *
2444  * The list is ordered by @offset.
2445  */
2446 struct binder_sg_copy {
2447 	binder_size_t offset;
2448 	const void __user *sender_uaddr;
2449 	size_t length;
2450 	struct list_head node;
2451 };
2452 
2453 /**
2454  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2455  * @alloc:	binder_alloc associated with @buffer
2456  * @buffer:	binder buffer in target process
2457  * @sgc_head:	list_head of scatter-gather copy list
2458  * @pf_head:	list_head of pointer fixup list
2459  *
2460  * Processes all elements of @sgc_head, applying fixups from @pf_head
2461  * and copying the scatter-gather data from the source process' user
2462  * buffer to the target's buffer. It is expected that the list creation
2463  * and processing all occurs during binder_transaction() so these lists
2464  * are only accessed in local context.
2465  *
2466  * Return: 0=success, else -errno
2467  */
2468 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2469 					 struct binder_buffer *buffer,
2470 					 struct list_head *sgc_head,
2471 					 struct list_head *pf_head)
2472 {
2473 	int ret = 0;
2474 	struct binder_sg_copy *sgc, *tmpsgc;
2475 	struct binder_ptr_fixup *tmppf;
2476 	struct binder_ptr_fixup *pf =
2477 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2478 					 node);
2479 
2480 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2481 		size_t bytes_copied = 0;
2482 
2483 		while (bytes_copied < sgc->length) {
2484 			size_t copy_size;
2485 			size_t bytes_left = sgc->length - bytes_copied;
2486 			size_t offset = sgc->offset + bytes_copied;
2487 
2488 			/*
2489 			 * We copy up to the fixup (pointed to by pf)
2490 			 */
2491 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2492 				       : bytes_left;
2493 			if (!ret && copy_size)
2494 				ret = binder_alloc_copy_user_to_buffer(
2495 						alloc, buffer,
2496 						offset,
2497 						sgc->sender_uaddr + bytes_copied,
2498 						copy_size);
2499 			bytes_copied += copy_size;
2500 			if (copy_size != bytes_left) {
2501 				BUG_ON(!pf);
2502 				/* we stopped at a fixup offset */
2503 				if (pf->skip_size) {
2504 					/*
2505 					 * we are just skipping. This is for
2506 					 * BINDER_TYPE_FDA where the translated
2507 					 * fds will be fixed up when we get
2508 					 * to target context.
2509 					 */
2510 					bytes_copied += pf->skip_size;
2511 				} else {
2512 					/* apply the fixup indicated by pf */
2513 					if (!ret)
2514 						ret = binder_alloc_copy_to_buffer(
2515 							alloc, buffer,
2516 							pf->offset,
2517 							&pf->fixup_data,
2518 							sizeof(pf->fixup_data));
2519 					bytes_copied += sizeof(pf->fixup_data);
2520 				}
2521 				list_del(&pf->node);
2522 				kfree(pf);
2523 				pf = list_first_entry_or_null(pf_head,
2524 						struct binder_ptr_fixup, node);
2525 			}
2526 		}
2527 		list_del(&sgc->node);
2528 		kfree(sgc);
2529 	}
2530 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2531 		BUG_ON(pf->skip_size == 0);
2532 		list_del(&pf->node);
2533 		kfree(pf);
2534 	}
2535 	BUG_ON(!list_empty(sgc_head));
2536 
2537 	return ret > 0 ? -EINVAL : ret;
2538 }
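
/*
 * Worked example for the copy loop above (illustrative values): one
 * sg block of 32 bytes at offset 0, with a single 8-byte pointer
 * fixup queued at offset 16, proceeds as
 *
 *	copy bytes [0, 16) from the sender   (copy_size is capped at
 *	                                      pf->offset - offset)
 *	write fixup_data at offset 16        (or skip 8 bytes if
 *	                                      pf->skip_size is set, as
 *	                                      for BINDER_TYPE_FDA)
 *	copy bytes [24, 32) from the sender
 */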
2539 
2540 /**
2541  * binder_cleanup_deferred_txn_lists() - free specified lists
2542  * @sgc_head:	list_head of scatter-gather copy list
2543  * @pf_head:	list_head of pointer fixup list
2544  *
2545  * Called to clean up @sgc_head and @pf_head if there is an
2546  * error.
2547  */
2548 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2549 					      struct list_head *pf_head)
2550 {
2551 	struct binder_sg_copy *sgc, *tmpsgc;
2552 	struct binder_ptr_fixup *pf, *tmppf;
2553 
2554 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2555 		list_del(&sgc->node);
2556 		kfree(sgc);
2557 	}
2558 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2559 		list_del(&pf->node);
2560 		kfree(pf);
2561 	}
2562 }
2563 
2564 /**
2565  * binder_defer_copy() - queue a scatter-gather buffer for copy
2566  * @sgc_head:		list_head of scatter-gather copy list
2567  * @offset:		binder buffer offset in target process
2568  * @sender_uaddr:	user address in source process
2569  * @length:		bytes to copy
2570  *
2571  * Specify a scatter-gather block to be copied. The actual copy must
2572  * be deferred until all the needed fixups are identified and queued.
2573  * Then the copy and fixups are done together so un-translated values
2574  * from the source are never visible in the target buffer.
2575  *
2576  * We are guaranteed that repeated calls to this function will have
2577  * monotonically increasing @offset values so the list will naturally
2578  * be ordered.
2579  *
2580  * Return: 0=success, else -errno
2581  */
2582 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2583 			     const void __user *sender_uaddr, size_t length)
2584 {
2585 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2586 
2587 	if (!bc)
2588 		return -ENOMEM;
2589 
2590 	bc->offset = offset;
2591 	bc->sender_uaddr = sender_uaddr;
2592 	bc->length = length;
2593 	INIT_LIST_HEAD(&bc->node);
2594 
2595 	/*
2596 	 * We are guaranteed that the deferred copies are in-order
2597 	 * so just add to the tail.
2598 	 */
2599 	list_add_tail(&bc->node, sgc_head);
2600 
2601 	return 0;
2602 }
2603 
2604 /**
2605  * binder_add_fixup() - queue a fixup to be applied to sg copy
2606  * @pf_head:	list_head of binder ptr fixup list
2607  * @offset:	binder buffer offset in target process
2608  * @fixup:	bytes to be copied for fixup
2609  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2610  *
2611  * Add the specified fixup to a list ordered by @offset. When copying
2612  * the scatter-gather buffers, the fixup will be copied instead of
2613  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2614  * will be applied later (in target process context), so we just skip
2615  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2616  * value in @fixup.
2617  *
2618  * This function is called *mostly* in @offset order, but there are
2619  * exceptions. Since out-of-order inserts are relatively uncommon,
2620  * we insert the new element by searching backward from the tail of
2621  * the list.
2622  *
2623  * Return: 0=success, else -errno
2624  */
2625 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2626 			    binder_uintptr_t fixup, size_t skip_size)
2627 {
2628 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2629 	struct binder_ptr_fixup *tmppf;
2630 
2631 	if (!pf)
2632 		return -ENOMEM;
2633 
2634 	pf->offset = offset;
2635 	pf->fixup_data = fixup;
2636 	pf->skip_size = skip_size;
2637 	INIT_LIST_HEAD(&pf->node);
2638 
2639 	/* Fixups are *mostly* added in-order, but there are some
2640 	 * exceptions. Look backwards through list for insertion point.
2641 	 */
2642 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2643 		if (tmppf->offset < pf->offset) {
2644 			list_add(&pf->node, &tmppf->node);
2645 			return 0;
2646 		}
2647 	}
2648 	/*
2649 	 * if we get here, then the new offset is the lowest so
2650 	 * insert at the head
2651 	 */
2652 	list_add(&pf->node, pf_head);
2653 	return 0;
2654 }
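
/*
 * Illustrative: with fixups already queued at offsets 8 and 32,
 * binder_add_fixup(pf_head, 16, ...) walks backward past the entry
 * at 32, stops at the entry at 8 (since 8 < 16), and inserts after
 * it, keeping the list ordered: 8 -> 16 -> 32.
 */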
2655 
2656 static int binder_translate_fd_array(struct list_head *pf_head,
2657 				     struct binder_fd_array_object *fda,
2658 				     const void __user *sender_ubuffer,
2659 				     struct binder_buffer_object *parent,
2660 				     struct binder_buffer_object *sender_uparent,
2661 				     struct binder_transaction *t,
2662 				     struct binder_thread *thread,
2663 				     struct binder_transaction *in_reply_to)
2664 {
2665 	binder_size_t fdi, fd_buf_size;
2666 	binder_size_t fda_offset;
2667 	const void __user *sender_ufda_base;
2668 	struct binder_proc *proc = thread->proc;
2669 	int ret;
2670 
2671 	if (fda->num_fds == 0)
2672 		return 0;
2673 
2674 	fd_buf_size = sizeof(u32) * fda->num_fds;
2675 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2676 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2677 				  proc->pid, thread->pid, (u64)fda->num_fds);
2678 		return -EINVAL;
2679 	}
2680 	if (fd_buf_size > parent->length ||
2681 	    fda->parent_offset > parent->length - fd_buf_size) {
2682 		/* No space for all file descriptors here. */
2683 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2684 				  proc->pid, thread->pid, (u64)fda->num_fds);
2685 		return -EINVAL;
2686 	}
2687 	/*
2688 	 * the source data for binder_buffer_object is visible
2689 	 * to user-space and the @buffer element is the user
2690 	 * pointer to the buffer_object containing the fd_array.
2691 	 * Convert the address to an offset relative to
2692 	 * the base of the transaction buffer.
2693 	 */
2694 	fda_offset = parent->buffer - t->buffer->user_data +
2695 		fda->parent_offset;
2696 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2697 				fda->parent_offset;
2698 
2699 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2700 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2701 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2702 				  proc->pid, thread->pid);
2703 		return -EINVAL;
2704 	}
2705 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2706 	if (ret)
2707 		return ret;
2708 
2709 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2710 		u32 fd;
2711 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2712 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2713 
2714 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2715 		if (!ret)
2716 			ret = binder_translate_fd(fd, offset, t, thread,
2717 						  in_reply_to);
2718 		if (ret)
2719 			return ret > 0 ? -EINVAL : ret;
2720 	}
2721 	return 0;
2722 }
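
/*
 * Example of the fda_offset arithmetic above (illustrative values):
 * if the transaction buffer starts at user_data 0x7f0000 in the
 * target mapping, the parent buffer object lives at parent->buffer
 * == 0x7f0100, and fda->parent_offset == 0x20, then the fd array is
 * at offset
 *
 *	fda_offset = 0x7f0100 - 0x7f0000 + 0x20 = 0x120
 *
 * into the transaction buffer, and fd i is patched at
 * fda_offset + i * sizeof(u32).
 */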
2723 
2724 static int binder_fixup_parent(struct list_head *pf_head,
2725 			       struct binder_transaction *t,
2726 			       struct binder_thread *thread,
2727 			       struct binder_buffer_object *bp,
2728 			       binder_size_t off_start_offset,
2729 			       binder_size_t num_valid,
2730 			       binder_size_t last_fixup_obj_off,
2731 			       binder_size_t last_fixup_min_off)
2732 {
2733 	struct binder_buffer_object *parent;
2734 	struct binder_buffer *b = t->buffer;
2735 	struct binder_proc *proc = thread->proc;
2736 	struct binder_proc *target_proc = t->to_proc;
2737 	struct binder_object object;
2738 	binder_size_t buffer_offset;
2739 	binder_size_t parent_offset;
2740 
2741 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2742 		return 0;
2743 
2744 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2745 				     off_start_offset, &parent_offset,
2746 				     num_valid);
2747 	if (!parent) {
2748 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2749 				  proc->pid, thread->pid);
2750 		return -EINVAL;
2751 	}
2752 
2753 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2754 				   parent_offset, bp->parent_offset,
2755 				   last_fixup_obj_off,
2756 				   last_fixup_min_off)) {
2757 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2758 				  proc->pid, thread->pid);
2759 		return -EINVAL;
2760 	}
2761 
2762 	if (parent->length < sizeof(binder_uintptr_t) ||
2763 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2764 		/* No space for a pointer here! */
2765 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2766 				  proc->pid, thread->pid);
2767 		return -EINVAL;
2768 	}
2769 
2770 	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2771 
2772 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2773 }
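
/*
 * Illustrative: for a child buffer object bp with
 * BINDER_BUFFER_FLAG_HAS_PARENT set, the fixup queued above writes
 * bp->buffer (the child's address in the target mapping) into the
 * parent's memory at
 *
 *	buffer_offset = bp->parent_offset + parent->buffer - b->user_data
 *
 * so the pointer field inside the parent ends up pointing at the
 * copied child rather than at the sender's address.
 */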
2774 
2775 /**
2776  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2777  * @t1: the pending async txn in the frozen process
2778  * @t2: the new async txn to supersede the outdated pending one
2779  *
2780  * Return:  true if t2 can supersede t1
2781  *          false if t2 can not supersede t1
2782  *          false if t2 cannot supersede t1
2783 static bool binder_can_update_transaction(struct binder_transaction *t1,
2784 					  struct binder_transaction *t2)
2785 {
2786 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2787 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2788 		return false;
2789 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2790 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2791 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2792 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2793 		return true;
2794 	return false;
2795 }
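
/*
 * Illustrative scenario (a sketch of the intended use, not extra
 * driver code): a sender repeatedly posts oneway status updates with
 * TF_ONE_WAY | TF_UPDATE_TXN and the same code to a frozen process.
 * Each new update may supersede the still-queued previous one, so on
 * unfreeze the target sees at most one pending update for that code
 * instead of a backlog.
 */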
2796 
2797 /**
2798  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2799  * @t:		 new async transaction
2800  * @target_list: list to find outdated transaction
2801  *
2802  * Return: the outdated transaction if found
2803  *         NULL if no outdated transaction can be found
2804  *
2805  * Requires the proc->inner_lock to be held.
2806  */
2807 static struct binder_transaction *
2808 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2809 					 struct list_head *target_list)
2810 {
2811 	struct binder_work *w;
2812 
2813 	list_for_each_entry(w, target_list, entry) {
2814 		struct binder_transaction *t_queued;
2815 
2816 		if (w->type != BINDER_WORK_TRANSACTION)
2817 			continue;
2818 		t_queued = container_of(w, struct binder_transaction, work);
2819 		if (binder_can_update_transaction(t_queued, t))
2820 			return t_queued;
2821 	}
2822 	return NULL;
2823 }
2824 
2825 /**
2826  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2827  * @t:		transaction to send
2828  * @proc:	process to send the transaction to
2829  * @thread:	thread in @proc to send the transaction to (may be NULL)
2830  *
2831  * This function queues a transaction to the specified process. It will try
2832  * to find a thread in the target process to handle the transaction and
2833  * wake it up. If no thread is found, the work is queued to the proc
2834  * waitqueue.
2835  *
2836  * If the @thread parameter is not NULL, the transaction is always queued
2837  * to the waitlist of that specific thread.
2838  *
2839  * Return:	0 if the transaction was successfully queued
2840  *		BR_DEAD_REPLY if the target process or thread is dead
2841  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2842  *			the sync transaction was rejected
2843  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2844  *		and the async transaction was successfully queued
2845  */
2846 static int binder_proc_transaction(struct binder_transaction *t,
2847 				    struct binder_proc *proc,
2848 				    struct binder_thread *thread)
2849 {
2850 	struct binder_node *node = t->buffer->target_node;
2851 	bool oneway = !!(t->flags & TF_ONE_WAY);
2852 	bool pending_async = false;
2853 	struct binder_transaction *t_outdated = NULL;
2854 	bool frozen = false;
2855 
2856 	BUG_ON(!node);
2857 	binder_node_lock(node);
2858 	if (oneway) {
2859 		BUG_ON(thread);
2860 		if (node->has_async_transaction)
2861 			pending_async = true;
2862 		else
2863 			node->has_async_transaction = true;
2864 	}
2865 
2866 	binder_inner_proc_lock(proc);
2867 	if (proc->is_frozen) {
2868 		frozen = true;
2869 		proc->sync_recv |= !oneway;
2870 		proc->async_recv |= oneway;
2871 	}
2872 
2873 	if ((frozen && !oneway) || proc->is_dead ||
2874 			(thread && thread->is_dead)) {
2875 		binder_inner_proc_unlock(proc);
2876 		binder_node_unlock(node);
2877 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2878 	}
2879 
2880 	if (!thread && !pending_async)
2881 		thread = binder_select_thread_ilocked(proc);
2882 
2883 	if (thread) {
2884 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2885 	} else if (!pending_async) {
2886 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2887 	} else {
2888 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2889 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2890 									      &node->async_todo);
2891 			if (t_outdated) {
2892 				binder_debug(BINDER_DEBUG_TRANSACTION,
2893 					     "txn %d supersedes %d\n",
2894 					     t->debug_id, t_outdated->debug_id);
2895 				list_del_init(&t_outdated->work.entry);
2896 				proc->outstanding_txns--;
2897 			}
2898 		}
2899 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2900 	}
2901 
2902 	if (!pending_async)
2903 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2904 
2905 	proc->outstanding_txns++;
2906 	binder_inner_proc_unlock(proc);
2907 	binder_node_unlock(node);
2908 
2909 	/*
2910 	 * To reduce potential contention, free the outdated transaction and
2911 	 * buffer after releasing the locks.
2912 	 */
2913 	if (t_outdated) {
2914 		struct binder_buffer *buffer = t_outdated->buffer;
2915 
2916 		t_outdated->buffer = NULL;
2917 		buffer->transaction = NULL;
2918 		trace_binder_transaction_update_buffer_release(buffer);
2919 		binder_release_entire_buffer(proc, NULL, buffer, false);
2920 		binder_alloc_free_buf(&proc->alloc, buffer);
2921 		kfree(t_outdated);
2922 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2923 	}
2924 
2925 	if (oneway && frozen)
2926 		return BR_TRANSACTION_PENDING_FROZEN;
2927 
2928 	return 0;
2929 }
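
/*
 * How callers are expected to treat the return codes above
 * (illustrative summary): 0 and BR_TRANSACTION_PENDING_FROZEN both
 * mean the transaction was queued (the latter additionally tells a
 * oneway sender that the target is frozen), while BR_DEAD_REPLY and
 * BR_FROZEN_REPLY mean it was rejected and the sender must unwind
 * and deliver the error.
 */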
2930 
2931 /**
2932  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2933  * @node:         struct binder_node for which to get refs
2934  * @procp:        returns @node->proc if valid
2935  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2936  *
2937  * User-space normally keeps the node alive when creating a transaction
2938  * since it has a reference to the target. The local strong ref keeps it
2939  * alive if the sending process dies before the target process processes
2940  * the transaction. If the source process is malicious or has a reference
2941  * counting bug, relying on the local strong ref can fail.
2942  *
2943  * Since user-space can cause the local strong ref to go away, we also take
2944  * a tmpref on the node to ensure it survives while we are constructing
2945  * the transaction. We also need a tmpref on the proc while we are
2946  * constructing the transaction, so we take that here as well.
2947  *
2948  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2949  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2950  * target proc has died, @error is set to BR_DEAD_REPLY.
2951  */
2952 static struct binder_node *binder_get_node_refs_for_txn(
2953 		struct binder_node *node,
2954 		struct binder_proc **procp,
2955 		uint32_t *error)
2956 {
2957 	struct binder_node *target_node = NULL;
2958 
2959 	binder_node_inner_lock(node);
2960 	if (node->proc) {
2961 		target_node = node;
2962 		binder_inc_node_nilocked(node, 1, 0, NULL);
2963 		binder_inc_node_tmpref_ilocked(node);
2964 		node->proc->tmp_ref++;
2965 		*procp = node->proc;
2966 	} else
2967 		*error = BR_DEAD_REPLY;
2968 	binder_node_inner_unlock(node);
2969 
2970 	return target_node;
2971 }
2972 
2973 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2974 				      uint32_t command, int32_t param)
2975 {
2976 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2977 
2978 	if (!from) {
2979 		/* annotation for sparse */
2980 		__release(&from->proc->inner_lock);
2981 		return;
2982 	}
2983 
2984 	/* don't override existing errors */
2985 	if (from->ee.command == BR_OK)
2986 		binder_set_extended_error(&from->ee, id, command, param);
2987 	binder_inner_proc_unlock(from->proc);
2988 	binder_thread_dec_tmpref(from);
2989 }
2990 
2991 static void binder_transaction(struct binder_proc *proc,
2992 			       struct binder_thread *thread,
2993 			       struct binder_transaction_data *tr, int reply,
2994 			       binder_size_t extra_buffers_size)
2995 {
2996 	int ret;
2997 	struct binder_transaction *t;
2998 	struct binder_work *w;
2999 	struct binder_work *tcomplete;
3000 	binder_size_t buffer_offset = 0;
3001 	binder_size_t off_start_offset, off_end_offset;
3002 	binder_size_t off_min;
3003 	binder_size_t sg_buf_offset, sg_buf_end_offset;
3004 	binder_size_t user_offset = 0;
3005 	struct binder_proc *target_proc = NULL;
3006 	struct binder_thread *target_thread = NULL;
3007 	struct binder_node *target_node = NULL;
3008 	struct binder_transaction *in_reply_to = NULL;
3009 	struct binder_transaction_log_entry *e;
3010 	uint32_t return_error = 0;
3011 	uint32_t return_error_param = 0;
3012 	uint32_t return_error_line = 0;
3013 	binder_size_t last_fixup_obj_off = 0;
3014 	binder_size_t last_fixup_min_off = 0;
3015 	struct binder_context *context = proc->context;
3016 	int t_debug_id = atomic_inc_return(&binder_last_id);
3017 	ktime_t t_start_time = ktime_get();
3018 	char *secctx = NULL;
3019 	u32 secctx_sz = 0;
3020 	struct list_head sgc_head;
3021 	struct list_head pf_head;
3022 	const void __user *user_buffer = (const void __user *)
3023 				(uintptr_t)tr->data.ptr.buffer;
3024 	INIT_LIST_HEAD(&sgc_head);
3025 	INIT_LIST_HEAD(&pf_head);
3026 
3027 	e = binder_transaction_log_add(&binder_transaction_log);
3028 	e->debug_id = t_debug_id;
3029 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3030 	e->from_proc = proc->pid;
3031 	e->from_thread = thread->pid;
3032 	e->target_handle = tr->target.handle;
3033 	e->data_size = tr->data_size;
3034 	e->offsets_size = tr->offsets_size;
3035 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3036 
3037 	binder_inner_proc_lock(proc);
3038 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3039 	binder_inner_proc_unlock(proc);
3040 
3041 	if (reply) {
3042 		binder_inner_proc_lock(proc);
3043 		in_reply_to = thread->transaction_stack;
3044 		if (in_reply_to == NULL) {
3045 			binder_inner_proc_unlock(proc);
3046 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3047 					  proc->pid, thread->pid);
3048 			return_error = BR_FAILED_REPLY;
3049 			return_error_param = -EPROTO;
3050 			return_error_line = __LINE__;
3051 			goto err_empty_call_stack;
3052 		}
3053 		if (in_reply_to->to_thread != thread) {
3054 			spin_lock(&in_reply_to->lock);
3055 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3056 				proc->pid, thread->pid, in_reply_to->debug_id,
3057 				in_reply_to->to_proc ?
3058 				in_reply_to->to_proc->pid : 0,
3059 				in_reply_to->to_thread ?
3060 				in_reply_to->to_thread->pid : 0);
3061 			spin_unlock(&in_reply_to->lock);
3062 			binder_inner_proc_unlock(proc);
3063 			return_error = BR_FAILED_REPLY;
3064 			return_error_param = -EPROTO;
3065 			return_error_line = __LINE__;
3066 			in_reply_to = NULL;
3067 			goto err_bad_call_stack;
3068 		}
3069 		thread->transaction_stack = in_reply_to->to_parent;
3070 		binder_inner_proc_unlock(proc);
3071 		binder_set_nice(in_reply_to->saved_priority);
3072 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3073 		if (target_thread == NULL) {
3074 			/* annotation for sparse */
3075 			__release(&target_thread->proc->inner_lock);
3076 			binder_txn_error("%d:%d reply target not found\n",
3077 				thread->pid, proc->pid);
3078 			return_error = BR_DEAD_REPLY;
3079 			return_error_line = __LINE__;
3080 			goto err_dead_binder;
3081 		}
3082 		if (target_thread->transaction_stack != in_reply_to) {
3083 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3084 				proc->pid, thread->pid,
3085 				target_thread->transaction_stack ?
3086 				target_thread->transaction_stack->debug_id : 0,
3087 				in_reply_to->debug_id);
3088 			binder_inner_proc_unlock(target_thread->proc);
3089 			return_error = BR_FAILED_REPLY;
3090 			return_error_param = -EPROTO;
3091 			return_error_line = __LINE__;
3092 			in_reply_to = NULL;
3093 			target_thread = NULL;
3094 			goto err_dead_binder;
3095 		}
3096 		target_proc = target_thread->proc;
3097 		target_proc->tmp_ref++;
3098 		binder_inner_proc_unlock(target_thread->proc);
3099 	} else {
3100 		if (tr->target.handle) {
3101 			struct binder_ref *ref;
3102 
3103 			/*
3104 			 * There must already be a strong ref
3105 			 * on this node. If so, do a strong
3106 			 * increment on the node to ensure it
3107 			 * stays alive until the transaction is
3108 			 * done.
3109 			 */
3110 			binder_proc_lock(proc);
3111 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3112 						     true);
3113 			if (ref) {
3114 				target_node = binder_get_node_refs_for_txn(
3115 						ref->node, &target_proc,
3116 						&return_error);
3117 			} else {
3118 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3119 						  proc->pid, thread->pid, tr->target.handle);
3120 				return_error = BR_FAILED_REPLY;
3121 			}
3122 			binder_proc_unlock(proc);
3123 		} else {
3124 			mutex_lock(&context->context_mgr_node_lock);
3125 			target_node = context->binder_context_mgr_node;
3126 			if (target_node)
3127 				target_node = binder_get_node_refs_for_txn(
3128 						target_node, &target_proc,
3129 						&return_error);
3130 			else
3131 				return_error = BR_DEAD_REPLY;
3132 			mutex_unlock(&context->context_mgr_node_lock);
3133 			if (target_node && target_proc->pid == proc->pid) {
3134 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3135 						  proc->pid, thread->pid);
3136 				return_error = BR_FAILED_REPLY;
3137 				return_error_param = -EINVAL;
3138 				return_error_line = __LINE__;
3139 				goto err_invalid_target_handle;
3140 			}
3141 		}
3142 		if (!target_node) {
3143 			binder_txn_error("%d:%d cannot find target node\n",
3144 				thread->pid, proc->pid);
3145 			/*
3146 			 * return_error is set above
3147 			 */
3148 			return_error_param = -EINVAL;
3149 			return_error_line = __LINE__;
3150 			goto err_dead_binder;
3151 		}
3152 		e->to_node = target_node->debug_id;
3153 		if (WARN_ON(proc == target_proc)) {
3154 			binder_txn_error("%d:%d self transactions not allowed\n",
3155 				thread->pid, proc->pid);
3156 			return_error = BR_FAILED_REPLY;
3157 			return_error_param = -EINVAL;
3158 			return_error_line = __LINE__;
3159 			goto err_invalid_target_handle;
3160 		}
3161 		if (security_binder_transaction(proc->cred,
3162 						target_proc->cred) < 0) {
3163 			binder_txn_error("%d:%d transaction credentials failed\n",
3164 				thread->pid, proc->pid);
3165 			return_error = BR_FAILED_REPLY;
3166 			return_error_param = -EPERM;
3167 			return_error_line = __LINE__;
3168 			goto err_invalid_target_handle;
3169 		}
3170 		binder_inner_proc_lock(proc);
3171 
3172 		w = list_first_entry_or_null(&thread->todo,
3173 					     struct binder_work, entry);
3174 		if (!(tr->flags & TF_ONE_WAY) && w &&
3175 		    w->type == BINDER_WORK_TRANSACTION) {
3176 			/*
3177 			 * Do not allow new outgoing transaction from a
3178 			 * thread that has a transaction at the head of
3179 			 * its todo list. Only need to check the head
3180 			 * because binder_select_thread_ilocked picks a
3181 			 * thread from proc->waiting_threads to enqueue
3182 			 * the transaction, and nothing is queued to the
3183 			 * todo list while the thread is on waiting_threads.
3184 			 */
3185 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3186 					  proc->pid, thread->pid);
3187 			binder_inner_proc_unlock(proc);
3188 			return_error = BR_FAILED_REPLY;
3189 			return_error_param = -EPROTO;
3190 			return_error_line = __LINE__;
3191 			goto err_bad_todo_list;
3192 		}
3193 
3194 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3195 			struct binder_transaction *tmp;
3196 
3197 			tmp = thread->transaction_stack;
3198 			if (tmp->to_thread != thread) {
3199 				spin_lock(&tmp->lock);
3200 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3201 					proc->pid, thread->pid, tmp->debug_id,
3202 					tmp->to_proc ? tmp->to_proc->pid : 0,
3203 					tmp->to_thread ?
3204 					tmp->to_thread->pid : 0);
3205 				spin_unlock(&tmp->lock);
3206 				binder_inner_proc_unlock(proc);
3207 				return_error = BR_FAILED_REPLY;
3208 				return_error_param = -EPROTO;
3209 				return_error_line = __LINE__;
3210 				goto err_bad_call_stack;
3211 			}
3212 			while (tmp) {
3213 				struct binder_thread *from;
3214 
3215 				spin_lock(&tmp->lock);
3216 				from = tmp->from;
3217 				if (from && from->proc == target_proc) {
3218 					atomic_inc(&from->tmp_ref);
3219 					target_thread = from;
3220 					spin_unlock(&tmp->lock);
3221 					break;
3222 				}
3223 				spin_unlock(&tmp->lock);
3224 				tmp = tmp->from_parent;
3225 			}
3226 		}
3227 		binder_inner_proc_unlock(proc);
3228 	}
3229 	if (target_thread)
3230 		e->to_thread = target_thread->pid;
3231 	e->to_proc = target_proc->pid;
3232 
3233 	/* TODO: reuse incoming transaction for reply */
3234 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3235 	if (t == NULL) {
3236 		binder_txn_error("%d:%d cannot allocate transaction\n",
3237 			thread->pid, proc->pid);
3238 		return_error = BR_FAILED_REPLY;
3239 		return_error_param = -ENOMEM;
3240 		return_error_line = __LINE__;
3241 		goto err_alloc_t_failed;
3242 	}
3243 	INIT_LIST_HEAD(&t->fd_fixups);
3244 	binder_stats_created(BINDER_STAT_TRANSACTION);
3245 	spin_lock_init(&t->lock);
3246 
3247 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3248 	if (tcomplete == NULL) {
3249 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3250 			thread->pid, proc->pid);
3251 		return_error = BR_FAILED_REPLY;
3252 		return_error_param = -ENOMEM;
3253 		return_error_line = __LINE__;
3254 		goto err_alloc_tcomplete_failed;
3255 	}
3256 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3257 
3258 	t->debug_id = t_debug_id;
3259 	t->start_time = t_start_time;
3260 
3261 	if (reply)
3262 		binder_debug(BINDER_DEBUG_TRANSACTION,
3263 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3264 			     proc->pid, thread->pid, t->debug_id,
3265 			     target_proc->pid, target_thread->pid,
3266 			     (u64)tr->data.ptr.buffer,
3267 			     (u64)tr->data.ptr.offsets,
3268 			     (u64)tr->data_size, (u64)tr->offsets_size,
3269 			     (u64)extra_buffers_size);
3270 	else
3271 		binder_debug(BINDER_DEBUG_TRANSACTION,
3272 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3273 			     proc->pid, thread->pid, t->debug_id,
3274 			     target_proc->pid, target_node->debug_id,
3275 			     (u64)tr->data.ptr.buffer,
3276 			     (u64)tr->data.ptr.offsets,
3277 			     (u64)tr->data_size, (u64)tr->offsets_size,
3278 			     (u64)extra_buffers_size);
3279 
3280 	if (!reply && !(tr->flags & TF_ONE_WAY))
3281 		t->from = thread;
3282 	else
3283 		t->from = NULL;
3284 	t->from_pid = proc->pid;
3285 	t->from_tid = thread->pid;
3286 	t->sender_euid = task_euid(proc->tsk);
3287 	t->to_proc = target_proc;
3288 	t->to_thread = target_thread;
3289 	t->code = tr->code;
3290 	t->flags = tr->flags;
3291 	t->priority = task_nice(current);
3292 
3293 	if (target_node && target_node->txn_security_ctx) {
3294 		u32 secid;
3295 		size_t added_size;
3296 
3297 		security_cred_getsecid(proc->cred, &secid);
3298 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3299 		if (ret) {
3300 			binder_txn_error("%d:%d failed to get security context\n",
3301 				thread->pid, proc->pid);
3302 			return_error = BR_FAILED_REPLY;
3303 			return_error_param = ret;
3304 			return_error_line = __LINE__;
3305 			goto err_get_secctx_failed;
3306 		}
3307 		added_size = ALIGN(secctx_sz, sizeof(u64));
3308 		extra_buffers_size += added_size;
3309 		if (extra_buffers_size < added_size) {
3310 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3311 				thread->pid, proc->pid);
3312 			return_error = BR_FAILED_REPLY;
3313 			return_error_param = -EINVAL;
3314 			return_error_line = __LINE__;
3315 			goto err_bad_extra_size;
3316 		}
3317 	}
3318 
3319 	trace_binder_transaction(reply, t, target_node);
3320 
3321 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3322 		tr->offsets_size, extra_buffers_size,
3323 		!reply && (t->flags & TF_ONE_WAY));
3324 	if (IS_ERR(t->buffer)) {
3325 		char *s;
3326 
3327 		ret = PTR_ERR(t->buffer);
3328 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3329 			: (ret == -ENOSPC) ? ": no space left"
3330 			: (ret == -ENOMEM) ? ": memory allocation failed"
3331 			: "";
3332 		binder_txn_error("cannot allocate buffer%s", s);
3333 
3334 		return_error_param = PTR_ERR(t->buffer);
3335 		return_error = return_error_param == -ESRCH ?
3336 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3337 		return_error_line = __LINE__;
3338 		t->buffer = NULL;
3339 		goto err_binder_alloc_buf_failed;
3340 	}
3341 	if (secctx) {
3342 		int err;
3343 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3344 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3345 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3346 				    ALIGN(secctx_sz, sizeof(u64));
3347 
3348 		t->security_ctx = t->buffer->user_data + buf_offset;
3349 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3350 						  t->buffer, buf_offset,
3351 						  secctx, secctx_sz);
3352 		if (err) {
3353 			t->security_ctx = 0;
3354 			WARN_ON(1);
3355 		}
3356 		security_release_secctx(secctx, secctx_sz);
3357 		secctx = NULL;
3358 	}
3359 	t->buffer->debug_id = t->debug_id;
3360 	t->buffer->transaction = t;
3361 	t->buffer->target_node = target_node;
3362 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3363 	trace_binder_transaction_alloc_buf(t->buffer);
3364 
3365 	if (binder_alloc_copy_user_to_buffer(
3366 				&target_proc->alloc,
3367 				t->buffer,
3368 				ALIGN(tr->data_size, sizeof(void *)),
3369 				(const void __user *)
3370 					(uintptr_t)tr->data.ptr.offsets,
3371 				tr->offsets_size)) {
3372 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3373 				proc->pid, thread->pid);
3374 		return_error = BR_FAILED_REPLY;
3375 		return_error_param = -EFAULT;
3376 		return_error_line = __LINE__;
3377 		goto err_copy_data_failed;
3378 	}
3379 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3380 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3381 				proc->pid, thread->pid, (u64)tr->offsets_size);
3382 		return_error = BR_FAILED_REPLY;
3383 		return_error_param = -EINVAL;
3384 		return_error_line = __LINE__;
3385 		goto err_bad_offset;
3386 	}
3387 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3388 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3389 				  proc->pid, thread->pid,
3390 				  (u64)extra_buffers_size);
3391 		return_error = BR_FAILED_REPLY;
3392 		return_error_param = -EINVAL;
3393 		return_error_line = __LINE__;
3394 		goto err_bad_offset;
3395 	}
3396 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3397 	buffer_offset = off_start_offset;
3398 	off_end_offset = off_start_offset + tr->offsets_size;
3399 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3400 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3401 		ALIGN(secctx_sz, sizeof(u64));
3402 	off_min = 0;
3403 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3404 	     buffer_offset += sizeof(binder_size_t)) {
3405 		struct binder_object_header *hdr;
3406 		size_t object_size;
3407 		struct binder_object object;
3408 		binder_size_t object_offset;
3409 		binder_size_t copy_size;
3410 
3411 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3412 						  &object_offset,
3413 						  t->buffer,
3414 						  buffer_offset,
3415 						  sizeof(object_offset))) {
3416 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3417 				thread->pid, proc->pid);
3418 			return_error = BR_FAILED_REPLY;
3419 			return_error_param = -EINVAL;
3420 			return_error_line = __LINE__;
3421 			goto err_bad_offset;
3422 		}
3423 
3424 		/*
3425 		 * Copy the source user buffer up to the next object
3426 		 * that will be processed.
3427 		 */
3428 		copy_size = object_offset - user_offset;
3429 		if (copy_size && (user_offset > object_offset ||
3430 				binder_alloc_copy_user_to_buffer(
3431 					&target_proc->alloc,
3432 					t->buffer, user_offset,
3433 					user_buffer + user_offset,
3434 					copy_size))) {
3435 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3436 					proc->pid, thread->pid);
3437 			return_error = BR_FAILED_REPLY;
3438 			return_error_param = -EFAULT;
3439 			return_error_line = __LINE__;
3440 			goto err_copy_data_failed;
3441 		}
3442 		object_size = binder_get_object(target_proc, user_buffer,
3443 				t->buffer, object_offset, &object);
3444 		if (object_size == 0 || object_offset < off_min) {
3445 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3446 					  proc->pid, thread->pid,
3447 					  (u64)object_offset,
3448 					  (u64)off_min,
3449 					  (u64)t->buffer->data_size);
3450 			return_error = BR_FAILED_REPLY;
3451 			return_error_param = -EINVAL;
3452 			return_error_line = __LINE__;
3453 			goto err_bad_offset;
3454 		}
3455 		/*
3456 		 * Set offset to the next buffer fragment to be
3457 		 * copied
3458 		 */
3459 		user_offset = object_offset + object_size;
3460 
3461 		hdr = &object.hdr;
3462 		off_min = object_offset + object_size;
3463 		switch (hdr->type) {
3464 		case BINDER_TYPE_BINDER:
3465 		case BINDER_TYPE_WEAK_BINDER: {
3466 			struct flat_binder_object *fp;
3467 
3468 			fp = to_flat_binder_object(hdr);
3469 			ret = binder_translate_binder(fp, t, thread);
3470 
3471 			if (ret < 0 ||
3472 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3473 							t->buffer,
3474 							object_offset,
3475 							fp, sizeof(*fp))) {
3476 				binder_txn_error("%d:%d translate binder failed\n",
3477 					thread->pid, proc->pid);
3478 				return_error = BR_FAILED_REPLY;
3479 				return_error_param = ret;
3480 				return_error_line = __LINE__;
3481 				goto err_translate_failed;
3482 			}
3483 		} break;
3484 		case BINDER_TYPE_HANDLE:
3485 		case BINDER_TYPE_WEAK_HANDLE: {
3486 			struct flat_binder_object *fp;
3487 
3488 			fp = to_flat_binder_object(hdr);
3489 			ret = binder_translate_handle(fp, t, thread);
3490 			if (ret < 0 ||
3491 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3492 							t->buffer,
3493 							object_offset,
3494 							fp, sizeof(*fp))) {
3495 				binder_txn_error("%d:%d translate handle failed\n",
3496 					thread->pid, proc->pid);
3497 				return_error = BR_FAILED_REPLY;
3498 				return_error_param = ret;
3499 				return_error_line = __LINE__;
3500 				goto err_translate_failed;
3501 			}
3502 		} break;
3503 
3504 		case BINDER_TYPE_FD: {
3505 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3506 			binder_size_t fd_offset = object_offset +
3507 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3508 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3509 						      thread, in_reply_to);
3510 
3511 			fp->pad_binder = 0;
3512 			if (ret < 0 ||
3513 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3514 							t->buffer,
3515 							object_offset,
3516 							fp, sizeof(*fp))) {
3517 				binder_txn_error("%d:%d translate fd failed\n",
3518 					thread->pid, proc->pid);
3519 				return_error = BR_FAILED_REPLY;
3520 				return_error_param = ret;
3521 				return_error_line = __LINE__;
3522 				goto err_translate_failed;
3523 			}
3524 		} break;
3525 		case BINDER_TYPE_FDA: {
3526 			struct binder_object ptr_object;
3527 			binder_size_t parent_offset;
3528 			struct binder_object user_object;
3529 			size_t user_parent_size;
3530 			struct binder_fd_array_object *fda =
3531 				to_binder_fd_array_object(hdr);
3532 			size_t num_valid = (buffer_offset - off_start_offset) /
3533 						sizeof(binder_size_t);
3534 			struct binder_buffer_object *parent =
3535 				binder_validate_ptr(target_proc, t->buffer,
3536 						    &ptr_object, fda->parent,
3537 						    off_start_offset,
3538 						    &parent_offset,
3539 						    num_valid);
3540 			if (!parent) {
3541 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3542 						  proc->pid, thread->pid);
3543 				return_error = BR_FAILED_REPLY;
3544 				return_error_param = -EINVAL;
3545 				return_error_line = __LINE__;
3546 				goto err_bad_parent;
3547 			}
3548 			if (!binder_validate_fixup(target_proc, t->buffer,
3549 						   off_start_offset,
3550 						   parent_offset,
3551 						   fda->parent_offset,
3552 						   last_fixup_obj_off,
3553 						   last_fixup_min_off)) {
3554 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3555 						  proc->pid, thread->pid);
3556 				return_error = BR_FAILED_REPLY;
3557 				return_error_param = -EINVAL;
3558 				return_error_line = __LINE__;
3559 				goto err_bad_parent;
3560 			}
3561 			/*
3562 			 * We need to read the user version of the parent
3563 			 * object to get the original user offset
3564 			 */
3565 			user_parent_size =
3566 				binder_get_object(proc, user_buffer, t->buffer,
3567 						  parent_offset, &user_object);
3568 			if (user_parent_size != sizeof(user_object.bbo)) {
3569 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3570 						  proc->pid, thread->pid,
3571 						  user_parent_size,
3572 						  sizeof(user_object.bbo));
3573 				return_error = BR_FAILED_REPLY;
3574 				return_error_param = -EINVAL;
3575 				return_error_line = __LINE__;
3576 				goto err_bad_parent;
3577 			}
3578 			ret = binder_translate_fd_array(&pf_head, fda,
3579 							user_buffer, parent,
3580 							&user_object.bbo, t,
3581 							thread, in_reply_to);
3582 			if (!ret)
3583 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3584 								  t->buffer,
3585 								  object_offset,
3586 								  fda, sizeof(*fda));
3587 			if (ret) {
3588 				binder_txn_error("%d:%d translate fd array failed\n",
3589 					thread->pid, proc->pid);
3590 				return_error = BR_FAILED_REPLY;
3591 				return_error_param = ret > 0 ? -EINVAL : ret;
3592 				return_error_line = __LINE__;
3593 				goto err_translate_failed;
3594 			}
3595 			last_fixup_obj_off = parent_offset;
3596 			last_fixup_min_off =
3597 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3598 		} break;
3599 		case BINDER_TYPE_PTR: {
3600 			struct binder_buffer_object *bp =
3601 				to_binder_buffer_object(hdr);
3602 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3603 			size_t num_valid;
3604 
3605 			if (bp->length > buf_left) {
3606 				binder_user_error("%d:%d got transaction with too large buffer\n",
3607 						  proc->pid, thread->pid);
3608 				return_error = BR_FAILED_REPLY;
3609 				return_error_param = -EINVAL;
3610 				return_error_line = __LINE__;
3611 				goto err_bad_offset;
3612 			}
3613 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3614 				(const void __user *)(uintptr_t)bp->buffer,
3615 				bp->length);
3616 			if (ret) {
3617 				binder_txn_error("%d:%d deferred copy failed\n",
3618 					thread->pid, proc->pid);
3619 				return_error = BR_FAILED_REPLY;
3620 				return_error_param = ret;
3621 				return_error_line = __LINE__;
3622 				goto err_translate_failed;
3623 			}
3624 			/* Fixup buffer pointer to target proc address space */
3625 			bp->buffer = t->buffer->user_data + sg_buf_offset;
3626 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3627 
3628 			num_valid = (buffer_offset - off_start_offset) /
3629 					sizeof(binder_size_t);
3630 			ret = binder_fixup_parent(&pf_head, t,
3631 						  thread, bp,
3632 						  off_start_offset,
3633 						  num_valid,
3634 						  last_fixup_obj_off,
3635 						  last_fixup_min_off);
3636 			if (ret < 0 ||
3637 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3638 							t->buffer,
3639 							object_offset,
3640 							bp, sizeof(*bp))) {
3641 				binder_txn_error("%d:%d failed to fixup parent\n",
3642 					thread->pid, proc->pid);
3643 				return_error = BR_FAILED_REPLY;
3644 				return_error_param = ret;
3645 				return_error_line = __LINE__;
3646 				goto err_translate_failed;
3647 			}
3648 			last_fixup_obj_off = object_offset;
3649 			last_fixup_min_off = 0;
3650 		} break;
3651 		default:
3652 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3653 				proc->pid, thread->pid, hdr->type);
3654 			return_error = BR_FAILED_REPLY;
3655 			return_error_param = -EINVAL;
3656 			return_error_line = __LINE__;
3657 			goto err_bad_object_type;
3658 		}
3659 	}
3660 	/* Done processing objects, copy the rest of the buffer */
3661 	if (binder_alloc_copy_user_to_buffer(
3662 				&target_proc->alloc,
3663 				t->buffer, user_offset,
3664 				user_buffer + user_offset,
3665 				tr->data_size - user_offset)) {
3666 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3667 				proc->pid, thread->pid);
3668 		return_error = BR_FAILED_REPLY;
3669 		return_error_param = -EFAULT;
3670 		return_error_line = __LINE__;
3671 		goto err_copy_data_failed;
3672 	}
3673 
3674 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3675 					    &sgc_head, &pf_head);
3676 	if (ret) {
3677 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3678 				  proc->pid, thread->pid);
3679 		return_error = BR_FAILED_REPLY;
3680 		return_error_param = ret;
3681 		return_error_line = __LINE__;
3682 		goto err_copy_data_failed;
3683 	}
3684 	if (t->buffer->oneway_spam_suspect)
3685 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3686 	else
3687 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3688 	t->work.type = BINDER_WORK_TRANSACTION;
3689 
3690 	if (reply) {
3691 		binder_enqueue_thread_work(thread, tcomplete);
3692 		binder_inner_proc_lock(target_proc);
3693 		if (target_thread->is_dead) {
3694 			return_error = BR_DEAD_REPLY;
3695 			binder_inner_proc_unlock(target_proc);
3696 			goto err_dead_proc_or_thread;
3697 		}
3698 		BUG_ON(t->buffer->async_transaction != 0);
3699 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3700 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3701 		target_proc->outstanding_txns++;
3702 		binder_inner_proc_unlock(target_proc);
3703 		wake_up_interruptible_sync(&target_thread->wait);
3704 		binder_free_transaction(in_reply_to);
3705 	} else if (!(t->flags & TF_ONE_WAY)) {
3706 		BUG_ON(t->buffer->async_transaction != 0);
3707 		binder_inner_proc_lock(proc);
3708 		/*
		 * Defer the TRANSACTION_COMPLETE so we don't return to
		 * userspace immediately; this lets the target process start
		 * working on the transaction right away, reducing latency.
		 * The TRANSACTION_COMPLETE is then returned when the target
		 * replies (or an error occurs).
3714 		 */
3715 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3716 		t->need_reply = 1;
3717 		t->from_parent = thread->transaction_stack;
3718 		thread->transaction_stack = t;
3719 		binder_inner_proc_unlock(proc);
3720 		return_error = binder_proc_transaction(t,
3721 				target_proc, target_thread);
3722 		if (return_error) {
3723 			binder_inner_proc_lock(proc);
3724 			binder_pop_transaction_ilocked(thread, t);
3725 			binder_inner_proc_unlock(proc);
3726 			goto err_dead_proc_or_thread;
3727 		}
3728 	} else {
3729 		BUG_ON(target_node == NULL);
3730 		BUG_ON(t->buffer->async_transaction != 1);
3731 		return_error = binder_proc_transaction(t, target_proc, NULL);
3732 		/*
3733 		 * Let the caller know when async transaction reaches a frozen
3734 		 * process and is put in a pending queue, waiting for the target
3735 		 * process to be unfrozen.
3736 		 */
3737 		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3738 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3739 		binder_enqueue_thread_work(thread, tcomplete);
3740 		if (return_error &&
3741 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3742 			goto err_dead_proc_or_thread;
3743 	}
3744 	if (target_thread)
3745 		binder_thread_dec_tmpref(target_thread);
3746 	binder_proc_dec_tmpref(target_proc);
3747 	if (target_node)
3748 		binder_dec_node_tmpref(target_node);
3749 	/*
3750 	 * write barrier to synchronize with initialization
3751 	 * of log entry
3752 	 */
3753 	smp_wmb();
3754 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3755 	return;
3756 
3757 err_dead_proc_or_thread:
3758 	binder_txn_error("%d:%d dead process or thread\n",
3759 		thread->pid, proc->pid);
3760 	return_error_line = __LINE__;
3761 	binder_dequeue_work(proc, tcomplete);
3762 err_translate_failed:
3763 err_bad_object_type:
3764 err_bad_offset:
3765 err_bad_parent:
3766 err_copy_data_failed:
3767 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3768 	binder_free_txn_fixups(t);
3769 	trace_binder_transaction_failed_buffer_release(t->buffer);
3770 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3771 					  buffer_offset, true);
3772 	if (target_node)
3773 		binder_dec_node_tmpref(target_node);
3774 	target_node = NULL;
3775 	t->buffer->transaction = NULL;
3776 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3777 err_binder_alloc_buf_failed:
3778 err_bad_extra_size:
3779 	if (secctx)
3780 		security_release_secctx(secctx, secctx_sz);
3781 err_get_secctx_failed:
3782 	kfree(tcomplete);
3783 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3784 err_alloc_tcomplete_failed:
3785 	if (trace_binder_txn_latency_free_enabled())
3786 		binder_txn_latency_free(t);
3787 	kfree(t);
3788 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3789 err_alloc_t_failed:
3790 err_bad_todo_list:
3791 err_bad_call_stack:
3792 err_empty_call_stack:
3793 err_dead_binder:
3794 err_invalid_target_handle:
3795 	if (target_node) {
3796 		binder_dec_node(target_node, 1, 0);
3797 		binder_dec_node_tmpref(target_node);
3798 	}
3799 
3800 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3801 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3802 		     proc->pid, thread->pid, reply ? "reply" :
3803 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3804 		     target_proc ? target_proc->pid : 0,
3805 		     target_thread ? target_thread->pid : 0,
3806 		     t_debug_id, return_error, return_error_param,
3807 		     (u64)tr->data_size, (u64)tr->offsets_size,
3808 		     return_error_line);
3809 
3810 	if (target_thread)
3811 		binder_thread_dec_tmpref(target_thread);
3812 	if (target_proc)
3813 		binder_proc_dec_tmpref(target_proc);
3814 
3815 	{
3816 		struct binder_transaction_log_entry *fe;
3817 
3818 		e->return_error = return_error;
3819 		e->return_error_param = return_error_param;
3820 		e->return_error_line = return_error_line;
3821 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3822 		*fe = *e;
3823 		/*
3824 		 * write barrier to synchronize with initialization
3825 		 * of log entry
3826 		 */
3827 		smp_wmb();
3828 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3829 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3830 	}
3831 
3832 	BUG_ON(thread->return_error.cmd != BR_OK);
3833 	if (in_reply_to) {
3834 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3835 				return_error, return_error_param);
3836 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3837 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3838 		binder_send_failed_reply(in_reply_to, return_error);
3839 	} else {
3840 		binder_inner_proc_lock(proc);
3841 		binder_set_extended_error(&thread->ee, t_debug_id,
3842 				return_error, return_error_param);
3843 		binder_inner_proc_unlock(proc);
3844 		thread->return_error.cmd = return_error;
3845 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3846 	}
3847 }
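
/*
 * Illustrative only (not part of the driver): a minimal user-space
 * write that reaches binder_transaction() above via BINDER_WRITE_READ
 * and binder_thread_write(). A sketch, assuming `binder_fd` is an open
 * /dev/binder descriptor and `handle` is a previously obtained ref
 * descriptor for the target node; error handling is omitted. The
 * packed attribute matters: the driver expects the u32 command to be
 * immediately followed by the binder_transaction_data.
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_transaction_data tr;
 *	} __attribute__((packed)) w = { .cmd = BC_TRANSACTION };
 *	uint32_t payload[2] = { 0, 0 };
 *
 *	w.tr.target.handle = handle;
 *	w.tr.code = 1;			// application-defined method
 *	w.tr.data_size = sizeof(payload);
 *	w.tr.offsets_size = 0;		// no binder/fd objects embedded
 *	w.tr.data.ptr.buffer = (binder_uintptr_t)payload;
 *	w.tr.data.ptr.offsets = 0;
 *
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(w),
 *		.write_buffer = (binder_uintptr_t)&w,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */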
3848 
3849 /**
3850  * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns the buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Clean up the buffer and free it.
3859  */
3860 static void
3861 binder_free_buf(struct binder_proc *proc,
3862 		struct binder_thread *thread,
3863 		struct binder_buffer *buffer, bool is_failure)
3864 {
3865 	binder_inner_proc_lock(proc);
3866 	if (buffer->transaction) {
3867 		buffer->transaction->buffer = NULL;
3868 		buffer->transaction = NULL;
3869 	}
3870 	binder_inner_proc_unlock(proc);
3871 	if (buffer->async_transaction && buffer->target_node) {
3872 		struct binder_node *buf_node;
3873 		struct binder_work *w;
3874 
3875 		buf_node = buffer->target_node;
3876 		binder_node_inner_lock(buf_node);
3877 		BUG_ON(!buf_node->has_async_transaction);
3878 		BUG_ON(buf_node->proc != proc);
3879 		w = binder_dequeue_work_head_ilocked(
3880 				&buf_node->async_todo);
3881 		if (!w) {
3882 			buf_node->has_async_transaction = false;
3883 		} else {
3884 			binder_enqueue_work_ilocked(
3885 					w, &proc->todo);
3886 			binder_wakeup_proc_ilocked(proc);
3887 		}
3888 		binder_node_inner_unlock(buf_node);
3889 	}
3890 	trace_binder_transaction_buffer_release(buffer);
3891 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
3892 	binder_alloc_free_buf(&proc->alloc, buffer);
3893 }
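
/*
 * Illustrative only (not part of the driver): user space triggers
 * binder_free_buf() by writing BC_FREE_BUFFER once it is done with a
 * received transaction buffer. A sketch, assuming `binder_fd` is an
 * open /dev/binder descriptor and `trd` points at the
 * binder_transaction_data that arrived with BR_TRANSACTION:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} __attribute__((packed)) w = {
 *		.cmd = BC_FREE_BUFFER,
 *		.buffer = trd->data.ptr.buffer,
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(w),
 *		.write_buffer = (binder_uintptr_t)&w,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */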
3894 
3895 static int binder_thread_write(struct binder_proc *proc,
3896 			struct binder_thread *thread,
3897 			binder_uintptr_t binder_buffer, size_t size,
3898 			binder_size_t *consumed)
3899 {
3900 	uint32_t cmd;
3901 	struct binder_context *context = proc->context;
3902 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3903 	void __user *ptr = buffer + *consumed;
3904 	void __user *end = buffer + size;
3905 
3906 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3907 		int ret;
3908 
3909 		if (get_user(cmd, (uint32_t __user *)ptr))
3910 			return -EFAULT;
3911 		ptr += sizeof(uint32_t);
3912 		trace_binder_command(cmd);
3913 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3914 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3915 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3916 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3917 		}
3918 		switch (cmd) {
3919 		case BC_INCREFS:
3920 		case BC_ACQUIRE:
3921 		case BC_RELEASE:
3922 		case BC_DECREFS: {
3923 			uint32_t target;
3924 			const char *debug_string;
3925 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3926 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3927 			struct binder_ref_data rdata;
3928 
3929 			if (get_user(target, (uint32_t __user *)ptr))
3930 				return -EFAULT;
3931 
3932 			ptr += sizeof(uint32_t);
3933 			ret = -1;
3934 			if (increment && !target) {
3935 				struct binder_node *ctx_mgr_node;
3936 
3937 				mutex_lock(&context->context_mgr_node_lock);
3938 				ctx_mgr_node = context->binder_context_mgr_node;
3939 				if (ctx_mgr_node) {
3940 					if (ctx_mgr_node->proc == proc) {
3941 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3942 								  proc->pid, thread->pid);
3943 						mutex_unlock(&context->context_mgr_node_lock);
3944 						return -EINVAL;
3945 					}
3946 					ret = binder_inc_ref_for_node(
3947 							proc, ctx_mgr_node,
3948 							strong, NULL, &rdata);
3949 				}
3950 				mutex_unlock(&context->context_mgr_node_lock);
3951 			}
3952 			if (ret)
3953 				ret = binder_update_ref_for_handle(
3954 						proc, target, increment, strong,
3955 						&rdata);
3956 			if (!ret && rdata.desc != target) {
3957 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3958 					proc->pid, thread->pid,
3959 					target, rdata.desc);
3960 			}
3961 			switch (cmd) {
3962 			case BC_INCREFS:
3963 				debug_string = "IncRefs";
3964 				break;
3965 			case BC_ACQUIRE:
3966 				debug_string = "Acquire";
3967 				break;
3968 			case BC_RELEASE:
3969 				debug_string = "Release";
3970 				break;
3971 			case BC_DECREFS:
3972 			default:
3973 				debug_string = "DecRefs";
3974 				break;
3975 			}
3976 			if (ret) {
3977 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3978 					proc->pid, thread->pid, debug_string,
3979 					strong, target, ret);
3980 				break;
3981 			}
3982 			binder_debug(BINDER_DEBUG_USER_REFS,
3983 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3984 				     proc->pid, thread->pid, debug_string,
3985 				     rdata.debug_id, rdata.desc, rdata.strong,
3986 				     rdata.weak);
3987 			break;
3988 		}
3989 		case BC_INCREFS_DONE:
3990 		case BC_ACQUIRE_DONE: {
3991 			binder_uintptr_t node_ptr;
3992 			binder_uintptr_t cookie;
3993 			struct binder_node *node;
3994 			bool free_node;
3995 
3996 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3997 				return -EFAULT;
3998 			ptr += sizeof(binder_uintptr_t);
3999 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4000 				return -EFAULT;
4001 			ptr += sizeof(binder_uintptr_t);
4002 			node = binder_get_node(proc, node_ptr);
4003 			if (node == NULL) {
4004 				binder_user_error("%d:%d %s u%016llx no match\n",
4005 					proc->pid, thread->pid,
4006 					cmd == BC_INCREFS_DONE ?
4007 					"BC_INCREFS_DONE" :
4008 					"BC_ACQUIRE_DONE",
4009 					(u64)node_ptr);
4010 				break;
4011 			}
4012 			if (cookie != node->cookie) {
4013 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4014 					proc->pid, thread->pid,
4015 					cmd == BC_INCREFS_DONE ?
4016 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4017 					(u64)node_ptr, node->debug_id,
4018 					(u64)cookie, (u64)node->cookie);
4019 				binder_put_node(node);
4020 				break;
4021 			}
4022 			binder_node_inner_lock(node);
4023 			if (cmd == BC_ACQUIRE_DONE) {
4024 				if (node->pending_strong_ref == 0) {
4025 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4026 						proc->pid, thread->pid,
4027 						node->debug_id);
4028 					binder_node_inner_unlock(node);
4029 					binder_put_node(node);
4030 					break;
4031 				}
4032 				node->pending_strong_ref = 0;
4033 			} else {
4034 				if (node->pending_weak_ref == 0) {
4035 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4036 						proc->pid, thread->pid,
4037 						node->debug_id);
4038 					binder_node_inner_unlock(node);
4039 					binder_put_node(node);
4040 					break;
4041 				}
4042 				node->pending_weak_ref = 0;
4043 			}
4044 			free_node = binder_dec_node_nilocked(node,
4045 					cmd == BC_ACQUIRE_DONE, 0);
4046 			WARN_ON(free_node);
4047 			binder_debug(BINDER_DEBUG_USER_REFS,
4048 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
4049 				     proc->pid, thread->pid,
4050 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4051 				     node->debug_id, node->local_strong_refs,
4052 				     node->local_weak_refs, node->tmp_refs);
4053 			binder_node_inner_unlock(node);
4054 			binder_put_node(node);
4055 			break;
4056 		}
4057 		case BC_ATTEMPT_ACQUIRE:
4058 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4059 			return -EINVAL;
4060 		case BC_ACQUIRE_RESULT:
4061 			pr_err("BC_ACQUIRE_RESULT not supported\n");
4062 			return -EINVAL;
4063 
4064 		case BC_FREE_BUFFER: {
4065 			binder_uintptr_t data_ptr;
4066 			struct binder_buffer *buffer;
4067 
4068 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4069 				return -EFAULT;
4070 			ptr += sizeof(binder_uintptr_t);
4071 
4072 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
4073 							      data_ptr);
4074 			if (IS_ERR_OR_NULL(buffer)) {
4075 				if (PTR_ERR(buffer) == -EPERM) {
4076 					binder_user_error(
4077 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4078 						proc->pid, thread->pid,
4079 						(u64)data_ptr);
4080 				} else {
4081 					binder_user_error(
4082 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
4083 						proc->pid, thread->pid,
4084 						(u64)data_ptr);
4085 				}
4086 				break;
4087 			}
4088 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4089 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4090 				     proc->pid, thread->pid, (u64)data_ptr,
4091 				     buffer->debug_id,
4092 				     buffer->transaction ? "active" : "finished");
4093 			binder_free_buf(proc, thread, buffer, false);
4094 			break;
4095 		}
4096 
4097 		case BC_TRANSACTION_SG:
4098 		case BC_REPLY_SG: {
4099 			struct binder_transaction_data_sg tr;
4100 
4101 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4102 				return -EFAULT;
4103 			ptr += sizeof(tr);
4104 			binder_transaction(proc, thread, &tr.transaction_data,
4105 					   cmd == BC_REPLY_SG, tr.buffers_size);
4106 			break;
4107 		}
4108 		case BC_TRANSACTION:
4109 		case BC_REPLY: {
4110 			struct binder_transaction_data tr;
4111 
4112 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4113 				return -EFAULT;
4114 			ptr += sizeof(tr);
4115 			binder_transaction(proc, thread, &tr,
4116 					   cmd == BC_REPLY, 0);
4117 			break;
4118 		}
4119 
4120 		case BC_REGISTER_LOOPER:
4121 			binder_debug(BINDER_DEBUG_THREADS,
4122 				     "%d:%d BC_REGISTER_LOOPER\n",
4123 				     proc->pid, thread->pid);
4124 			binder_inner_proc_lock(proc);
4125 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4126 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4127 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4128 					proc->pid, thread->pid);
4129 			} else if (proc->requested_threads == 0) {
4130 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4131 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4132 					proc->pid, thread->pid);
4133 			} else {
4134 				proc->requested_threads--;
4135 				proc->requested_threads_started++;
4136 			}
4137 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4138 			binder_inner_proc_unlock(proc);
4139 			break;
4140 		case BC_ENTER_LOOPER:
4141 			binder_debug(BINDER_DEBUG_THREADS,
4142 				     "%d:%d BC_ENTER_LOOPER\n",
4143 				     proc->pid, thread->pid);
4144 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4145 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4146 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4147 					proc->pid, thread->pid);
4148 			}
4149 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4150 			break;
4151 		case BC_EXIT_LOOPER:
4152 			binder_debug(BINDER_DEBUG_THREADS,
4153 				     "%d:%d BC_EXIT_LOOPER\n",
4154 				     proc->pid, thread->pid);
4155 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4156 			break;
4157 
4158 		case BC_REQUEST_DEATH_NOTIFICATION:
4159 		case BC_CLEAR_DEATH_NOTIFICATION: {
4160 			uint32_t target;
4161 			binder_uintptr_t cookie;
4162 			struct binder_ref *ref;
4163 			struct binder_ref_death *death = NULL;
4164 
4165 			if (get_user(target, (uint32_t __user *)ptr))
4166 				return -EFAULT;
4167 			ptr += sizeof(uint32_t);
4168 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4169 				return -EFAULT;
4170 			ptr += sizeof(binder_uintptr_t);
4171 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4172 				/*
4173 				 * Allocate memory for death notification
4174 				 * before taking lock
4175 				 */
4176 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4177 				if (death == NULL) {
4178 					WARN_ON(thread->return_error.cmd !=
4179 						BR_OK);
4180 					thread->return_error.cmd = BR_ERROR;
4181 					binder_enqueue_thread_work(
4182 						thread,
4183 						&thread->return_error.work);
4184 					binder_debug(
4185 						BINDER_DEBUG_FAILED_TRANSACTION,
4186 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4187 						proc->pid, thread->pid);
4188 					break;
4189 				}
4190 			}
4191 			binder_proc_lock(proc);
4192 			ref = binder_get_ref_olocked(proc, target, false);
4193 			if (ref == NULL) {
4194 				binder_user_error("%d:%d %s invalid ref %d\n",
4195 					proc->pid, thread->pid,
4196 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4197 					"BC_REQUEST_DEATH_NOTIFICATION" :
4198 					"BC_CLEAR_DEATH_NOTIFICATION",
4199 					target);
4200 				binder_proc_unlock(proc);
4201 				kfree(death);
4202 				break;
4203 			}
4204 
4205 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4206 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4207 				     proc->pid, thread->pid,
4208 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4209 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4210 				     "BC_CLEAR_DEATH_NOTIFICATION",
4211 				     (u64)cookie, ref->data.debug_id,
4212 				     ref->data.desc, ref->data.strong,
4213 				     ref->data.weak, ref->node->debug_id);
4214 
4215 			binder_node_lock(ref->node);
4216 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4217 				if (ref->death) {
4218 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4219 						proc->pid, thread->pid);
4220 					binder_node_unlock(ref->node);
4221 					binder_proc_unlock(proc);
4222 					kfree(death);
4223 					break;
4224 				}
4225 				binder_stats_created(BINDER_STAT_DEATH);
4226 				INIT_LIST_HEAD(&death->work.entry);
4227 				death->cookie = cookie;
4228 				ref->death = death;
4229 				if (ref->node->proc == NULL) {
4230 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4231 
4232 					binder_inner_proc_lock(proc);
4233 					binder_enqueue_work_ilocked(
4234 						&ref->death->work, &proc->todo);
4235 					binder_wakeup_proc_ilocked(proc);
4236 					binder_inner_proc_unlock(proc);
4237 				}
4238 			} else {
4239 				if (ref->death == NULL) {
4240 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4241 						proc->pid, thread->pid);
4242 					binder_node_unlock(ref->node);
4243 					binder_proc_unlock(proc);
4244 					break;
4245 				}
4246 				death = ref->death;
4247 				if (death->cookie != cookie) {
4248 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4249 						proc->pid, thread->pid,
4250 						(u64)death->cookie,
4251 						(u64)cookie);
4252 					binder_node_unlock(ref->node);
4253 					binder_proc_unlock(proc);
4254 					break;
4255 				}
4256 				ref->death = NULL;
4257 				binder_inner_proc_lock(proc);
4258 				if (list_empty(&death->work.entry)) {
4259 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4260 					if (thread->looper &
4261 					    (BINDER_LOOPER_STATE_REGISTERED |
4262 					     BINDER_LOOPER_STATE_ENTERED))
4263 						binder_enqueue_thread_work_ilocked(
4264 								thread,
4265 								&death->work);
4266 					else {
4267 						binder_enqueue_work_ilocked(
4268 								&death->work,
4269 								&proc->todo);
4270 						binder_wakeup_proc_ilocked(
4271 								proc);
4272 					}
4273 				} else {
4274 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4275 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4276 				}
4277 				binder_inner_proc_unlock(proc);
4278 			}
4279 			binder_node_unlock(ref->node);
4280 			binder_proc_unlock(proc);
4281 		} break;
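		/*
		 * Illustrative only: the life cycle tying this case to
		 * BC_DEAD_BINDER_DONE below. User-space commands are at
		 * the left margin, driver-to-user replies are indented:
		 *
		 *	BC_REQUEST_DEATH_NOTIFICATION(desc, cookie)
		 *	... later, either the remote process dies:
		 *		BR_DEAD_BINDER(cookie)
		 *	BC_DEAD_BINDER_DONE(cookie)
		 *	... or the notification is cancelled first:
		 *	BC_CLEAR_DEATH_NOTIFICATION(desc, cookie)
		 *		BR_CLEAR_DEATH_NOTIFICATION_DONE(cookie)
		 */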
4282 		case BC_DEAD_BINDER_DONE: {
4283 			struct binder_work *w;
4284 			binder_uintptr_t cookie;
4285 			struct binder_ref_death *death = NULL;
4286 
4287 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4288 				return -EFAULT;
4289 
4290 			ptr += sizeof(cookie);
4291 			binder_inner_proc_lock(proc);
4292 			list_for_each_entry(w, &proc->delivered_death,
4293 					    entry) {
4294 				struct binder_ref_death *tmp_death =
4295 					container_of(w,
4296 						     struct binder_ref_death,
4297 						     work);
4298 
4299 				if (tmp_death->cookie == cookie) {
4300 					death = tmp_death;
4301 					break;
4302 				}
4303 			}
4304 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4305 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4306 				     proc->pid, thread->pid, (u64)cookie,
4307 				     death);
4308 			if (death == NULL) {
4309 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4310 					proc->pid, thread->pid, (u64)cookie);
4311 				binder_inner_proc_unlock(proc);
4312 				break;
4313 			}
4314 			binder_dequeue_work_ilocked(&death->work);
4315 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4316 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4317 				if (thread->looper &
4318 					(BINDER_LOOPER_STATE_REGISTERED |
4319 					 BINDER_LOOPER_STATE_ENTERED))
4320 					binder_enqueue_thread_work_ilocked(
4321 						thread, &death->work);
4322 				else {
4323 					binder_enqueue_work_ilocked(
4324 							&death->work,
4325 							&proc->todo);
4326 					binder_wakeup_proc_ilocked(proc);
4327 				}
4328 			}
4329 			binder_inner_proc_unlock(proc);
4330 		} break;
4331 
4332 		default:
4333 			pr_err("%d:%d unknown command %u\n",
4334 			       proc->pid, thread->pid, cmd);
4335 			return -EINVAL;
4336 		}
4337 		*consumed = ptr - buffer;
4338 	}
4339 	return 0;
4340 }
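
/*
 * Illustrative only (not part of the driver): a looper thread announces
 * itself with BC_ENTER_LOOPER before blocking in read; BC_REGISTER_LOOPER
 * is only valid for threads spawned in response to BR_SPAWN_LOOPER. A
 * sketch, assuming `binder_fd` is an open /dev/binder descriptor:
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */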
4341 
4342 static void binder_stat_br(struct binder_proc *proc,
4343 			   struct binder_thread *thread, uint32_t cmd)
4344 {
4345 	trace_binder_return(cmd);
4346 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4347 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4348 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4349 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4350 	}
4351 }
4352 
4353 static int binder_put_node_cmd(struct binder_proc *proc,
4354 			       struct binder_thread *thread,
4355 			       void __user **ptrp,
4356 			       binder_uintptr_t node_ptr,
4357 			       binder_uintptr_t node_cookie,
4358 			       int node_debug_id,
4359 			       uint32_t cmd, const char *cmd_name)
4360 {
4361 	void __user *ptr = *ptrp;
4362 
4363 	if (put_user(cmd, (uint32_t __user *)ptr))
4364 		return -EFAULT;
4365 	ptr += sizeof(uint32_t);
4366 
4367 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4368 		return -EFAULT;
4369 	ptr += sizeof(binder_uintptr_t);
4370 
4371 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4372 		return -EFAULT;
4373 	ptr += sizeof(binder_uintptr_t);
4374 
4375 	binder_stat_br(proc, thread, cmd);
4376 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4377 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4378 		     (u64)node_ptr, (u64)node_cookie);
4379 
4380 	*ptrp = ptr;
4381 	return 0;
4382 }
4383 
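/*
 * The sleep below is open-coded rather than built on the
 * wait_event_interruptible() helpers because the wakeup condition must
 * be sampled under proc->inner_lock and the thread has to add itself
 * to proc->waiting_threads before it may sleep.
 */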
4384 static int binder_wait_for_work(struct binder_thread *thread,
4385 				bool do_proc_work)
4386 {
4387 	DEFINE_WAIT(wait);
4388 	struct binder_proc *proc = thread->proc;
4389 	int ret = 0;
4390 
4391 	binder_inner_proc_lock(proc);
4392 	for (;;) {
4393 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4394 		if (binder_has_work_ilocked(thread, do_proc_work))
4395 			break;
4396 		if (do_proc_work)
4397 			list_add(&thread->waiting_thread_node,
4398 				 &proc->waiting_threads);
4399 		binder_inner_proc_unlock(proc);
4400 		schedule();
4401 		binder_inner_proc_lock(proc);
4402 		list_del_init(&thread->waiting_thread_node);
4403 		if (signal_pending(current)) {
4404 			ret = -EINTR;
4405 			break;
4406 		}
4407 	}
4408 	finish_wait(&thread->wait, &wait);
4409 	binder_inner_proc_unlock(proc);
4410 
4411 	return ret;
4412 }
4413 
4414 /**
4415  * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate, fix up the buffer with the
 * new fds first, and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 *
 * Return: 0 on success, or a negative errno (-ENOMEM or -EINVAL)
 * on failure.
4426  */
4427 static int binder_apply_fd_fixups(struct binder_proc *proc,
4428 				  struct binder_transaction *t)
4429 {
4430 	struct binder_txn_fd_fixup *fixup, *tmp;
4431 	int ret = 0;
4432 
4433 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4434 		int fd = get_unused_fd_flags(O_CLOEXEC);
4435 
4436 		if (fd < 0) {
4437 			binder_debug(BINDER_DEBUG_TRANSACTION,
4438 				     "failed fd fixup txn %d fd %d\n",
4439 				     t->debug_id, fd);
4440 			ret = -ENOMEM;
4441 			goto err;
4442 		}
4443 		binder_debug(BINDER_DEBUG_TRANSACTION,
4444 			     "fd fixup txn %d fd %d\n",
4445 			     t->debug_id, fd);
4446 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4447 		fixup->target_fd = fd;
4448 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4449 						fixup->offset, &fd,
4450 						sizeof(u32))) {
4451 			ret = -EINVAL;
4452 			goto err;
4453 		}
4454 	}
4455 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4456 		fd_install(fixup->target_fd, fixup->file);
4457 		list_del(&fixup->fixup_entry);
4458 		kfree(fixup);
4459 	}
4460 
4461 	return ret;
4462 
4463 err:
4464 	binder_free_txn_fixups(t);
4465 	return ret;
4466 }
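
/*
 * Illustrative only (not part of the driver): the sender-side layout
 * that eventually reaches binder_apply_fd_fixups(). The sender embeds
 * a BINDER_TYPE_FD object in the data buffer and records its offset in
 * the offsets array; the driver rewrites the fd value for the target
 * in the fixup pass above. A sketch, assuming `tr` is the
 * binder_transaction_data being prepared and `some_fd` is any open
 * descriptor the sender owns:
 *
 *	struct binder_fd_object obj = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = some_fd,
 *	};
 *	binder_size_t offsets[1] = { 0 };	// obj at buffer offset 0
 *
 *	tr.data.ptr.buffer = (binder_uintptr_t)&obj;
 *	tr.data.ptr.offsets = (binder_uintptr_t)offsets;
 *	tr.data_size = sizeof(obj);
 *	tr.offsets_size = sizeof(offsets);
 */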
4467 
4468 static int binder_thread_read(struct binder_proc *proc,
4469 			      struct binder_thread *thread,
4470 			      binder_uintptr_t binder_buffer, size_t size,
4471 			      binder_size_t *consumed, int non_block)
4472 {
4473 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4474 	void __user *ptr = buffer + *consumed;
4475 	void __user *end = buffer + size;
4476 
4477 	int ret = 0;
4478 	int wait_for_proc_work;
4479 
4480 	if (*consumed == 0) {
4481 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4482 			return -EFAULT;
4483 		ptr += sizeof(uint32_t);
4484 	}
4485 
4486 retry:
4487 	binder_inner_proc_lock(proc);
4488 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4489 	binder_inner_proc_unlock(proc);
4490 
4491 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4492 
4493 	trace_binder_wait_for_work(wait_for_proc_work,
4494 				   !!thread->transaction_stack,
4495 				   !binder_worklist_empty(proc, &thread->todo));
4496 	if (wait_for_proc_work) {
4497 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4498 					BINDER_LOOPER_STATE_ENTERED))) {
4499 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4500 				proc->pid, thread->pid, thread->looper);
4501 			wait_event_interruptible(binder_user_error_wait,
4502 						 binder_stop_on_user_error < 2);
4503 		}
4504 		binder_set_nice(proc->default_priority);
4505 	}
4506 
4507 	if (non_block) {
4508 		if (!binder_has_work(thread, wait_for_proc_work))
4509 			ret = -EAGAIN;
4510 	} else {
4511 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4512 	}
4513 
4514 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4515 
4516 	if (ret)
4517 		return ret;
4518 
4519 	while (1) {
4520 		uint32_t cmd;
4521 		struct binder_transaction_data_secctx tr;
4522 		struct binder_transaction_data *trd = &tr.transaction_data;
4523 		struct binder_work *w = NULL;
4524 		struct list_head *list = NULL;
4525 		struct binder_transaction *t = NULL;
4526 		struct binder_thread *t_from;
4527 		size_t trsize = sizeof(*trd);
4528 
4529 		binder_inner_proc_lock(proc);
4530 		if (!binder_worklist_empty_ilocked(&thread->todo))
4531 			list = &thread->todo;
4532 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4533 			   wait_for_proc_work)
4534 			list = &proc->todo;
4535 		else {
4536 			binder_inner_proc_unlock(proc);
4537 
4538 			/* no data added */
4539 			if (ptr - buffer == 4 && !thread->looper_need_return)
4540 				goto retry;
4541 			break;
4542 		}
4543 
4544 		if (end - ptr < sizeof(tr) + 4) {
4545 			binder_inner_proc_unlock(proc);
4546 			break;
4547 		}
4548 		w = binder_dequeue_work_head_ilocked(list);
4549 		if (binder_worklist_empty_ilocked(&thread->todo))
4550 			thread->process_todo = false;
4551 
4552 		switch (w->type) {
4553 		case BINDER_WORK_TRANSACTION: {
4554 			binder_inner_proc_unlock(proc);
4555 			t = container_of(w, struct binder_transaction, work);
4556 		} break;
4557 		case BINDER_WORK_RETURN_ERROR: {
4558 			struct binder_error *e = container_of(
4559 					w, struct binder_error, work);
4560 
4561 			WARN_ON(e->cmd == BR_OK);
4562 			binder_inner_proc_unlock(proc);
4563 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4564 				return -EFAULT;
4565 			cmd = e->cmd;
4566 			e->cmd = BR_OK;
4567 			ptr += sizeof(uint32_t);
4568 
4569 			binder_stat_br(proc, thread, cmd);
4570 		} break;
4571 		case BINDER_WORK_TRANSACTION_COMPLETE:
4572 		case BINDER_WORK_TRANSACTION_PENDING:
4573 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4574 			if (proc->oneway_spam_detection_enabled &&
4575 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4576 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4577 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4578 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4579 			else
4580 				cmd = BR_TRANSACTION_COMPLETE;
4581 			binder_inner_proc_unlock(proc);
4582 			kfree(w);
4583 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4584 			if (put_user(cmd, (uint32_t __user *)ptr))
4585 				return -EFAULT;
4586 			ptr += sizeof(uint32_t);
4587 
4588 			binder_stat_br(proc, thread, cmd);
4589 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4590 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4591 				     proc->pid, thread->pid);
4592 		} break;
4593 		case BINDER_WORK_NODE: {
4594 			struct binder_node *node = container_of(w, struct binder_node, work);
4595 			int strong, weak;
4596 			binder_uintptr_t node_ptr = node->ptr;
4597 			binder_uintptr_t node_cookie = node->cookie;
4598 			int node_debug_id = node->debug_id;
4599 			int has_weak_ref;
4600 			int has_strong_ref;
4601 			void __user *orig_ptr = ptr;
4602 
4603 			BUG_ON(proc != node->proc);
4604 			strong = node->internal_strong_refs ||
4605 					node->local_strong_refs;
4606 			weak = !hlist_empty(&node->refs) ||
4607 					node->local_weak_refs ||
4608 					node->tmp_refs || strong;
4609 			has_strong_ref = node->has_strong_ref;
4610 			has_weak_ref = node->has_weak_ref;
4611 
4612 			if (weak && !has_weak_ref) {
4613 				node->has_weak_ref = 1;
4614 				node->pending_weak_ref = 1;
4615 				node->local_weak_refs++;
4616 			}
4617 			if (strong && !has_strong_ref) {
4618 				node->has_strong_ref = 1;
4619 				node->pending_strong_ref = 1;
4620 				node->local_strong_refs++;
4621 			}
4622 			if (!strong && has_strong_ref)
4623 				node->has_strong_ref = 0;
4624 			if (!weak && has_weak_ref)
4625 				node->has_weak_ref = 0;
4626 			if (!weak && !strong) {
4627 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4628 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4629 					     proc->pid, thread->pid,
4630 					     node_debug_id,
4631 					     (u64)node_ptr,
4632 					     (u64)node_cookie);
4633 				rb_erase(&node->rb_node, &proc->nodes);
4634 				binder_inner_proc_unlock(proc);
4635 				binder_node_lock(node);
4636 				/*
4637 				 * Acquire the node lock before freeing the
4638 				 * node to serialize with other threads that
4639 				 * may have been holding the node lock while
4640 				 * decrementing this node (avoids race where
4641 				 * this thread frees while the other thread
4642 				 * is unlocking the node after the final
4643 				 * decrement)
4644 				 */
4645 				binder_node_unlock(node);
4646 				binder_free_node(node);
4647 			} else
4648 				binder_inner_proc_unlock(proc);
4649 
4650 			if (weak && !has_weak_ref)
4651 				ret = binder_put_node_cmd(
4652 						proc, thread, &ptr, node_ptr,
4653 						node_cookie, node_debug_id,
4654 						BR_INCREFS, "BR_INCREFS");
4655 			if (!ret && strong && !has_strong_ref)
4656 				ret = binder_put_node_cmd(
4657 						proc, thread, &ptr, node_ptr,
4658 						node_cookie, node_debug_id,
4659 						BR_ACQUIRE, "BR_ACQUIRE");
4660 			if (!ret && !strong && has_strong_ref)
4661 				ret = binder_put_node_cmd(
4662 						proc, thread, &ptr, node_ptr,
4663 						node_cookie, node_debug_id,
4664 						BR_RELEASE, "BR_RELEASE");
4665 			if (!ret && !weak && has_weak_ref)
4666 				ret = binder_put_node_cmd(
4667 						proc, thread, &ptr, node_ptr,
4668 						node_cookie, node_debug_id,
4669 						BR_DECREFS, "BR_DECREFS");
4670 			if (orig_ptr == ptr)
4671 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4672 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4673 					     proc->pid, thread->pid,
4674 					     node_debug_id,
4675 					     (u64)node_ptr,
4676 					     (u64)node_cookie);
4677 			if (ret)
4678 				return ret;
4679 		} break;
4680 		case BINDER_WORK_DEAD_BINDER:
4681 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4682 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4683 			struct binder_ref_death *death;
4684 			uint32_t cmd;
4685 			binder_uintptr_t cookie;
4686 
4687 			death = container_of(w, struct binder_ref_death, work);
4688 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4689 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4690 			else
4691 				cmd = BR_DEAD_BINDER;
4692 			cookie = death->cookie;
4693 
4694 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4695 				     "%d:%d %s %016llx\n",
4696 				      proc->pid, thread->pid,
4697 				      cmd == BR_DEAD_BINDER ?
4698 				      "BR_DEAD_BINDER" :
4699 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4700 				      (u64)cookie);
4701 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4702 				binder_inner_proc_unlock(proc);
4703 				kfree(death);
4704 				binder_stats_deleted(BINDER_STAT_DEATH);
4705 			} else {
4706 				binder_enqueue_work_ilocked(
4707 						w, &proc->delivered_death);
4708 				binder_inner_proc_unlock(proc);
4709 			}
4710 			if (put_user(cmd, (uint32_t __user *)ptr))
4711 				return -EFAULT;
4712 			ptr += sizeof(uint32_t);
4713 			if (put_user(cookie,
4714 				     (binder_uintptr_t __user *)ptr))
4715 				return -EFAULT;
4716 			ptr += sizeof(binder_uintptr_t);
4717 			binder_stat_br(proc, thread, cmd);
4718 			if (cmd == BR_DEAD_BINDER)
4719 				goto done; /* DEAD_BINDER notifications can cause transactions */
4720 		} break;
4721 		default:
4722 			binder_inner_proc_unlock(proc);
4723 			pr_err("%d:%d: bad work type %d\n",
4724 			       proc->pid, thread->pid, w->type);
4725 			break;
4726 		}
4727 
4728 		if (!t)
4729 			continue;
4730 
4731 		BUG_ON(t->buffer == NULL);
4732 		if (t->buffer->target_node) {
4733 			struct binder_node *target_node = t->buffer->target_node;
4734 
4735 			trd->target.ptr = target_node->ptr;
4736 			trd->cookie =  target_node->cookie;
4737 			t->saved_priority = task_nice(current);
4738 			if (t->priority < target_node->min_priority &&
4739 			    !(t->flags & TF_ONE_WAY))
4740 				binder_set_nice(t->priority);
4741 			else if (!(t->flags & TF_ONE_WAY) ||
4742 				 t->saved_priority > target_node->min_priority)
4743 				binder_set_nice(target_node->min_priority);
4744 			cmd = BR_TRANSACTION;
4745 		} else {
4746 			trd->target.ptr = 0;
4747 			trd->cookie = 0;
4748 			cmd = BR_REPLY;
4749 		}
4750 		trd->code = t->code;
4751 		trd->flags = t->flags;
4752 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4753 
4754 		t_from = binder_get_txn_from(t);
4755 		if (t_from) {
4756 			struct task_struct *sender = t_from->proc->tsk;
4757 
4758 			trd->sender_pid =
4759 				task_tgid_nr_ns(sender,
4760 						task_active_pid_ns(current));
4761 		} else {
4762 			trd->sender_pid = 0;
4763 		}
4764 
4765 		ret = binder_apply_fd_fixups(proc, t);
4766 		if (ret) {
4767 			struct binder_buffer *buffer = t->buffer;
4768 			bool oneway = !!(t->flags & TF_ONE_WAY);
4769 			int tid = t->debug_id;
4770 
4771 			if (t_from)
4772 				binder_thread_dec_tmpref(t_from);
4773 			buffer->transaction = NULL;
4774 			binder_cleanup_transaction(t, "fd fixups failed",
4775 						   BR_FAILED_REPLY);
4776 			binder_free_buf(proc, thread, buffer, true);
4777 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4778 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4779 				     proc->pid, thread->pid,
4780 				     oneway ? "async " :
4781 					(cmd == BR_REPLY ? "reply " : ""),
4782 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4783 			if (cmd == BR_REPLY) {
4784 				cmd = BR_FAILED_REPLY;
4785 				if (put_user(cmd, (uint32_t __user *)ptr))
4786 					return -EFAULT;
4787 				ptr += sizeof(uint32_t);
4788 				binder_stat_br(proc, thread, cmd);
4789 				break;
4790 			}
4791 			continue;
4792 		}
4793 		trd->data_size = t->buffer->data_size;
4794 		trd->offsets_size = t->buffer->offsets_size;
4795 		trd->data.ptr.buffer = t->buffer->user_data;
4796 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4797 					ALIGN(t->buffer->data_size,
4798 					    sizeof(void *));
4799 
4800 		tr.secctx = t->security_ctx;
4801 		if (t->security_ctx) {
4802 			cmd = BR_TRANSACTION_SEC_CTX;
4803 			trsize = sizeof(tr);
4804 		}
4805 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4806 			if (t_from)
4807 				binder_thread_dec_tmpref(t_from);
4808 
4809 			binder_cleanup_transaction(t, "put_user failed",
4810 						   BR_FAILED_REPLY);
4811 
4812 			return -EFAULT;
4813 		}
4814 		ptr += sizeof(uint32_t);
4815 		if (copy_to_user(ptr, &tr, trsize)) {
4816 			if (t_from)
4817 				binder_thread_dec_tmpref(t_from);
4818 
4819 			binder_cleanup_transaction(t, "copy_to_user failed",
4820 						   BR_FAILED_REPLY);
4821 
4822 			return -EFAULT;
4823 		}
4824 		ptr += trsize;
4825 
4826 		trace_binder_transaction_received(t);
4827 		binder_stat_br(proc, thread, cmd);
4828 		binder_debug(BINDER_DEBUG_TRANSACTION,
4829 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4830 			     proc->pid, thread->pid,
4831 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4832 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4833 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4834 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4835 			     t_from ? t_from->pid : 0, cmd,
4836 			     t->buffer->data_size, t->buffer->offsets_size,
4837 			     (u64)trd->data.ptr.buffer,
4838 			     (u64)trd->data.ptr.offsets);
4839 
4840 		if (t_from)
4841 			binder_thread_dec_tmpref(t_from);
4842 		t->buffer->allow_user_free = 1;
4843 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4844 			binder_inner_proc_lock(thread->proc);
4845 			t->to_parent = thread->transaction_stack;
4846 			t->to_thread = thread;
4847 			thread->transaction_stack = t;
4848 			binder_inner_proc_unlock(thread->proc);
4849 		} else {
4850 			binder_free_transaction(t);
4851 		}
4852 		break;
4853 	}
4854 
4855 done:
4856 
4857 	*consumed = ptr - buffer;
4858 	binder_inner_proc_lock(proc);
4859 	if (proc->requested_threads == 0 &&
4860 	    list_empty(&thread->proc->waiting_threads) &&
4861 	    proc->requested_threads_started < proc->max_threads &&
4862 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out */) {
4865 		proc->requested_threads++;
4866 		binder_inner_proc_unlock(proc);
4867 		binder_debug(BINDER_DEBUG_THREADS,
4868 			     "%d:%d BR_SPAWN_LOOPER\n",
4869 			     proc->pid, thread->pid);
4870 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4871 			return -EFAULT;
4872 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4873 	} else
4874 		binder_inner_proc_unlock(proc);
4875 	return 0;
4876 }
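
/*
 * Illustrative only (not part of the driver): a minimal user-space read
 * loop consuming what binder_thread_read() produced. Note that for
 * synchronous calls the BR_TRANSACTION_COMPLETE is deferred by
 * binder_transaction(), so it typically arrives together with the
 * reply. A sketch, assuming `binder_fd` is an open /dev/binder
 * descriptor:
 *
 *	char rbuf[256];
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 *	char *p = rbuf;
 *	while (p < rbuf + bwr.read_consumed) {
 *		uint32_t cmd;
 *
 *		memcpy(&cmd, p, sizeof(cmd));
 *		p += sizeof(cmd);
 *		switch (cmd) {
 *		case BR_NOOP:
 *		case BR_TRANSACTION_COMPLETE:
 *			break;
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data tr;
 *
 *			memcpy(&tr, p, sizeof(tr));
 *			p += sizeof(tr);
 *			// handle tr, then BC_FREE_BUFFER (+ BC_REPLY)
 *			break;
 *		}
 *		default:
 *			return;	// payload size unknown in this sketch
 *		}
 *	}
 */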
4877 
4878 static void binder_release_work(struct binder_proc *proc,
4879 				struct list_head *list)
4880 {
4881 	struct binder_work *w;
4882 	enum binder_work_type wtype;
4883 
4884 	while (1) {
4885 		binder_inner_proc_lock(proc);
4886 		w = binder_dequeue_work_head_ilocked(list);
4887 		wtype = w ? w->type : 0;
4888 		binder_inner_proc_unlock(proc);
4889 		if (!w)
4890 			return;
4891 
4892 		switch (wtype) {
4893 		case BINDER_WORK_TRANSACTION: {
4894 			struct binder_transaction *t;
4895 
4896 			t = container_of(w, struct binder_transaction, work);
4897 
4898 			binder_cleanup_transaction(t, "process died.",
4899 						   BR_DEAD_REPLY);
4900 		} break;
4901 		case BINDER_WORK_RETURN_ERROR: {
4902 			struct binder_error *e = container_of(
4903 					w, struct binder_error, work);
4904 
4905 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4906 				"undelivered TRANSACTION_ERROR: %u\n",
4907 				e->cmd);
4908 		} break;
4909 		case BINDER_WORK_TRANSACTION_PENDING:
4910 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4911 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4912 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4913 				"undelivered TRANSACTION_COMPLETE\n");
4914 			kfree(w);
4915 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4916 		} break;
4917 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4918 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4919 			struct binder_ref_death *death;
4920 
4921 			death = container_of(w, struct binder_ref_death, work);
4922 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4923 				"undelivered death notification, %016llx\n",
4924 				(u64)death->cookie);
4925 			kfree(death);
4926 			binder_stats_deleted(BINDER_STAT_DEATH);
4927 		} break;
4928 		case BINDER_WORK_NODE:
4929 			break;
4930 		default:
4931 			pr_err("unexpected work type, %d, not freed\n",
4932 			       wtype);
4933 			break;
4934 		}
4935 	}
}
4938 
4939 static struct binder_thread *binder_get_thread_ilocked(
4940 		struct binder_proc *proc, struct binder_thread *new_thread)
4941 {
4942 	struct binder_thread *thread = NULL;
4943 	struct rb_node *parent = NULL;
4944 	struct rb_node **p = &proc->threads.rb_node;
4945 
4946 	while (*p) {
4947 		parent = *p;
4948 		thread = rb_entry(parent, struct binder_thread, rb_node);
4949 
4950 		if (current->pid < thread->pid)
4951 			p = &(*p)->rb_left;
4952 		else if (current->pid > thread->pid)
4953 			p = &(*p)->rb_right;
4954 		else
4955 			return thread;
4956 	}
4957 	if (!new_thread)
4958 		return NULL;
4959 	thread = new_thread;
4960 	binder_stats_created(BINDER_STAT_THREAD);
4961 	thread->proc = proc;
4962 	thread->pid = current->pid;
4963 	atomic_set(&thread->tmp_ref, 0);
4964 	init_waitqueue_head(&thread->wait);
4965 	INIT_LIST_HEAD(&thread->todo);
4966 	rb_link_node(&thread->rb_node, parent, p);
4967 	rb_insert_color(&thread->rb_node, &proc->threads);
4968 	thread->looper_need_return = true;
4969 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4970 	thread->return_error.cmd = BR_OK;
4971 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4972 	thread->reply_error.cmd = BR_OK;
4973 	thread->ee.command = BR_OK;
4974 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4975 	return thread;
4976 }
4977 
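/**
 * binder_get_thread() - find or create the binder_thread for current
 * @proc:	binder_proc for this file
 *
 * Looks up the thread under the inner lock first; on a miss it
 * allocates outside the lock and retries the insert, freeing the
 * allocation if another context raced the insert in first.
 *
 * Return: the thread, or NULL on allocation failure.
 */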
4978 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4979 {
4980 	struct binder_thread *thread;
4981 	struct binder_thread *new_thread;
4982 
4983 	binder_inner_proc_lock(proc);
4984 	thread = binder_get_thread_ilocked(proc, NULL);
4985 	binder_inner_proc_unlock(proc);
4986 	if (!thread) {
4987 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4988 		if (new_thread == NULL)
4989 			return NULL;
4990 		binder_inner_proc_lock(proc);
4991 		thread = binder_get_thread_ilocked(proc, new_thread);
4992 		binder_inner_proc_unlock(proc);
4993 		if (thread != new_thread)
4994 			kfree(new_thread);
4995 	}
4996 	return thread;
4997 }
4998 
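/**
 * binder_free_proc() - final teardown of a binder_proc
 * @proc:	process to free
 *
 * Called once the last temporary reference is dropped. Releases the
 * backing binder_device if this was its last user, tears down the
 * buffer allocator, and drops the task and credential references
 * taken at open time.
 */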
4999 static void binder_free_proc(struct binder_proc *proc)
5000 {
5001 	struct binder_device *device;
5002 
5003 	BUG_ON(!list_empty(&proc->todo));
5004 	BUG_ON(!list_empty(&proc->delivered_death));
5005 	if (proc->outstanding_txns)
5006 		pr_warn("%s: Unexpected outstanding_txns %d\n",
5007 			__func__, proc->outstanding_txns);
5008 	device = container_of(proc->context, struct binder_device, context);
5009 	if (refcount_dec_and_test(&device->ref)) {
5010 		kfree(proc->context->name);
5011 		kfree(device);
5012 	}
5013 	binder_alloc_deferred_release(&proc->alloc);
5014 	put_task_struct(proc->tsk);
5015 	put_cred(proc->cred);
5016 	binder_stats_deleted(BINDER_STAT_PROC);
5017 	dbitmap_free(&proc->dmap);
5018 	kfree(proc);
5019 }
5020 
5021 static void binder_free_thread(struct binder_thread *thread)
5022 {
5023 	BUG_ON(!list_empty(&thread->todo));
5024 	binder_stats_deleted(BINDER_STAT_THREAD);
5025 	binder_proc_dec_tmpref(thread->proc);
5026 	kfree(thread);
5027 }
5028 
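/**
 * binder_thread_release() - detach @thread from @proc and tear it down
 * @proc:	owning process
 * @thread:	thread that is exiting
 *
 * Removes the thread from proc->threads, walks its transaction stack
 * severing each still-active transaction, arranges a BR_DEAD_REPLY
 * for a reply the peer is still waiting on, and flushes pending work.
 * Poll waiters are kicked via wake_up_pollfree(), with a
 * synchronize_rcu() to close the race against concurrent waitqueue
 * removal.
 *
 * Return: the number of transactions that were still active.
 */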
5029 static int binder_thread_release(struct binder_proc *proc,
5030 				 struct binder_thread *thread)
5031 {
5032 	struct binder_transaction *t;
5033 	struct binder_transaction *send_reply = NULL;
5034 	int active_transactions = 0;
5035 	struct binder_transaction *last_t = NULL;
5036 
5037 	binder_inner_proc_lock(thread->proc);
5038 	/*
5039 	 * take a ref on the proc so it survives
5040 	 * after we remove this thread from proc->threads.
5041 	 * The corresponding decrement happens when the
5042 	 * thread is actually freed in binder_free_thread().
5043 	 */
5044 	proc->tmp_ref++;
5045 	/*
5046 	 * take a ref on this thread to ensure it
5047 	 * survives while we are releasing it
5048 	 */
5049 	atomic_inc(&thread->tmp_ref);
5050 	rb_erase(&thread->rb_node, &proc->threads);
5051 	t = thread->transaction_stack;
5052 	if (t) {
5053 		spin_lock(&t->lock);
5054 		if (t->to_thread == thread)
5055 			send_reply = t;
5056 	} else {
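		/* annotation for sparse: t is NULL, balance the __release below */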
5057 		__acquire(&t->lock);
5058 	}
5059 	thread->is_dead = true;
5060 
5061 	while (t) {
5062 		last_t = t;
5063 		active_transactions++;
5064 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5065 			     "release %d:%d transaction %d %s, still active\n",
5066 			      proc->pid, thread->pid,
5067 			     t->debug_id,
5068 			     (t->to_thread == thread) ? "in" : "out");
5069 
5070 		if (t->to_thread == thread) {
5071 			thread->proc->outstanding_txns--;
5072 			t->to_proc = NULL;
5073 			t->to_thread = NULL;
5074 			if (t->buffer) {
5075 				t->buffer->transaction = NULL;
5076 				t->buffer = NULL;
5077 			}
5078 			t = t->to_parent;
5079 		} else if (t->from == thread) {
5080 			t->from = NULL;
5081 			t = t->from_parent;
5082 		} else
5083 			BUG();
5084 		spin_unlock(&last_t->lock);
5085 		if (t)
5086 			spin_lock(&t->lock);
5087 		else
5088 			__acquire(&t->lock);
5089 	}
5090 	/* annotation for sparse, lock not acquired in last iteration above */
5091 	__release(&t->lock);
5092 
5093 	/*
5094 	 * If this thread used poll, make sure we remove the waitqueue from any
5095 	 * poll data structures holding it.
5096 	 */
5097 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5098 		wake_up_pollfree(&thread->wait);
5099 
5100 	binder_inner_proc_unlock(thread->proc);
5101 
5102 	/*
5103 	 * This is needed to avoid races between wake_up_pollfree() above and
5104 	 * someone else removing the last entry from the queue for other reasons
5105 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5106 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5107 	 * we can be sure they're done after we call synchronize_rcu().
5108 	 */
5109 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5110 		synchronize_rcu();
5111 
5112 	if (send_reply)
5113 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5114 	binder_release_work(proc, &thread->todo);
5115 	binder_thread_dec_tmpref(thread);
5116 	return active_transactions;
5117 }
5118 
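/**
 * binder_poll() - poll/epoll entry point for a binder fd
 * @filp:	binder file
 * @wait:	poll table from the VFS poll code
 *
 * Marks the thread as a poll user so binder_thread_release() knows
 * to pollfree its waitqueue, then reports EPOLLIN when thread or
 * proc work is available.
 */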
5119 static __poll_t binder_poll(struct file *filp,
5120 				struct poll_table_struct *wait)
5121 {
5122 	struct binder_proc *proc = filp->private_data;
5123 	struct binder_thread *thread = NULL;
5124 	bool wait_for_proc_work;
5125 
5126 	thread = binder_get_thread(proc);
5127 	if (!thread)
5128 		return EPOLLERR;
5129 
5130 	binder_inner_proc_lock(thread->proc);
5131 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5132 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5133 
5134 	binder_inner_proc_unlock(thread->proc);
5135 
5136 	poll_wait(filp, &thread->wait, wait);
5137 
5138 	if (binder_has_work(thread, wait_for_proc_work))
5139 		return EPOLLIN;
5140 
5141 	return 0;
5142 }
5143 
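/**
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ
 * @filp:	binder file
 * @arg:	user pointer to a struct binder_write_read
 * @thread:	calling binder_thread
 *
 * Copies in the bwr descriptor, consumes the write buffer first,
 * then fills the read buffer (honoring O_NONBLOCK), and copies the
 * updated consumed counts back to user space even on error, so the
 * caller can tell how far processing got.
 */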
5144 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5145 				struct binder_thread *thread)
5146 {
5147 	int ret = 0;
5148 	struct binder_proc *proc = filp->private_data;
5149 	void __user *ubuf = (void __user *)arg;
5150 	struct binder_write_read bwr;
5151 
5152 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5153 		ret = -EFAULT;
5154 		goto out;
5155 	}
5156 	binder_debug(BINDER_DEBUG_READ_WRITE,
5157 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5158 		     proc->pid, thread->pid,
5159 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5160 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5161 
5162 	if (bwr.write_size > 0) {
5163 		ret = binder_thread_write(proc, thread,
5164 					  bwr.write_buffer,
5165 					  bwr.write_size,
5166 					  &bwr.write_consumed);
5167 		trace_binder_write_done(ret);
5168 		if (ret < 0) {
5169 			bwr.read_consumed = 0;
5170 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5171 				ret = -EFAULT;
5172 			goto out;
5173 		}
5174 	}
5175 	if (bwr.read_size > 0) {
5176 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5177 					 bwr.read_size,
5178 					 &bwr.read_consumed,
5179 					 filp->f_flags & O_NONBLOCK);
5180 		trace_binder_read_done(ret);
5181 		binder_inner_proc_lock(proc);
5182 		if (!binder_worklist_empty_ilocked(&proc->todo))
5183 			binder_wakeup_proc_ilocked(proc);
5184 		binder_inner_proc_unlock(proc);
5185 		if (ret < 0) {
5186 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5187 				ret = -EFAULT;
5188 			goto out;
5189 		}
5190 	}
5191 	binder_debug(BINDER_DEBUG_READ_WRITE,
5192 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5193 		     proc->pid, thread->pid,
5194 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5195 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5196 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5197 		ret = -EFAULT;
5198 		goto out;
5199 	}
5200 out:
5201 	return ret;
5202 }
5203 
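/**
 * binder_ioctl_set_ctx_mgr() - register the context manager node
 * @filp:	binder file
 * @fbo:	object from BINDER_SET_CONTEXT_MGR_EXT, or NULL for the
 *		legacy BINDER_SET_CONTEXT_MGR ioctl
 *
 * Only one context manager may exist per context, subject to an LSM
 * check, and once a manager uid is recorded only that euid may
 * register again. The new node is pinned with explicit strong and
 * weak references so it persists for the life of the context.
 */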
5204 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5205 				    struct flat_binder_object *fbo)
5206 {
5207 	int ret = 0;
5208 	struct binder_proc *proc = filp->private_data;
5209 	struct binder_context *context = proc->context;
5210 	struct binder_node *new_node;
5211 	kuid_t curr_euid = current_euid();
5212 
5213 	mutex_lock(&context->context_mgr_node_lock);
5214 	if (context->binder_context_mgr_node) {
5215 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5216 		ret = -EBUSY;
5217 		goto out;
5218 	}
5219 	ret = security_binder_set_context_mgr(proc->cred);
5220 	if (ret < 0)
5221 		goto out;
5222 	if (uid_valid(context->binder_context_mgr_uid)) {
5223 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5224 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5225 			       from_kuid(&init_user_ns, curr_euid),
5226 			       from_kuid(&init_user_ns,
5227 					 context->binder_context_mgr_uid));
5228 			ret = -EPERM;
5229 			goto out;
5230 		}
5231 	} else {
5232 		context->binder_context_mgr_uid = curr_euid;
5233 	}
5234 	new_node = binder_new_node(proc, fbo);
5235 	if (!new_node) {
5236 		ret = -ENOMEM;
5237 		goto out;
5238 	}
5239 	binder_node_lock(new_node);
5240 	new_node->local_weak_refs++;
5241 	new_node->local_strong_refs++;
5242 	new_node->has_strong_ref = 1;
5243 	new_node->has_weak_ref = 1;
5244 	context->binder_context_mgr_node = new_node;
5245 	binder_node_unlock(new_node);
5246 	binder_put_node(new_node);
5247 out:
5248 	mutex_unlock(&context->context_mgr_node_lock);
5249 	return ret;
5250 }
5251 
5252 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5253 		struct binder_node_info_for_ref *info)
5254 {
5255 	struct binder_node *node;
5256 	struct binder_context *context = proc->context;
5257 	__u32 handle = info->handle;
5258 
5259 	if (info->strong_count || info->weak_count || info->reserved1 ||
5260 	    info->reserved2 || info->reserved3) {
5261 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
5262 				  proc->pid);
5263 		return -EINVAL;
5264 	}
5265 
5266 	/* This ioctl may only be used by the context manager */
5267 	mutex_lock(&context->context_mgr_node_lock);
5268 	if (!context->binder_context_mgr_node ||
5269 		context->binder_context_mgr_node->proc != proc) {
5270 		mutex_unlock(&context->context_mgr_node_lock);
5271 		return -EPERM;
5272 	}
5273 	mutex_unlock(&context->context_mgr_node_lock);
5274 
5275 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5276 	if (!node)
5277 		return -EINVAL;
5278 
5279 	info->strong_count = node->local_strong_refs +
5280 		node->internal_strong_refs;
5281 	info->weak_count = node->local_weak_refs;
5282 
5283 	binder_put_node(node);
5284 
5285 	return 0;
5286 }
5287 
5288 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5289 				struct binder_node_debug_info *info)
5290 {
5291 	struct rb_node *n;
5292 	binder_uintptr_t ptr = info->ptr;
5293 
5294 	memset(info, 0, sizeof(*info));
5295 
5296 	binder_inner_proc_lock(proc);
5297 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5298 		struct binder_node *node = rb_entry(n, struct binder_node,
5299 						    rb_node);
5300 		if (node->ptr > ptr) {
5301 			info->ptr = node->ptr;
5302 			info->cookie = node->cookie;
5303 			info->has_strong_ref = node->has_strong_ref;
5304 			info->has_weak_ref = node->has_weak_ref;
5305 			break;
5306 		}
5307 	}
5308 	binder_inner_proc_unlock(proc);
5309 
5310 	return 0;
5311 }
5312 
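/**
 * binder_txns_pending_ilocked() - are transactions still in flight?
 * @proc:	process to inspect (inner lock must be held)
 *
 * Return: true if @proc has outstanding transactions or any of its
 * threads still carries a transaction stack. Used to decide whether
 * a freeze request can complete.
 */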
5313 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5314 {
5315 	struct rb_node *n;
5316 	struct binder_thread *thread;
5317 
5318 	if (proc->outstanding_txns > 0)
5319 		return true;
5320 
5321 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5322 		thread = rb_entry(n, struct binder_thread, rb_node);
5323 		if (thread->transaction_stack)
5324 			return true;
5325 	}
5326 	return false;
5327 }
5328 
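/**
 * binder_ioctl_freeze() - freeze or unfreeze one binder_proc
 * @info:	request (enable flag, pid, optional timeout)
 * @target_proc: process to act on
 *
 * Unfreezing simply clears the frozen and recv flags. Freezing sets
 * the frozen state first to block new transactions, optionally waits
 * up to timeout_ms for outstanding transactions to drain, and rolls
 * the frozen state back on failure (-EAGAIN if transactions remain,
 * or an error from the interrupted wait).
 */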
5329 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5330 			       struct binder_proc *target_proc)
5331 {
5332 	int ret = 0;
5333 
5334 	if (!info->enable) {
5335 		binder_inner_proc_lock(target_proc);
5336 		target_proc->sync_recv = false;
5337 		target_proc->async_recv = false;
5338 		target_proc->is_frozen = false;
5339 		binder_inner_proc_unlock(target_proc);
5340 		return 0;
5341 	}
5342 
5343 	/*
5344 	 * Freezing the target. Prevent new transactions by
5345 	 * setting frozen state. If timeout specified, wait
5346 	 * for transactions to drain.
5347 	 */
5348 	binder_inner_proc_lock(target_proc);
5349 	target_proc->sync_recv = false;
5350 	target_proc->async_recv = false;
5351 	target_proc->is_frozen = true;
5352 	binder_inner_proc_unlock(target_proc);
5353 
5354 	if (info->timeout_ms > 0)
5355 		ret = wait_event_interruptible_timeout(
5356 			target_proc->freeze_wait,
5357 			(!target_proc->outstanding_txns),
5358 			msecs_to_jiffies(info->timeout_ms));
5359 
5360 	/* Check pending transactions that wait for reply */
5361 	if (ret >= 0) {
5362 		binder_inner_proc_lock(target_proc);
5363 		if (binder_txns_pending_ilocked(target_proc))
5364 			ret = -EAGAIN;
5365 		binder_inner_proc_unlock(target_proc);
5366 	}
5367 
5368 	if (ret < 0) {
5369 		binder_inner_proc_lock(target_proc);
5370 		target_proc->is_frozen = false;
5371 		binder_inner_proc_unlock(target_proc);
5372 	}
5373 
5374 	return ret;
5375 }
5376 
5377 static int binder_ioctl_get_freezer_info(
5378 				struct binder_frozen_status_info *info)
5379 {
5380 	struct binder_proc *target_proc;
5381 	bool found = false;
5382 	__u32 txns_pending;
5383 
5384 	info->sync_recv = 0;
5385 	info->async_recv = 0;
5386 
5387 	mutex_lock(&binder_procs_lock);
5388 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5389 		if (target_proc->pid == info->pid) {
5390 			found = true;
5391 			binder_inner_proc_lock(target_proc);
5392 			txns_pending = binder_txns_pending_ilocked(target_proc);
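			/* bit 0 reports sync_recv; pack txns_pending into bit 1 */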
5393 			info->sync_recv |= target_proc->sync_recv |
5394 					(txns_pending << 1);
5395 			info->async_recv |= target_proc->async_recv;
5396 			binder_inner_proc_unlock(target_proc);
5397 		}
5398 	}
5399 	mutex_unlock(&binder_procs_lock);
5400 
5401 	if (!found)
5402 		return -EINVAL;
5403 
5404 	return 0;
5405 }
5406 
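/**
 * binder_ioctl_get_extended_error() - handle BINDER_GET_EXTENDED_ERROR
 * @thread:	calling binder_thread
 * @ubuf:	user buffer receiving the binder_extended_error
 *
 * Copies out the most recent extended error recorded on the thread
 * and resets it to BR_OK, so each error is reported only once.
 */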
5407 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5408 					   void __user *ubuf)
5409 {
5410 	struct binder_extended_error ee;
5411 
5412 	binder_inner_proc_lock(thread->proc);
5413 	ee = thread->ee;
5414 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5415 	binder_inner_proc_unlock(thread->proc);
5416 
5417 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5418 		return -EFAULT;
5419 
5420 	return 0;
5421 }
5422 
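/**
 * binder_ioctl() - top-level ioctl dispatcher for binder devices
 * @filp:	binder file
 * @cmd:	ioctl command
 * @arg:	user pointer or value for @cmd
 *
 * Resolves the calling binder_thread, creating it on first use, then
 * dispatches to the per-command handlers above. looper_need_return
 * is cleared on the way out so a looping thread resumes normally.
 */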
5423 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5424 {
5425 	int ret;
5426 	struct binder_proc *proc = filp->private_data;
5427 	struct binder_thread *thread;
5428 	void __user *ubuf = (void __user *)arg;
5429 
5430 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5431 			proc->pid, current->pid, cmd, arg);*/
5432 
5433 	binder_selftest_alloc(&proc->alloc);
5434 
5435 	trace_binder_ioctl(cmd, arg);
5436 
5437 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5438 	if (ret)
5439 		goto err_unlocked;
5440 
5441 	thread = binder_get_thread(proc);
5442 	if (thread == NULL) {
5443 		ret = -ENOMEM;
5444 		goto err;
5445 	}
5446 
5447 	switch (cmd) {
5448 	case BINDER_WRITE_READ:
5449 		ret = binder_ioctl_write_read(filp, arg, thread);
5450 		if (ret)
5451 			goto err;
5452 		break;
5453 	case BINDER_SET_MAX_THREADS: {
5454 		u32 max_threads;
5455 
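		/* note: unlike most cases here, a failed copy reports -EINVAL, not -EFAULT */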
5456 		if (copy_from_user(&max_threads, ubuf,
5457 				   sizeof(max_threads))) {
5458 			ret = -EINVAL;
5459 			goto err;
5460 		}
5461 		binder_inner_proc_lock(proc);
5462 		proc->max_threads = max_threads;
5463 		binder_inner_proc_unlock(proc);
5464 		break;
5465 	}
5466 	case BINDER_SET_CONTEXT_MGR_EXT: {
5467 		struct flat_binder_object fbo;
5468 
5469 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5470 			ret = -EINVAL;
5471 			goto err;
5472 		}
5473 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5474 		if (ret)
5475 			goto err;
5476 		break;
5477 	}
5478 	case BINDER_SET_CONTEXT_MGR:
5479 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5480 		if (ret)
5481 			goto err;
5482 		break;
5483 	case BINDER_THREAD_EXIT:
5484 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5485 			     proc->pid, thread->pid);
5486 		binder_thread_release(proc, thread);
5487 		thread = NULL;
5488 		break;
5489 	case BINDER_VERSION: {
5490 		struct binder_version __user *ver = ubuf;
5491 
5492 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5493 			     &ver->protocol_version)) {
5494 			ret = -EINVAL;
5495 			goto err;
5496 		}
5497 		break;
5498 	}
5499 	case BINDER_GET_NODE_INFO_FOR_REF: {
5500 		struct binder_node_info_for_ref info;
5501 
5502 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5503 			ret = -EFAULT;
5504 			goto err;
5505 		}
5506 
5507 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5508 		if (ret < 0)
5509 			goto err;
5510 
5511 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5512 			ret = -EFAULT;
5513 			goto err;
5514 		}
5515 
5516 		break;
5517 	}
5518 	case BINDER_GET_NODE_DEBUG_INFO: {
5519 		struct binder_node_debug_info info;
5520 
5521 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5522 			ret = -EFAULT;
5523 			goto err;
5524 		}
5525 
5526 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5527 		if (ret < 0)
5528 			goto err;
5529 
5530 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5531 			ret = -EFAULT;
5532 			goto err;
5533 		}
5534 		break;
5535 	}
5536 	case BINDER_FREEZE: {
5537 		struct binder_freeze_info info;
5538 		struct binder_proc **target_procs = NULL, *target_proc;
5539 		int target_procs_count = 0, i = 0;
5540 
5541 		ret = 0;
5542 
5543 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5544 			ret = -EFAULT;
5545 			goto err;
5546 		}
5547 
5548 		mutex_lock(&binder_procs_lock);
5549 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5550 			if (target_proc->pid == info.pid)
5551 				target_procs_count++;
5552 		}
5553 
5554 		if (target_procs_count == 0) {
5555 			mutex_unlock(&binder_procs_lock);
5556 			ret = -EINVAL;
5557 			goto err;
5558 		}
5559 
5560 		target_procs = kcalloc(target_procs_count,
5561 				       sizeof(struct binder_proc *),
5562 				       GFP_KERNEL);
5563 
5564 		if (!target_procs) {
5565 			mutex_unlock(&binder_procs_lock);
5566 			ret = -ENOMEM;
5567 			goto err;
5568 		}
5569 
5570 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5571 			if (target_proc->pid != info.pid)
5572 				continue;
5573 
5574 			binder_inner_proc_lock(target_proc);
5575 			target_proc->tmp_ref++;
5576 			binder_inner_proc_unlock(target_proc);
5577 
5578 			target_procs[i++] = target_proc;
5579 		}
5580 		mutex_unlock(&binder_procs_lock);
5581 
5582 		for (i = 0; i < target_procs_count; i++) {
5583 			if (ret >= 0)
5584 				ret = binder_ioctl_freeze(&info,
5585 							  target_procs[i]);
5586 
5587 			binder_proc_dec_tmpref(target_procs[i]);
5588 		}
5589 
5590 		kfree(target_procs);
5591 
5592 		if (ret < 0)
5593 			goto err;
5594 		break;
5595 	}
5596 	case BINDER_GET_FROZEN_INFO: {
5597 		struct binder_frozen_status_info info;
5598 
5599 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5600 			ret = -EFAULT;
5601 			goto err;
5602 		}
5603 
5604 		ret = binder_ioctl_get_freezer_info(&info);
5605 		if (ret < 0)
5606 			goto err;
5607 
5608 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5609 			ret = -EFAULT;
5610 			goto err;
5611 		}
5612 		break;
5613 	}
5614 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5615 		uint32_t enable;
5616 
5617 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5618 			ret = -EFAULT;
5619 			goto err;
5620 		}
5621 		binder_inner_proc_lock(proc);
5622 		proc->oneway_spam_detection_enabled = (bool)enable;
5623 		binder_inner_proc_unlock(proc);
5624 		break;
5625 	}
5626 	case BINDER_GET_EXTENDED_ERROR:
5627 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5628 		if (ret < 0)
5629 			goto err;
5630 		break;
5631 	default:
5632 		ret = -EINVAL;
5633 		goto err;
5634 	}
5635 	ret = 0;
5636 err:
5637 	if (thread)
5638 		thread->looper_need_return = false;
5639 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5640 	if (ret && ret != -EINTR)
5641 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5642 err_unlocked:
5643 	trace_binder_ioctl_done(ret);
5644 	return ret;
5645 }
5646 
5647 static void binder_vma_open(struct vm_area_struct *vma)
5648 {
5649 	struct binder_proc *proc = vma->vm_private_data;
5650 
5651 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5652 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5653 		     proc->pid, vma->vm_start, vma->vm_end,
5654 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5655 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5656 }
5657 
5658 static void binder_vma_close(struct vm_area_struct *vma)
5659 {
5660 	struct binder_proc *proc = vma->vm_private_data;
5661 
5662 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5663 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5664 		     proc->pid, vma->vm_start, vma->vm_end,
5665 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5666 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5667 	binder_alloc_vma_close(&proc->alloc);
5668 }
5669 
5670 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5671 {
5672 	return VM_FAULT_SIGBUS;
5673 }
5674 
5675 static const struct vm_operations_struct binder_vm_ops = {
5676 	.open = binder_vma_open,
5677 	.close = binder_vma_close,
5678 	.fault = binder_vm_fault,
5679 };
5680 
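/**
 * binder_mmap() - map the binder buffer space into the caller
 * @filp:	binder file
 * @vma:	vma describing the requested mapping
 *
 * Only the thread group that opened the file may map it, and
 * writable mappings are refused (FORBIDDEN_MMAP_FLAGS). The actual
 * buffer setup is delegated to binder_alloc_mmap_handler().
 */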
5681 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5682 {
5683 	struct binder_proc *proc = filp->private_data;
5684 
5685 	if (proc->tsk != current->group_leader)
5686 		return -EINVAL;
5687 
5688 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5689 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5690 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5691 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5692 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5693 
5694 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5695 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5696 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5697 		return -EPERM;
5698 	}
5699 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5700 
5701 	vma->vm_ops = &binder_vm_ops;
5702 	vma->vm_private_data = proc;
5703 
5704 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5705 }
5706 
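/**
 * binder_open() - set up a binder_proc for a newly opened device
 * @nodp:	inode of the binder device (binderfs or miscdev)
 * @filp:	file being opened
 *
 * Allocates and initializes the per-process state, pins the backing
 * binder_device, registers the proc on the global binder_procs list,
 * and creates the debugfs/binderfs log entries on the first open by
 * a given PID.
 */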
5707 static int binder_open(struct inode *nodp, struct file *filp)
5708 {
5709 	struct binder_proc *proc, *itr;
5710 	struct binder_device *binder_dev;
5711 	struct binderfs_info *info;
5712 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5713 	bool existing_pid = false;
5714 
5715 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5716 		     current->group_leader->pid, current->pid);
5717 
5718 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5719 	if (proc == NULL)
5720 		return -ENOMEM;
5721 
5722 	dbitmap_init(&proc->dmap);
5723 	spin_lock_init(&proc->inner_lock);
5724 	spin_lock_init(&proc->outer_lock);
5725 	get_task_struct(current->group_leader);
5726 	proc->tsk = current->group_leader;
5727 	proc->cred = get_cred(filp->f_cred);
5728 	INIT_LIST_HEAD(&proc->todo);
5729 	init_waitqueue_head(&proc->freeze_wait);
5730 	proc->default_priority = task_nice(current);
5731 	/* binderfs stashes devices in i_private */
5732 	if (is_binderfs_device(nodp)) {
5733 		binder_dev = nodp->i_private;
5734 		info = nodp->i_sb->s_fs_info;
5735 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5736 	} else {
5737 		binder_dev = container_of(filp->private_data,
5738 					  struct binder_device, miscdev);
5739 	}
5740 	refcount_inc(&binder_dev->ref);
5741 	proc->context = &binder_dev->context;
5742 	binder_alloc_init(&proc->alloc);
5743 
5744 	binder_stats_created(BINDER_STAT_PROC);
5745 	proc->pid = current->group_leader->pid;
5746 	INIT_LIST_HEAD(&proc->delivered_death);
5747 	INIT_LIST_HEAD(&proc->waiting_threads);
5748 	filp->private_data = proc;
5749 
5750 	mutex_lock(&binder_procs_lock);
5751 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5752 		if (itr->pid == proc->pid) {
5753 			existing_pid = true;
5754 			break;
5755 		}
5756 	}
5757 	hlist_add_head(&proc->proc_node, &binder_procs);
5758 	mutex_unlock(&binder_procs_lock);
5759 
5760 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5761 		char strbuf[11];
5762 
5763 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5764 		/*
5765 		 * proc debug entries are shared between contexts.
5766 		 * Only create for the first PID to avoid debugfs log spamming.
5767 		 * The printing code will print all contexts for a given PID
5768 		 * anyway, so this is not a problem.
5769 		 */
5770 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5771 			binder_debugfs_dir_entry_proc,
5772 			(void *)(unsigned long)proc->pid,
5773 			&proc_fops);
5774 	}
5775 
5776 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5777 		char strbuf[11];
5778 		struct dentry *binderfs_entry;
5779 
5780 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5781 		/*
5782 		 * Similar to debugfs, the process specific log file is shared
5783 		 * between contexts. Only create for the first PID.
5784 		 * This is ok since, as with debugfs, the log file will contain
5785 		 * information on all contexts of a given PID.
5786 		 */
5787 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5788 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5789 		if (!IS_ERR(binderfs_entry)) {
5790 			proc->binderfs_entry = binderfs_entry;
5791 		} else {
5792 			int error;
5793 
5794 			error = PTR_ERR(binderfs_entry);
5795 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5796 				strbuf, error);
5797 		}
5798 	}
5799 
5800 	return 0;
5801 }
5802 
5803 static int binder_flush(struct file *filp, fl_owner_t id)
5804 {
5805 	struct binder_proc *proc = filp->private_data;
5806 
5807 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5808 
5809 	return 0;
5810 }
5811 
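/**
 * binder_deferred_flush() - force all waiting threads back to user space
 * @proc:	process being flushed
 *
 * Sets looper_need_return on every thread and wakes those blocked in
 * a binder read. Runs as deferred work queued by binder_flush().
 */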
5812 static void binder_deferred_flush(struct binder_proc *proc)
5813 {
5814 	struct rb_node *n;
5815 	int wake_count = 0;
5816 
5817 	binder_inner_proc_lock(proc);
5818 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5819 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5820 
5821 		thread->looper_need_return = true;
5822 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5823 			wake_up_interruptible(&thread->wait);
5824 			wake_count++;
5825 		}
5826 	}
5827 	binder_inner_proc_unlock(proc);
5828 
5829 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5830 		     "binder_flush: %d woke %d threads\n", proc->pid,
5831 		     wake_count);
5832 }
5833 
5834 static int binder_release(struct inode *nodp, struct file *filp)
5835 {
5836 	struct binder_proc *proc = filp->private_data;
5837 
5838 	debugfs_remove(proc->debugfs_entry);
5839 
5840 	if (proc->binderfs_entry) {
5841 		binderfs_remove_file(proc->binderfs_entry);
5842 		proc->binderfs_entry = NULL;
5843 	}
5844 
5845 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5846 
5847 	return 0;
5848 }
5849 
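/**
 * binder_node_release() - release a node whose owning proc is dying
 * @node:	node being orphaned
 * @refs:	running count of incoming refs, updated and returned
 *
 * Frees the node outright when nothing else references it; otherwise
 * moves it onto binder_dead_nodes and queues BR_DEAD_BINDER work for
 * every ref that requested a death notification.
 *
 * Return: @refs plus the number of refs found on this node.
 */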
5850 static int binder_node_release(struct binder_node *node, int refs)
5851 {
5852 	struct binder_ref *ref;
5853 	int death = 0;
5854 	struct binder_proc *proc = node->proc;
5855 
5856 	binder_release_work(proc, &node->async_todo);
5857 
5858 	binder_node_lock(node);
5859 	binder_inner_proc_lock(proc);
5860 	binder_dequeue_work_ilocked(&node->work);
5861 	/*
5862 	 * The caller must have taken a temporary ref on the node.
5863 	 */
5864 	BUG_ON(!node->tmp_refs);
5865 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5866 		binder_inner_proc_unlock(proc);
5867 		binder_node_unlock(node);
5868 		binder_free_node(node);
5869 
5870 		return refs;
5871 	}
5872 
5873 	node->proc = NULL;
5874 	node->local_strong_refs = 0;
5875 	node->local_weak_refs = 0;
5876 	binder_inner_proc_unlock(proc);
5877 
5878 	spin_lock(&binder_dead_nodes_lock);
5879 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5880 	spin_unlock(&binder_dead_nodes_lock);
5881 
5882 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5883 		refs++;
5884 		/*
5885 		 * Need the node lock to synchronize
5886 		 * with new notification requests and the
5887 		 * inner lock to synchronize with queued
5888 		 * death notifications.
5889 		 */
5890 		binder_inner_proc_lock(ref->proc);
5891 		if (!ref->death) {
5892 			binder_inner_proc_unlock(ref->proc);
5893 			continue;
5894 		}
5895 
5896 		death++;
5897 
5898 		BUG_ON(!list_empty(&ref->death->work.entry));
5899 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5900 		binder_enqueue_work_ilocked(&ref->death->work,
5901 					    &ref->proc->todo);
5902 		binder_wakeup_proc_ilocked(ref->proc);
5903 		binder_inner_proc_unlock(ref->proc);
5904 	}
5905 
5906 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5907 		     "node %d now dead, refs %d, death %d\n",
5908 		     node->debug_id, refs, death);
5909 	binder_node_unlock(node);
5910 	binder_put_node(node);
5911 
5912 	return refs;
5913 }
5914 
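/**
 * binder_deferred_release() - tear down a binder_proc after release()
 * @proc:	process being destroyed
 *
 * Unlinks the proc from the global list, drops the context manager
 * node if it lived here, releases every thread, node, and ref, then
 * flushes remaining work before dropping the temporary reference
 * that kept @proc alive through the walk.
 */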
5915 static void binder_deferred_release(struct binder_proc *proc)
5916 {
5917 	struct binder_context *context = proc->context;
5918 	struct rb_node *n;
5919 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5920 
5921 	mutex_lock(&binder_procs_lock);
5922 	hlist_del(&proc->proc_node);
5923 	mutex_unlock(&binder_procs_lock);
5924 
5925 	mutex_lock(&context->context_mgr_node_lock);
5926 	if (context->binder_context_mgr_node &&
5927 	    context->binder_context_mgr_node->proc == proc) {
5928 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5929 			     "%s: %d context_mgr_node gone\n",
5930 			     __func__, proc->pid);
5931 		context->binder_context_mgr_node = NULL;
5932 	}
5933 	mutex_unlock(&context->context_mgr_node_lock);
5934 	binder_inner_proc_lock(proc);
5935 	/*
5936 	 * Make sure proc stays alive after we
5937 	 * remove all the threads
5938 	 */
5939 	proc->tmp_ref++;
5940 
5941 	proc->is_dead = true;
5942 	proc->is_frozen = false;
5943 	proc->sync_recv = false;
5944 	proc->async_recv = false;
5945 	threads = 0;
5946 	active_transactions = 0;
5947 	while ((n = rb_first(&proc->threads))) {
5948 		struct binder_thread *thread;
5949 
5950 		thread = rb_entry(n, struct binder_thread, rb_node);
5951 		binder_inner_proc_unlock(proc);
5952 		threads++;
5953 		active_transactions += binder_thread_release(proc, thread);
5954 		binder_inner_proc_lock(proc);
5955 	}
5956 
5957 	nodes = 0;
5958 	incoming_refs = 0;
5959 	while ((n = rb_first(&proc->nodes))) {
5960 		struct binder_node *node;
5961 
5962 		node = rb_entry(n, struct binder_node, rb_node);
5963 		nodes++;
5964 		/*
5965 		 * take a temporary ref on the node before
5966 		 * calling binder_node_release() which will either
5967 		 * kfree() the node or call binder_put_node()
5968 		 */
5969 		binder_inc_node_tmpref_ilocked(node);
5970 		rb_erase(&node->rb_node, &proc->nodes);
5971 		binder_inner_proc_unlock(proc);
5972 		incoming_refs = binder_node_release(node, incoming_refs);
5973 		binder_inner_proc_lock(proc);
5974 	}
5975 	binder_inner_proc_unlock(proc);
5976 
5977 	outgoing_refs = 0;
5978 	binder_proc_lock(proc);
5979 	while ((n = rb_first(&proc->refs_by_desc))) {
5980 		struct binder_ref *ref;
5981 
5982 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5983 		outgoing_refs++;
5984 		binder_cleanup_ref_olocked(ref);
5985 		binder_proc_unlock(proc);
5986 		binder_free_ref(ref);
5987 		binder_proc_lock(proc);
5988 	}
5989 	binder_proc_unlock(proc);
5990 
5991 	binder_release_work(proc, &proc->todo);
5992 	binder_release_work(proc, &proc->delivered_death);
5993 
5994 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5995 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5996 		     __func__, proc->pid, threads, nodes, incoming_refs,
5997 		     outgoing_refs, active_transactions);
5998 
5999 	binder_proc_dec_tmpref(proc);
6000 }
6001 
6002 static void binder_deferred_func(struct work_struct *work)
6003 {
6004 	struct binder_proc *proc;
6005 
6007 
6008 	do {
6009 		mutex_lock(&binder_deferred_lock);
6010 		if (!hlist_empty(&binder_deferred_list)) {
6011 			proc = hlist_entry(binder_deferred_list.first,
6012 					struct binder_proc, deferred_work_node);
6013 			hlist_del_init(&proc->deferred_work_node);
6014 			defer = proc->deferred_work;
6015 			proc->deferred_work = 0;
6016 		} else {
6017 			proc = NULL;
6018 			defer = 0;
6019 		}
6020 		mutex_unlock(&binder_deferred_lock);
6021 
6022 		if (defer & BINDER_DEFERRED_FLUSH)
6023 			binder_deferred_flush(proc);
6024 
6025 		if (defer & BINDER_DEFERRED_RELEASE)
6026 			binder_deferred_release(proc); /* frees proc */
6027 	} while (proc);
6028 }
6029 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6030 
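/**
 * binder_defer_work() - queue deferred flush/release work for @proc
 * @proc:	process the work applies to
 * @defer:	BINDER_DEFERRED_* flags to add
 *
 * Accumulates flags under binder_deferred_lock and schedules the
 * shared worker only if @proc is not already on the deferred list.
 */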
6031 static void
6032 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6033 {
6034 	mutex_lock(&binder_deferred_lock);
6035 	proc->deferred_work |= defer;
6036 	if (hlist_unhashed(&proc->deferred_work_node)) {
6037 		hlist_add_head(&proc->deferred_work_node,
6038 				&binder_deferred_list);
6039 		schedule_work(&binder_deferred_work);
6040 	}
6041 	mutex_unlock(&binder_deferred_lock);
6042 }
6043 
6044 static void print_binder_transaction_ilocked(struct seq_file *m,
6045 					     struct binder_proc *proc,
6046 					     const char *prefix,
6047 					     struct binder_transaction *t)
6048 {
6049 	struct binder_proc *to_proc;
6050 	struct binder_buffer *buffer = t->buffer;
6051 	ktime_t current_time = ktime_get();
6052 
6053 	spin_lock(&t->lock);
6054 	to_proc = t->to_proc;
6055 	seq_printf(m,
6056 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6057 		   prefix, t->debug_id, t,
6058 		   t->from_pid,
6059 		   t->from_tid,
6060 		   to_proc ? to_proc->pid : 0,
6061 		   t->to_thread ? t->to_thread->pid : 0,
6062 		   t->code, t->flags, t->priority, t->need_reply,
6063 		   ktime_ms_delta(current_time, t->start_time));
6064 	spin_unlock(&t->lock);
6065 
6066 	if (proc != to_proc) {
6067 		/*
6068 		 * Can only safely deref buffer if we are holding the
6069 		 * correct proc inner lock for this node
6070 		 */
6071 		seq_puts(m, "\n");
6072 		return;
6073 	}
6074 
6075 	if (buffer == NULL) {
6076 		seq_puts(m, " buffer free\n");
6077 		return;
6078 	}
6079 	if (buffer->target_node)
6080 		seq_printf(m, " node %d", buffer->target_node->debug_id);
6081 	seq_printf(m, " size %zd:%zd offset %lx\n",
6082 		   buffer->data_size, buffer->offsets_size,
6083 		   proc->alloc.buffer - buffer->user_data);
6084 }
6085 
6086 static void print_binder_work_ilocked(struct seq_file *m,
6087 				     struct binder_proc *proc,
6088 				     const char *prefix,
6089 				     const char *transaction_prefix,
6090 				     struct binder_work *w)
6091 {
6092 	struct binder_node *node;
6093 	struct binder_transaction *t;
6094 
6095 	switch (w->type) {
6096 	case BINDER_WORK_TRANSACTION:
6097 		t = container_of(w, struct binder_transaction, work);
6098 		print_binder_transaction_ilocked(
6099 				m, proc, transaction_prefix, t);
6100 		break;
6101 	case BINDER_WORK_RETURN_ERROR: {
6102 		struct binder_error *e = container_of(
6103 				w, struct binder_error, work);
6104 
6105 		seq_printf(m, "%stransaction error: %u\n",
6106 			   prefix, e->cmd);
6107 	} break;
6108 	case BINDER_WORK_TRANSACTION_COMPLETE:
6109 		seq_printf(m, "%stransaction complete\n", prefix);
6110 		break;
6111 	case BINDER_WORK_NODE:
6112 		node = container_of(w, struct binder_node, work);
6113 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6114 			   prefix, node->debug_id,
6115 			   (u64)node->ptr, (u64)node->cookie);
6116 		break;
6117 	case BINDER_WORK_DEAD_BINDER:
6118 		seq_printf(m, "%shas dead binder\n", prefix);
6119 		break;
6120 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6121 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6122 		break;
6123 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6124 		seq_printf(m, "%shas cleared death notification\n", prefix);
6125 		break;
6126 	default:
6127 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6128 		break;
6129 	}
6130 }
6131 
6132 static void print_binder_thread_ilocked(struct seq_file *m,
6133 					struct binder_thread *thread,
6134 					int print_always)
6135 {
6136 	struct binder_transaction *t;
6137 	struct binder_work *w;
6138 	size_t start_pos = m->count;
6139 	size_t header_pos;
6140 
6141 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6142 			thread->pid, thread->looper,
6143 			thread->looper_need_return,
6144 			atomic_read(&thread->tmp_ref));
6145 	header_pos = m->count;
6146 	t = thread->transaction_stack;
6147 	while (t) {
6148 		if (t->from == thread) {
6149 			print_binder_transaction_ilocked(m, thread->proc,
6150 					"    outgoing transaction", t);
6151 			t = t->from_parent;
6152 		} else if (t->to_thread == thread) {
6153 			print_binder_transaction_ilocked(m, thread->proc,
6154 						 "    incoming transaction", t);
6155 			t = t->to_parent;
6156 		} else {
6157 			print_binder_transaction_ilocked(m, thread->proc,
6158 					"    bad transaction", t);
6159 			t = NULL;
6160 		}
6161 	}
6162 	list_for_each_entry(w, &thread->todo, entry) {
6163 		print_binder_work_ilocked(m, thread->proc, "    ",
6164 					  "    pending transaction", w);
6165 	}
6166 	if (!print_always && m->count == header_pos)
6167 		m->count = start_pos;
6168 }
6169 
6170 static void print_binder_node_nilocked(struct seq_file *m,
6171 				       struct binder_node *node)
6172 {
6173 	struct binder_ref *ref;
6174 	struct binder_work *w;
6175 	int count;
6176 
6177 	count = hlist_count_nodes(&node->refs);
6178 
6179 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6180 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
6181 		   node->has_strong_ref, node->has_weak_ref,
6182 		   node->local_strong_refs, node->local_weak_refs,
6183 		   node->internal_strong_refs, count, node->tmp_refs);
6184 	if (count) {
6185 		seq_puts(m, " proc");
6186 		hlist_for_each_entry(ref, &node->refs, node_entry)
6187 			seq_printf(m, " %d", ref->proc->pid);
6188 	}
6189 	seq_puts(m, "\n");
6190 	if (node->proc) {
6191 		list_for_each_entry(w, &node->async_todo, entry)
6192 			print_binder_work_ilocked(m, node->proc, "    ",
6193 					  "    pending async transaction", w);
6194 	}
6195 }
6196 
6197 static void print_binder_ref_olocked(struct seq_file *m,
6198 				     struct binder_ref *ref)
6199 {
6200 	binder_node_lock(ref->node);
6201 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6202 		   ref->data.debug_id, ref->data.desc,
6203 		   ref->node->proc ? "" : "dead ",
6204 		   ref->node->debug_id, ref->data.strong,
6205 		   ref->data.weak, ref->death);
6206 	binder_node_unlock(ref->node);
6207 }
6208 
6209 static void print_binder_proc(struct seq_file *m,
6210 			      struct binder_proc *proc, int print_all)
6211 {
6212 	struct binder_work *w;
6213 	struct rb_node *n;
6214 	size_t start_pos = m->count;
6215 	size_t header_pos;
6216 	struct binder_node *last_node = NULL;
6217 
6218 	seq_printf(m, "proc %d\n", proc->pid);
6219 	seq_printf(m, "context %s\n", proc->context->name);
6220 	header_pos = m->count;
6221 
6222 	binder_inner_proc_lock(proc);
6223 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6224 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6225 						rb_node), print_all);
6226 
6227 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6228 		struct binder_node *node = rb_entry(n, struct binder_node,
6229 						    rb_node);
6230 		if (!print_all && !node->has_async_transaction)
6231 			continue;
6232 
6233 		/*
6234 		 * take a temporary reference on the node so it
6235 		 * survives and isn't removed from the tree
6236 		 * while we print it.
6237 		 */
6238 		binder_inc_node_tmpref_ilocked(node);
6239 		/* Need to drop inner lock to take node lock */
6240 		binder_inner_proc_unlock(proc);
6241 		if (last_node)
6242 			binder_put_node(last_node);
6243 		binder_node_inner_lock(node);
6244 		print_binder_node_nilocked(m, node);
6245 		binder_node_inner_unlock(node);
6246 		last_node = node;
6247 		binder_inner_proc_lock(proc);
6248 	}
6249 	binder_inner_proc_unlock(proc);
6250 	if (last_node)
6251 		binder_put_node(last_node);
6252 
6253 	if (print_all) {
6254 		binder_proc_lock(proc);
6255 		for (n = rb_first(&proc->refs_by_desc);
6256 		     n != NULL;
6257 		     n = rb_next(n))
6258 			print_binder_ref_olocked(m, rb_entry(n,
6259 							    struct binder_ref,
6260 							    rb_node_desc));
6261 		binder_proc_unlock(proc);
6262 	}
6263 	binder_alloc_print_allocated(m, &proc->alloc);
6264 	binder_inner_proc_lock(proc);
6265 	list_for_each_entry(w, &proc->todo, entry)
6266 		print_binder_work_ilocked(m, proc, "  ",
6267 					  "  pending transaction", w);
6268 	list_for_each_entry(w, &proc->delivered_death, entry) {
6269 		seq_puts(m, "  has delivered dead binder\n");
6270 		break;
6271 	}
6272 	binder_inner_proc_unlock(proc);
6273 	if (!print_all && m->count == header_pos)
6274 		m->count = start_pos;
6275 }
6276 
6277 static const char * const binder_return_strings[] = {
6278 	"BR_ERROR",
6279 	"BR_OK",
6280 	"BR_TRANSACTION",
6281 	"BR_REPLY",
6282 	"BR_ACQUIRE_RESULT",
6283 	"BR_DEAD_REPLY",
6284 	"BR_TRANSACTION_COMPLETE",
6285 	"BR_INCREFS",
6286 	"BR_ACQUIRE",
6287 	"BR_RELEASE",
6288 	"BR_DECREFS",
6289 	"BR_ATTEMPT_ACQUIRE",
6290 	"BR_NOOP",
6291 	"BR_SPAWN_LOOPER",
6292 	"BR_FINISHED",
6293 	"BR_DEAD_BINDER",
6294 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6295 	"BR_FAILED_REPLY",
6296 	"BR_FROZEN_REPLY",
6297 	"BR_ONEWAY_SPAM_SUSPECT",
6298 	"BR_TRANSACTION_PENDING_FROZEN"
6299 };
6300 
6301 static const char * const binder_command_strings[] = {
6302 	"BC_TRANSACTION",
6303 	"BC_REPLY",
6304 	"BC_ACQUIRE_RESULT",
6305 	"BC_FREE_BUFFER",
6306 	"BC_INCREFS",
6307 	"BC_ACQUIRE",
6308 	"BC_RELEASE",
6309 	"BC_DECREFS",
6310 	"BC_INCREFS_DONE",
6311 	"BC_ACQUIRE_DONE",
6312 	"BC_ATTEMPT_ACQUIRE",
6313 	"BC_REGISTER_LOOPER",
6314 	"BC_ENTER_LOOPER",
6315 	"BC_EXIT_LOOPER",
6316 	"BC_REQUEST_DEATH_NOTIFICATION",
6317 	"BC_CLEAR_DEATH_NOTIFICATION",
6318 	"BC_DEAD_BINDER_DONE",
6319 	"BC_TRANSACTION_SG",
6320 	"BC_REPLY_SG",
6321 };
6322 
6323 static const char * const binder_objstat_strings[] = {
6324 	"proc",
6325 	"thread",
6326 	"node",
6327 	"ref",
6328 	"death",
6329 	"transaction",
6330 	"transaction_complete"
6331 };
6332 
6333 static void print_binder_stats(struct seq_file *m, const char *prefix,
6334 			       struct binder_stats *stats)
6335 {
6336 	int i;
6337 
6338 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6339 		     ARRAY_SIZE(binder_command_strings));
6340 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6341 		int temp = atomic_read(&stats->bc[i]);
6342 
6343 		if (temp)
6344 			seq_printf(m, "%s%s: %d\n", prefix,
6345 				   binder_command_strings[i], temp);
6346 	}
6347 
6348 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6349 		     ARRAY_SIZE(binder_return_strings));
6350 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6351 		int temp = atomic_read(&stats->br[i]);
6352 
6353 		if (temp)
6354 			seq_printf(m, "%s%s: %d\n", prefix,
6355 				   binder_return_strings[i], temp);
6356 	}
6357 
6358 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6359 		     ARRAY_SIZE(binder_objstat_strings));
6360 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6361 		     ARRAY_SIZE(stats->obj_deleted));
6362 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6363 		int created = atomic_read(&stats->obj_created[i]);
6364 		int deleted = atomic_read(&stats->obj_deleted[i]);
6365 
6366 		if (created || deleted)
6367 			seq_printf(m, "%s%s: active %d total %d\n",
6368 				prefix,
6369 				binder_objstat_strings[i],
6370 				created - deleted,
6371 				created);
6372 	}
6373 }
6374 
6375 static void print_binder_proc_stats(struct seq_file *m,
6376 				    struct binder_proc *proc)
6377 {
6378 	struct binder_work *w;
6379 	struct binder_thread *thread;
6380 	struct rb_node *n;
6381 	int count, strong, weak, ready_threads;
6382 	size_t free_async_space =
6383 		binder_alloc_get_free_async_space(&proc->alloc);
6384 
6385 	seq_printf(m, "proc %d\n", proc->pid);
6386 	seq_printf(m, "context %s\n", proc->context->name);
6387 	count = 0;
6388 	ready_threads = 0;
6389 	binder_inner_proc_lock(proc);
6390 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6391 		count++;
6392 
6393 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6394 		ready_threads++;
6395 
6396 	seq_printf(m, "  threads: %d\n", count);
6397 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6398 			"  ready threads %d\n"
6399 			"  free async space %zd\n", proc->requested_threads,
6400 			proc->requested_threads_started, proc->max_threads,
6401 			ready_threads,
6402 			free_async_space);
6403 	count = 0;
6404 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6405 		count++;
6406 	binder_inner_proc_unlock(proc);
6407 	seq_printf(m, "  nodes: %d\n", count);
6408 	count = 0;
6409 	strong = 0;
6410 	weak = 0;
6411 	binder_proc_lock(proc);
6412 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6413 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6414 						  rb_node_desc);
6415 		count++;
6416 		strong += ref->data.strong;
6417 		weak += ref->data.weak;
6418 	}
6419 	binder_proc_unlock(proc);
6420 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6421 
6422 	count = binder_alloc_get_allocated_count(&proc->alloc);
6423 	seq_printf(m, "  buffers: %d\n", count);
6424 
6425 	binder_alloc_print_pages(m, &proc->alloc);
6426 
6427 	count = 0;
6428 	binder_inner_proc_lock(proc);
6429 	list_for_each_entry(w, &proc->todo, entry) {
6430 		if (w->type == BINDER_WORK_TRANSACTION)
6431 			count++;
6432 	}
6433 	binder_inner_proc_unlock(proc);
6434 	seq_printf(m, "  pending transactions: %d\n", count);
6435 
6436 	print_binder_stats(m, "  ", &proc->stats);
6437 }
6438 
6439 static int state_show(struct seq_file *m, void *unused)
6440 {
6441 	struct binder_proc *proc;
6442 	struct binder_node *node;
6443 	struct binder_node *last_node = NULL;
6444 
6445 	seq_puts(m, "binder state:\n");
6446 
6447 	spin_lock(&binder_dead_nodes_lock);
6448 	if (!hlist_empty(&binder_dead_nodes))
6449 		seq_puts(m, "dead nodes:\n");
6450 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6451 		/*
6452 		 * take a temporary reference on the node so it
6453 		 * survives and isn't removed from the list
6454 		 * while we print it.
6455 		 */
6456 		node->tmp_refs++;
6457 		spin_unlock(&binder_dead_nodes_lock);
6458 		if (last_node)
6459 			binder_put_node(last_node);
6460 		binder_node_lock(node);
6461 		print_binder_node_nilocked(m, node);
6462 		binder_node_unlock(node);
6463 		last_node = node;
6464 		spin_lock(&binder_dead_nodes_lock);
6465 	}
6466 	spin_unlock(&binder_dead_nodes_lock);
6467 	if (last_node)
6468 		binder_put_node(last_node);
6469 
6470 	mutex_lock(&binder_procs_lock);
6471 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6472 		print_binder_proc(m, proc, 1);
6473 	mutex_unlock(&binder_procs_lock);
6474 
6475 	return 0;
6476 }
6477 
6478 static int stats_show(struct seq_file *m, void *unused)
6479 {
6480 	struct binder_proc *proc;
6481 
6482 	seq_puts(m, "binder stats:\n");
6483 
6484 	print_binder_stats(m, "", &binder_stats);
6485 
6486 	mutex_lock(&binder_procs_lock);
6487 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6488 		print_binder_proc_stats(m, proc);
6489 	mutex_unlock(&binder_procs_lock);
6490 
6491 	return 0;
6492 }
6493 
6494 static int transactions_show(struct seq_file *m, void *unused)
6495 {
6496 	struct binder_proc *proc;
6497 
6498 	seq_puts(m, "binder transactions:\n");
6499 	mutex_lock(&binder_procs_lock);
6500 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6501 		print_binder_proc(m, proc, 0);
6502 	mutex_unlock(&binder_procs_lock);
6503 
6504 	return 0;
6505 }
6506 
6507 static int proc_show(struct seq_file *m, void *unused)
6508 {
6509 	struct binder_proc *itr;
6510 	int pid = (unsigned long)m->private;
6511 
6512 	mutex_lock(&binder_procs_lock);
6513 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6514 		if (itr->pid == pid) {
6515 			seq_puts(m, "binder proc state:\n");
6516 			print_binder_proc(m, itr, 1);
6517 		}
6518 	}
6519 	mutex_unlock(&binder_procs_lock);
6520 
6521 	return 0;
6522 }
6523 
6524 static void print_binder_transaction_log_entry(struct seq_file *m,
6525 					struct binder_transaction_log_entry *e)
6526 {
6527 	int debug_id = READ_ONCE(e->debug_id_done);
6528 	/*
6529 	 * read barrier to guarantee debug_id_done read before
6530 	 * we print the log values
6531 	 */
6532 	smp_rmb();
6533 	seq_printf(m,
6534 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6535 		   e->debug_id, (e->call_type == 2) ? "reply" :
6536 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6537 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6538 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6539 		   e->return_error, e->return_error_param,
6540 		   e->return_error_line);
6541 	/*
6542 	 * read-barrier to guarantee read of debug_id_done after
6543 	 * done printing the fields of the entry
6544 	 */
6545 	smp_rmb();
6546 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6547 			"\n" : " (incomplete)\n");
6548 }
6549 
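/**
 * transaction_log_show() - dump the transaction log ring buffer
 * @m:		seq_file to print into
 * @unused:	unused
 *
 * Prints entries oldest-first: once the ring has wrapped (log->full,
 * or cur beyond the array), the walk starts just past the newest
 * entry; otherwise it starts at index 0.
 */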
6550 static int transaction_log_show(struct seq_file *m, void *unused)
6551 {
6552 	struct binder_transaction_log *log = m->private;
6553 	unsigned int log_cur = atomic_read(&log->cur);
6554 	unsigned int count;
6555 	unsigned int cur;
6556 	int i;
6557 
6558 	count = log_cur + 1;
6559 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6560 		0 : count % ARRAY_SIZE(log->entry);
6561 	if (count > ARRAY_SIZE(log->entry) || log->full)
6562 		count = ARRAY_SIZE(log->entry);
6563 	for (i = 0; i < count; i++) {
6564 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6565 
6566 		print_binder_transaction_log_entry(m, &log->entry[index]);
6567 	}
6568 	return 0;
6569 }
6570 
6571 const struct file_operations binder_fops = {
6572 	.owner = THIS_MODULE,
6573 	.poll = binder_poll,
6574 	.unlocked_ioctl = binder_ioctl,
6575 	.compat_ioctl = compat_ptr_ioctl,
6576 	.mmap = binder_mmap,
6577 	.open = binder_open,
6578 	.flush = binder_flush,
6579 	.release = binder_release,
6580 };
6581 
6582 DEFINE_SHOW_ATTRIBUTE(state);
6583 DEFINE_SHOW_ATTRIBUTE(stats);
6584 DEFINE_SHOW_ATTRIBUTE(transactions);
6585 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6586 
6587 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6588 	{
6589 		.name = "state",
6590 		.mode = 0444,
6591 		.fops = &state_fops,
6592 		.data = NULL,
6593 	},
6594 	{
6595 		.name = "stats",
6596 		.mode = 0444,
6597 		.fops = &stats_fops,
6598 		.data = NULL,
6599 	},
6600 	{
6601 		.name = "transactions",
6602 		.mode = 0444,
6603 		.fops = &transactions_fops,
6604 		.data = NULL,
6605 	},
6606 	{
6607 		.name = "transaction_log",
6608 		.mode = 0444,
6609 		.fops = &transaction_log_fops,
6610 		.data = &binder_transaction_log,
6611 	},
6612 	{
6613 		.name = "failed_transaction_log",
6614 		.mode = 0444,
6615 		.fops = &transaction_log_fops,
6616 		.data = &binder_transaction_log_failed,
6617 	},
6618 	{} /* terminator */
6619 };
6620 
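/**
 * init_binder_device() - register one binder misc device
 * @name:	device name from the binder_devices module parameter
 *
 * Allocates the binder_device, initializes its context, registers
 * the miscdevice, and links it onto binder_devices so the error
 * path in binder_init() can unwind it.
 */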
6621 static int __init init_binder_device(const char *name)
6622 {
6623 	int ret;
6624 	struct binder_device *binder_device;
6625 
6626 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6627 	if (!binder_device)
6628 		return -ENOMEM;
6629 
6630 	binder_device->miscdev.fops = &binder_fops;
6631 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6632 	binder_device->miscdev.name = name;
6633 
6634 	refcount_set(&binder_device->ref, 1);
6635 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6636 	binder_device->context.name = name;
6637 	mutex_init(&binder_device->context.context_mgr_node_lock);
6638 
6639 	ret = misc_register(&binder_device->miscdev);
6640 	if (ret < 0) {
6641 		kfree(binder_device);
6642 		return ret;
6643 	}
6644 
6645 	hlist_add_head(&binder_device->hlist, &binder_devices);
6646 
6647 	return ret;
6648 }
6649 
6650 static int __init binder_init(void)
6651 {
6652 	int ret;
6653 	char *device_name, *device_tmp;
6654 	struct binder_device *device;
6655 	struct hlist_node *tmp;
6656 	char *device_names = NULL;
6657 	const struct binder_debugfs_entry *db_entry;
6658 
6659 	ret = binder_alloc_shrinker_init();
6660 	if (ret)
6661 		return ret;
6662 
6663 	atomic_set(&binder_transaction_log.cur, ~0U);
6664 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6665 
6666 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6667 
6668 	binder_for_each_debugfs_entry(db_entry)
6669 		debugfs_create_file(db_entry->name,
6670 					db_entry->mode,
6671 					binder_debugfs_dir_entry_root,
6672 					db_entry->data,
6673 					db_entry->fops);
6674 
6675 	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6676 						binder_debugfs_dir_entry_root);
6677 
6678 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6679 	    strcmp(binder_devices_param, "") != 0) {
6680 		/*
6681 		 * Copy the module parameter string, because we don't want to
6682 		 * tokenize it in-place.
6683 		 */
6684 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6685 		if (!device_names) {
6686 			ret = -ENOMEM;
6687 			goto err_alloc_device_names_failed;
6688 		}
6689 
6690 		device_tmp = device_names;
6691 		while ((device_name = strsep(&device_tmp, ","))) {
6692 			ret = init_binder_device(device_name);
6693 			if (ret)
6694 				goto err_init_binder_device_failed;
6695 		}
6696 	}
6697 
6698 	ret = init_binderfs();
6699 	if (ret)
6700 		goto err_init_binder_device_failed;
6701 
6702 	return ret;
6703 
6704 err_init_binder_device_failed:
6705 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6706 		misc_deregister(&device->miscdev);
6707 		hlist_del(&device->hlist);
6708 		kfree(device);
6709 	}
6710 
6711 	kfree(device_names);
6712 
6713 err_alloc_device_names_failed:
6714 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6715 	binder_alloc_shrinker_exit();
6716 
6717 	return ret;
6718 }
6719 
6720 device_initcall(binder_init);
6721 
6722 #define CREATE_TRACE_POINTS
6723 #include "binder_trace.h"
6724 
6725 MODULE_LICENSE("GPL v2");
6726