xref: /linux/drivers/android/binder.c (revision c717993dd76a1049093af5c262e751d901b8da10)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /* binder.c
3   *
4   * Android IPC Subsystem
5   *
6   * Copyright (C) 2007-2008 Google, Inc.
7   */
8  
9  /*
10   * Locking overview
11   *
12   * There are 3 main spinlocks which must be acquired in the
13   * order shown:
14   *
15   * 1) proc->outer_lock : protects binder_ref
16   *    binder_proc_lock() and binder_proc_unlock() are
17   *    used to acq/rel.
18   * 2) node->lock : protects most fields of binder_node.
19   *    binder_node_lock() and binder_node_unlock() are
20   *    used to acq/rel
21   * 3) proc->inner_lock : protects the thread and node lists
22   *    (proc->threads, proc->waiting_threads, proc->nodes)
23   *    and all todo lists associated with the binder_proc
24   *    (proc->todo, thread->todo, proc->delivered_death and
25   *    node->async_todo), as well as thread->transaction_stack
26   *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27   *    are used to acq/rel
28   *
29   * Any lock under procA must never be nested under any lock at the same
30   * level or below on procB.
31   *
32   * Functions that require a lock to be held on entry indicate which
33   * lock is required in the suffix of the function name:
34   *
35   * foo_olocked() : requires proc->outer_lock
36   * foo_nlocked() : requires node->lock
37   * foo_ilocked() : requires proc->inner_lock
38   * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39   * foo_nilocked(): requires node->lock and proc->inner_lock
40   * ...
41   */
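
/*
 * Illustrative sketch only (not part of the driver): with the ordering
 * above, code that needed all three locks for one proc/node would take
 * them as
 *
 *	binder_proc_lock(proc);		1) proc->outer_lock
 *	binder_node_lock(node);		2) node->lock
 *	binder_inner_proc_lock(proc);	3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * and a helper that assumes the caller already holds node->lock and
 * proc->inner_lock would be suffixed _nilocked(), per the naming
 * convention above.
 */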
42  
43  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44  
45  #include <linux/fdtable.h>
46  #include <linux/file.h>
47  #include <linux/freezer.h>
48  #include <linux/fs.h>
49  #include <linux/list.h>
50  #include <linux/miscdevice.h>
51  #include <linux/module.h>
52  #include <linux/mutex.h>
53  #include <linux/nsproxy.h>
54  #include <linux/poll.h>
55  #include <linux/debugfs.h>
56  #include <linux/rbtree.h>
57  #include <linux/sched/signal.h>
58  #include <linux/sched/mm.h>
59  #include <linux/seq_file.h>
60  #include <linux/string.h>
61  #include <linux/uaccess.h>
62  #include <linux/pid_namespace.h>
63  #include <linux/security.h>
64  #include <linux/spinlock.h>
65  #include <linux/ratelimit.h>
66  #include <linux/syscalls.h>
67  #include <linux/task_work.h>
68  #include <linux/sizes.h>
69  
70  #include <uapi/linux/android/binder.h>
71  
72  #include <linux/cacheflush.h>
73  
74  #include "binder_internal.h"
75  #include "binder_trace.h"
76  
77  static HLIST_HEAD(binder_deferred_list);
78  static DEFINE_MUTEX(binder_deferred_lock);
79  
80  static HLIST_HEAD(binder_devices);
81  static HLIST_HEAD(binder_procs);
82  static DEFINE_MUTEX(binder_procs_lock);
83  
84  static HLIST_HEAD(binder_dead_nodes);
85  static DEFINE_SPINLOCK(binder_dead_nodes_lock);
86  
87  static struct dentry *binder_debugfs_dir_entry_root;
88  static struct dentry *binder_debugfs_dir_entry_proc;
89  static atomic_t binder_last_id;
90  
91  static int proc_show(struct seq_file *m, void *unused);
92  DEFINE_SHOW_ATTRIBUTE(proc);
93  
94  #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
95  
96  enum {
97  	BINDER_DEBUG_USER_ERROR             = 1U << 0,
98  	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
99  	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
100  	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
101  	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
102  	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
103  	BINDER_DEBUG_READ_WRITE             = 1U << 6,
104  	BINDER_DEBUG_USER_REFS              = 1U << 7,
105  	BINDER_DEBUG_THREADS                = 1U << 8,
106  	BINDER_DEBUG_TRANSACTION            = 1U << 9,
107  	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
108  	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
109  	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
110  	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
111  	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
112  };
113  static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114  	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115  module_param_named(debug_mask, binder_debug_mask, uint, 0644);
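
/*
 * Illustrative note only: debug_mask is a bitwise OR of the
 * BINDER_DEBUG_* flags above, so enabling transaction tracing at
 * runtime could look like (assuming the usual sysfs path for a
 * parameter registered with module_param_named()):
 *
 *	echo 0x200 > /sys/module/binder/parameters/debug_mask
 */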
116  
117  char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118  module_param_named(devices, binder_devices_param, charp, 0444);
119  
120  static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121  static int binder_stop_on_user_error;
122  
123  static int binder_set_stop_on_user_error(const char *val,
124  					 const struct kernel_param *kp)
125  {
126  	int ret;
127  
128  	ret = param_set_int(val, kp);
129  	if (binder_stop_on_user_error < 2)
130  		wake_up(&binder_user_error_wait);
131  	return ret;
132  }
133  module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
134  	param_get_int, &binder_stop_on_user_error, 0644);
135  
136  #define binder_debug(mask, x...) \
137  	do { \
138  		if (binder_debug_mask & mask) \
139  			pr_info_ratelimited(x); \
140  	} while (0)
141  
142  #define binder_user_error(x...) \
143  	do { \
144  		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
145  			pr_info_ratelimited(x); \
146  		if (binder_stop_on_user_error) \
147  			binder_stop_on_user_error = 2; \
148  	} while (0)
149  
150  #define to_flat_binder_object(hdr) \
151  	container_of(hdr, struct flat_binder_object, hdr)
152  
153  #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
154  
155  #define to_binder_buffer_object(hdr) \
156  	container_of(hdr, struct binder_buffer_object, hdr)
157  
158  #define to_binder_fd_array_object(hdr) \
159  	container_of(hdr, struct binder_fd_array_object, hdr)
160  
161  static struct binder_stats binder_stats;
162  
163  static inline void binder_stats_deleted(enum binder_stat_types type)
164  {
165  	atomic_inc(&binder_stats.obj_deleted[type]);
166  }
167  
168  static inline void binder_stats_created(enum binder_stat_types type)
169  {
170  	atomic_inc(&binder_stats.obj_created[type]);
171  }
172  
173  struct binder_transaction_log binder_transaction_log;
174  struct binder_transaction_log binder_transaction_log_failed;
175  
176  static struct binder_transaction_log_entry *binder_transaction_log_add(
177  	struct binder_transaction_log *log)
178  {
179  	struct binder_transaction_log_entry *e;
180  	unsigned int cur = atomic_inc_return(&log->cur);
181  
182  	if (cur >= ARRAY_SIZE(log->entry))
183  		log->full = true;
184  	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
185  	WRITE_ONCE(e->debug_id_done, 0);
186  	/*
187  	 * write-barrier to synchronize access to e->debug_id_done.
188  	 * We make sure the initialized 0 value is seen before
189   * the other fields are zeroed by memset().
190  	 */
191  	smp_wmb();
192  	memset(e, 0, sizeof(*e));
193  	return e;
194  }
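
/*
 * Illustrative sketch only: a reader of the transaction log is expected
 * to pair the smp_wmb() above with a read barrier, along the lines of
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	... copy or print the entry fields ...
 *	complete = debug_id && debug_id == READ_ONCE(e->debug_id_done);
 *
 * which lets a concurrently re-used entry be reported as incomplete
 * rather than shown with mixed old/new values.
 */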
195  
196  enum binder_deferred_state {
197  	BINDER_DEFERRED_FLUSH        = 0x01,
198  	BINDER_DEFERRED_RELEASE      = 0x02,
199  };
200  
201  enum {
202  	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
203  	BINDER_LOOPER_STATE_ENTERED     = 0x02,
204  	BINDER_LOOPER_STATE_EXITED      = 0x04,
205  	BINDER_LOOPER_STATE_INVALID     = 0x08,
206  	BINDER_LOOPER_STATE_WAITING     = 0x10,
207  	BINDER_LOOPER_STATE_POLL        = 0x20,
208  };
209  
210  /**
211   * binder_proc_lock() - Acquire outer lock for given binder_proc
212   * @proc:         struct binder_proc to acquire
213   *
214   * Acquires proc->outer_lock. Used to protect binder_ref
215   * structures associated with the given proc.
216   */
217  #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
218  static void
219  _binder_proc_lock(struct binder_proc *proc, int line)
220  	__acquires(&proc->outer_lock)
221  {
222  	binder_debug(BINDER_DEBUG_SPINLOCKS,
223  		     "%s: line=%d\n", __func__, line);
224  	spin_lock(&proc->outer_lock);
225  }
226  
227  /**
228   * binder_proc_unlock() - Release outer lock for given binder_proc
229   * @proc:         struct binder_proc whose outer lock is being released
230   *
231   * Release lock acquired via binder_proc_lock()
232   */
233  #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
234  static void
235  _binder_proc_unlock(struct binder_proc *proc, int line)
236  	__releases(&proc->outer_lock)
237  {
238  	binder_debug(BINDER_DEBUG_SPINLOCKS,
239  		     "%s: line=%d\n", __func__, line);
240  	spin_unlock(&proc->outer_lock);
241  }
242  
243  /**
244   * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
245   * @proc:         struct binder_proc to acquire
246   *
247   * Acquires proc->inner_lock. Used to protect todo lists
248   */
249  #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
250  static void
251  _binder_inner_proc_lock(struct binder_proc *proc, int line)
252  	__acquires(&proc->inner_lock)
253  {
254  	binder_debug(BINDER_DEBUG_SPINLOCKS,
255  		     "%s: line=%d\n", __func__, line);
256  	spin_lock(&proc->inner_lock);
257  }
258  
259  /**
260   * binder_inner_proc_unlock() - Release inner lock for given binder_proc
261   * @proc:         struct binder_proc whose inner lock is being released
262   *
263   * Release lock acquired via binder_inner_proc_lock()
264   */
265  #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
266  static void
267  _binder_inner_proc_unlock(struct binder_proc *proc, int line)
268  	__releases(&proc->inner_lock)
269  {
270  	binder_debug(BINDER_DEBUG_SPINLOCKS,
271  		     "%s: line=%d\n", __func__, line);
272  	spin_unlock(&proc->inner_lock);
273  }
274  
275  /**
276   * binder_node_lock() - Acquire spinlock for given binder_node
277   * @node:         struct binder_node to acquire
278   *
279   * Acquires node->lock. Used to protect binder_node fields
280   */
281  #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
282  static void
283  _binder_node_lock(struct binder_node *node, int line)
284  	__acquires(&node->lock)
285  {
286  	binder_debug(BINDER_DEBUG_SPINLOCKS,
287  		     "%s: line=%d\n", __func__, line);
288  	spin_lock(&node->lock);
289  }
290  
291  /**
292   * binder_node_unlock() - Release spinlock for given binder_node
293   * @node:         struct binder_node whose lock is being released
294   *
295   * Release lock acquired via binder_node_lock()
296   */
297  #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
298  static void
299  _binder_node_unlock(struct binder_node *node, int line)
300  	__releases(&node->lock)
301  {
302  	binder_debug(BINDER_DEBUG_SPINLOCKS,
303  		     "%s: line=%d\n", __func__, line);
304  	spin_unlock(&node->lock);
305  }
306  
307  /**
308   * binder_node_inner_lock() - Acquire node and inner locks
309   * @node:         struct binder_node to acquire
310   *
311   * Acquires node->lock. If node->proc is non-NULL, also acquires
312   * proc->inner_lock. Used to protect binder_node fields
313   */
314  #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
315  static void
316  _binder_node_inner_lock(struct binder_node *node, int line)
317  	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
318  {
319  	binder_debug(BINDER_DEBUG_SPINLOCKS,
320  		     "%s: line=%d\n", __func__, line);
321  	spin_lock(&node->lock);
322  	if (node->proc)
323  		binder_inner_proc_lock(node->proc);
324  	else
325  		/* annotation for sparse */
326  		__acquire(&node->proc->inner_lock);
327  }
328  
329  /**
330   * binder_node_inner_unlock() - Release node and inner locks
331   * @node:         struct binder_node whose locks are being released
332   *
333   * Release locks acquired via binder_node_inner_lock()
334   */
335  #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
336  static void
337  _binder_node_inner_unlock(struct binder_node *node, int line)
338  	__releases(&node->lock) __releases(&node->proc->inner_lock)
339  {
340  	struct binder_proc *proc = node->proc;
341  
342  	binder_debug(BINDER_DEBUG_SPINLOCKS,
343  		     "%s: line=%d\n", __func__, line);
344  	if (proc)
345  		binder_inner_proc_unlock(proc);
346  	else
347  		/* annotation for sparse */
348  		__release(&node->proc->inner_lock);
349  	spin_unlock(&node->lock);
350  }
351  
352  static bool binder_worklist_empty_ilocked(struct list_head *list)
353  {
354  	return list_empty(list);
355  }
356  
357  /**
358   * binder_worklist_empty() - Check if no items on the work list
359   * @proc:       binder_proc associated with list
360   * @list:	list to check
361   *
362   * Return: true if there are no items on list, else false
363   */
364  static bool binder_worklist_empty(struct binder_proc *proc,
365  				  struct list_head *list)
366  {
367  	bool ret;
368  
369  	binder_inner_proc_lock(proc);
370  	ret = binder_worklist_empty_ilocked(list);
371  	binder_inner_proc_unlock(proc);
372  	return ret;
373  }
374  
375  /**
376   * binder_enqueue_work_ilocked() - Add an item to the work list
377   * @work:         struct binder_work to add to list
378   * @target_list:  list to add work to
379   *
380   * Adds the work to the specified list. Asserts that work
381   * is not already on a list.
382   *
383   * Requires the proc->inner_lock to be held.
384   */
385  static void
386  binder_enqueue_work_ilocked(struct binder_work *work,
387  			   struct list_head *target_list)
388  {
389  	BUG_ON(target_list == NULL);
390  	BUG_ON(work->entry.next && !list_empty(&work->entry));
391  	list_add_tail(&work->entry, target_list);
392  }
393  
394  /**
395   * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
396   * @thread:       thread to queue work to
397   * @work:         struct binder_work to add to list
398   *
399   * Adds the work to the todo list of the thread. Doesn't set the process_todo
400   * flag, which means that (if it wasn't already set) the thread will go to
401   * sleep without handling this work when it calls read.
402   *
403   * Requires the proc->inner_lock to be held.
404   */
405  static void
406  binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
407  					    struct binder_work *work)
408  {
409  	WARN_ON(!list_empty(&thread->waiting_thread_node));
410  	binder_enqueue_work_ilocked(work, &thread->todo);
411  }
412  
413  /**
414   * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
415   * @thread:       thread to queue work to
416   * @work:         struct binder_work to add to list
417   *
418   * Adds the work to the todo list of the thread, and enables processing
419   * of the todo queue.
420   *
421   * Requires the proc->inner_lock to be held.
422   */
423  static void
424  binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
425  				   struct binder_work *work)
426  {
427  	WARN_ON(!list_empty(&thread->waiting_thread_node));
428  	binder_enqueue_work_ilocked(work, &thread->todo);
429  	thread->process_todo = true;
430  }
431  
432  /**
433   * binder_enqueue_thread_work() - Add an item to the thread work list
434   * @thread:       thread to queue work to
435   * @work:         struct binder_work to add to list
436   *
437   * Adds the work to the todo list of the thread, and enables processing
438   * of the todo queue.
439   */
440  static void
441  binder_enqueue_thread_work(struct binder_thread *thread,
442  			   struct binder_work *work)
443  {
444  	binder_inner_proc_lock(thread->proc);
445  	binder_enqueue_thread_work_ilocked(thread, work);
446  	binder_inner_proc_unlock(thread->proc);
447  }
448  
449  static void
450  binder_dequeue_work_ilocked(struct binder_work *work)
451  {
452  	list_del_init(&work->entry);
453  }
454  
455  /**
456   * binder_dequeue_work() - Removes an item from the work list
457   * @proc:         binder_proc associated with list
458   * @work:         struct binder_work to remove from list
459   *
460   * Removes the specified work item from whatever list it is on.
461   * Can safely be called if work is not on any list.
462   */
463  static void
464  binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
465  {
466  	binder_inner_proc_lock(proc);
467  	binder_dequeue_work_ilocked(work);
468  	binder_inner_proc_unlock(proc);
469  }
470  
471  static struct binder_work *binder_dequeue_work_head_ilocked(
472  					struct list_head *list)
473  {
474  	struct binder_work *w;
475  
476  	w = list_first_entry_or_null(list, struct binder_work, entry);
477  	if (w)
478  		list_del_init(&w->entry);
479  	return w;
480  }
481  
482  static void
483  binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
484  static void binder_free_thread(struct binder_thread *thread);
485  static void binder_free_proc(struct binder_proc *proc);
486  static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
487  
488  static bool binder_has_work_ilocked(struct binder_thread *thread,
489  				    bool do_proc_work)
490  {
491  	return thread->process_todo ||
492  		thread->looper_need_return ||
493  		(do_proc_work &&
494  		 !binder_worklist_empty_ilocked(&thread->proc->todo));
495  }
496  
497  static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
498  {
499  	bool has_work;
500  
501  	binder_inner_proc_lock(thread->proc);
502  	has_work = binder_has_work_ilocked(thread, do_proc_work);
503  	binder_inner_proc_unlock(thread->proc);
504  
505  	return has_work;
506  }
507  
508  static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
509  {
510  	return !thread->transaction_stack &&
511  		binder_worklist_empty_ilocked(&thread->todo) &&
512  		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
513  				   BINDER_LOOPER_STATE_REGISTERED));
514  }
515  
516  static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
517  					       bool sync)
518  {
519  	struct rb_node *n;
520  	struct binder_thread *thread;
521  
522  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
523  		thread = rb_entry(n, struct binder_thread, rb_node);
524  		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
525  		    binder_available_for_proc_work_ilocked(thread)) {
526  			if (sync)
527  				wake_up_interruptible_sync(&thread->wait);
528  			else
529  				wake_up_interruptible(&thread->wait);
530  		}
531  	}
532  }
533  
534  /**
535   * binder_select_thread_ilocked() - selects a thread for doing proc work.
536   * @proc:	process to select a thread from
537   *
538   * Note that calling this function moves the thread off the waiting_threads
539   * list, so it can only be woken up by the caller of this function, or a
540   * signal. Therefore, callers *should* always wake up the thread this function
541   * returns.
542   *
543   * Return:	If there's a thread currently waiting for process work,
544   *		returns that thread. Otherwise returns NULL.
545   */
546  static struct binder_thread *
547  binder_select_thread_ilocked(struct binder_proc *proc)
548  {
549  	struct binder_thread *thread;
550  
551  	assert_spin_locked(&proc->inner_lock);
552  	thread = list_first_entry_or_null(&proc->waiting_threads,
553  					  struct binder_thread,
554  					  waiting_thread_node);
555  
556  	if (thread)
557  		list_del_init(&thread->waiting_thread_node);
558  
559  	return thread;
560  }
561  
562  /**
563   * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
564   * @proc:	process to wake up a thread in
565   * @thread:	specific thread to wake-up (may be NULL)
566   * @sync:	whether to do a synchronous wake-up
567   *
568   * This function wakes up a thread in the @proc process.
569   * The caller may provide a specific thread to wake-up in
570   * the @thread parameter. If @thread is NULL, this function
571   * will wake up threads that have called poll().
572   *
573   * Note that for this function to work as expected, callers
574   * should first call binder_select_thread() to find a thread
575   * to handle the work (if they don't have a thread already),
576   * and pass the result into the @thread parameter.
577   */
578  static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
579  					 struct binder_thread *thread,
580  					 bool sync)
581  {
582  	assert_spin_locked(&proc->inner_lock);
583  
584  	if (thread) {
585  		if (sync)
586  			wake_up_interruptible_sync(&thread->wait);
587  		else
588  			wake_up_interruptible(&thread->wait);
589  		return;
590  	}
591  
592  	/* Didn't find a thread waiting for proc work; this can happen
593  	 * in two scenarios:
594  	 * 1. All threads are busy handling transactions
595  	 *    In that case, one of those threads should call back into
596  	 *    the kernel driver soon and pick up this work.
597  	 * 2. Threads are using the (e)poll interface, in which case
598  	 *    they may be blocked on the waitqueue without having been
599  	 *    added to waiting_threads. For this case, we just iterate
600  	 *    over all threads not handling transaction work, and
601  	 *    wake them all up. We wake all because we don't know whether
602  	 *    a thread that called into (e)poll is handling non-binder
603  	 *    work currently.
604  	 */
605  	binder_wakeup_poll_threads_ilocked(proc, sync);
606  }
607  
608  static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
609  {
610  	struct binder_thread *thread = binder_select_thread_ilocked(proc);
611  
612  	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
613  }
614  
615  static void binder_set_nice(long nice)
616  {
617  	long min_nice;
618  
619  	if (can_nice(current, nice)) {
620  		set_user_nice(current, nice);
621  		return;
622  	}
623  	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
624  	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
625  		     "%d: nice value %ld not allowed use %ld instead\n",
626  		      current->pid, nice, min_nice);
627  	set_user_nice(current, min_nice);
628  	if (min_nice <= MAX_NICE)
629  		return;
630  	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
631  }
632  
633  static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
634  						   binder_uintptr_t ptr)
635  {
636  	struct rb_node *n = proc->nodes.rb_node;
637  	struct binder_node *node;
638  
639  	assert_spin_locked(&proc->inner_lock);
640  
641  	while (n) {
642  		node = rb_entry(n, struct binder_node, rb_node);
643  
644  		if (ptr < node->ptr)
645  			n = n->rb_left;
646  		else if (ptr > node->ptr)
647  			n = n->rb_right;
648  		else {
649  			/*
650  			 * take an implicit weak reference
651  			 * to ensure node stays alive until
652  			 * call to binder_put_node()
653  			 */
654  			binder_inc_node_tmpref_ilocked(node);
655  			return node;
656  		}
657  	}
658  	return NULL;
659  }
660  
661  static struct binder_node *binder_get_node(struct binder_proc *proc,
662  					   binder_uintptr_t ptr)
663  {
664  	struct binder_node *node;
665  
666  	binder_inner_proc_lock(proc);
667  	node = binder_get_node_ilocked(proc, ptr);
668  	binder_inner_proc_unlock(proc);
669  	return node;
670  }
671  
672  static struct binder_node *binder_init_node_ilocked(
673  						struct binder_proc *proc,
674  						struct binder_node *new_node,
675  						struct flat_binder_object *fp)
676  {
677  	struct rb_node **p = &proc->nodes.rb_node;
678  	struct rb_node *parent = NULL;
679  	struct binder_node *node;
680  	binder_uintptr_t ptr = fp ? fp->binder : 0;
681  	binder_uintptr_t cookie = fp ? fp->cookie : 0;
682  	__u32 flags = fp ? fp->flags : 0;
683  
684  	assert_spin_locked(&proc->inner_lock);
685  
686  	while (*p) {
687  
688  		parent = *p;
689  		node = rb_entry(parent, struct binder_node, rb_node);
690  
691  		if (ptr < node->ptr)
692  			p = &(*p)->rb_left;
693  		else if (ptr > node->ptr)
694  			p = &(*p)->rb_right;
695  		else {
696  			/*
697  			 * A matching node is already in
698  			 * the rb tree. Abandon the init
699  			 * and return it.
700  			 */
701  			binder_inc_node_tmpref_ilocked(node);
702  			return node;
703  		}
704  	}
705  	node = new_node;
706  	binder_stats_created(BINDER_STAT_NODE);
707  	node->tmp_refs++;
708  	rb_link_node(&node->rb_node, parent, p);
709  	rb_insert_color(&node->rb_node, &proc->nodes);
710  	node->debug_id = atomic_inc_return(&binder_last_id);
711  	node->proc = proc;
712  	node->ptr = ptr;
713  	node->cookie = cookie;
714  	node->work.type = BINDER_WORK_NODE;
715  	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
716  	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
717  	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
718  	spin_lock_init(&node->lock);
719  	INIT_LIST_HEAD(&node->work.entry);
720  	INIT_LIST_HEAD(&node->async_todo);
721  	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
722  		     "%d:%d node %d u%016llx c%016llx created\n",
723  		     proc->pid, current->pid, node->debug_id,
724  		     (u64)node->ptr, (u64)node->cookie);
725  
726  	return node;
727  }
728  
729  static struct binder_node *binder_new_node(struct binder_proc *proc,
730  					   struct flat_binder_object *fp)
731  {
732  	struct binder_node *node;
733  	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
734  
735  	if (!new_node)
736  		return NULL;
737  	binder_inner_proc_lock(proc);
738  	node = binder_init_node_ilocked(proc, new_node, fp);
739  	binder_inner_proc_unlock(proc);
740  	if (node != new_node)
741  		/*
742  		 * The node was already added by another thread
743  		 */
744  		kfree(new_node);
745  
746  	return node;
747  }
748  
749  static void binder_free_node(struct binder_node *node)
750  {
751  	kfree(node);
752  	binder_stats_deleted(BINDER_STAT_NODE);
753  }
754  
755  static int binder_inc_node_nilocked(struct binder_node *node, int strong,
756  				    int internal,
757  				    struct list_head *target_list)
758  {
759  	struct binder_proc *proc = node->proc;
760  
761  	assert_spin_locked(&node->lock);
762  	if (proc)
763  		assert_spin_locked(&proc->inner_lock);
764  	if (strong) {
765  		if (internal) {
766  			if (target_list == NULL &&
767  			    node->internal_strong_refs == 0 &&
768  			    !(node->proc &&
769  			      node == node->proc->context->binder_context_mgr_node &&
770  			      node->has_strong_ref)) {
771  				pr_err("invalid inc strong node for %d\n",
772  					node->debug_id);
773  				return -EINVAL;
774  			}
775  			node->internal_strong_refs++;
776  		} else
777  			node->local_strong_refs++;
778  		if (!node->has_strong_ref && target_list) {
779  			struct binder_thread *thread = container_of(target_list,
780  						    struct binder_thread, todo);
781  			binder_dequeue_work_ilocked(&node->work);
782  			BUG_ON(&thread->todo != target_list);
783  			binder_enqueue_deferred_thread_work_ilocked(thread,
784  								   &node->work);
785  		}
786  	} else {
787  		if (!internal)
788  			node->local_weak_refs++;
789  		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
790  			if (target_list == NULL) {
791  				pr_err("invalid inc weak node for %d\n",
792  					node->debug_id);
793  				return -EINVAL;
794  			}
795  			/*
796  			 * See comment above
797  			 */
798  			binder_enqueue_work_ilocked(&node->work, target_list);
799  		}
800  	}
801  	return 0;
802  }
803  
804  static int binder_inc_node(struct binder_node *node, int strong, int internal,
805  			   struct list_head *target_list)
806  {
807  	int ret;
808  
809  	binder_node_inner_lock(node);
810  	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
811  	binder_node_inner_unlock(node);
812  
813  	return ret;
814  }
815  
816  static bool binder_dec_node_nilocked(struct binder_node *node,
817  				     int strong, int internal)
818  {
819  	struct binder_proc *proc = node->proc;
820  
821  	assert_spin_locked(&node->lock);
822  	if (proc)
823  		assert_spin_locked(&proc->inner_lock);
824  	if (strong) {
825  		if (internal)
826  			node->internal_strong_refs--;
827  		else
828  			node->local_strong_refs--;
829  		if (node->local_strong_refs || node->internal_strong_refs)
830  			return false;
831  	} else {
832  		if (!internal)
833  			node->local_weak_refs--;
834  		if (node->local_weak_refs || node->tmp_refs ||
835  				!hlist_empty(&node->refs))
836  			return false;
837  	}
838  
839  	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
840  		if (list_empty(&node->work.entry)) {
841  			binder_enqueue_work_ilocked(&node->work, &proc->todo);
842  			binder_wakeup_proc_ilocked(proc);
843  		}
844  	} else {
845  		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
846  		    !node->local_weak_refs && !node->tmp_refs) {
847  			if (proc) {
848  				binder_dequeue_work_ilocked(&node->work);
849  				rb_erase(&node->rb_node, &proc->nodes);
850  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
851  					     "refless node %d deleted\n",
852  					     node->debug_id);
853  			} else {
854  				BUG_ON(!list_empty(&node->work.entry));
855  				spin_lock(&binder_dead_nodes_lock);
856  				/*
857  				 * tmp_refs could have changed so
858  				 * check it again
859  				 */
860  				if (node->tmp_refs) {
861  					spin_unlock(&binder_dead_nodes_lock);
862  					return false;
863  				}
864  				hlist_del(&node->dead_node);
865  				spin_unlock(&binder_dead_nodes_lock);
866  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
867  					     "dead node %d deleted\n",
868  					     node->debug_id);
869  			}
870  			return true;
871  		}
872  	}
873  	return false;
874  }
875  
876  static void binder_dec_node(struct binder_node *node, int strong, int internal)
877  {
878  	bool free_node;
879  
880  	binder_node_inner_lock(node);
881  	free_node = binder_dec_node_nilocked(node, strong, internal);
882  	binder_node_inner_unlock(node);
883  	if (free_node)
884  		binder_free_node(node);
885  }
886  
887  static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
888  {
889  	/*
890  	 * No call to binder_inc_node() is needed since we
891  	 * don't need to inform userspace of any changes to
892  	 * tmp_refs
893  	 */
894  	node->tmp_refs++;
895  }
896  
897  /**
898   * binder_inc_node_tmpref() - take a temporary reference on node
899   * @node:	node to reference
900   *
901   * Take reference on node to prevent the node from being freed
902   * while referenced only by a local variable. The inner lock is
903   * needed to serialize with the node work on the queue (which
904   * isn't needed after the node is dead). If the node is dead
905   * (node->proc is NULL), use binder_dead_nodes_lock to protect
906   * node->tmp_refs against dead-node-only cases where the node
907   * lock cannot be acquired (e.g. traversing the dead node list to
908   * print nodes)
909   */
910  static void binder_inc_node_tmpref(struct binder_node *node)
911  {
912  	binder_node_lock(node);
913  	if (node->proc)
914  		binder_inner_proc_lock(node->proc);
915  	else
916  		spin_lock(&binder_dead_nodes_lock);
917  	binder_inc_node_tmpref_ilocked(node);
918  	if (node->proc)
919  		binder_inner_proc_unlock(node->proc);
920  	else
921  		spin_unlock(&binder_dead_nodes_lock);
922  	binder_node_unlock(node);
923  }
924  
925  /**
926   * binder_dec_node_tmpref() - remove a temporary reference on node
927   * @node:	node to reference
928   *
929   * Release temporary reference on node taken via binder_inc_node_tmpref()
930   */
931  static void binder_dec_node_tmpref(struct binder_node *node)
932  {
933  	bool free_node;
934  
935  	binder_node_inner_lock(node);
936  	if (!node->proc)
937  		spin_lock(&binder_dead_nodes_lock);
938  	else
939  		__acquire(&binder_dead_nodes_lock);
940  	node->tmp_refs--;
941  	BUG_ON(node->tmp_refs < 0);
942  	if (!node->proc)
943  		spin_unlock(&binder_dead_nodes_lock);
944  	else
945  		__release(&binder_dead_nodes_lock);
946  	/*
947  	 * Call binder_dec_node() to check if all refcounts are 0
948  	 * and cleanup is needed. Calling with strong=0 and internal=1
949  	 * causes no actual reference to be released in binder_dec_node().
950  	 * If that changes, a change is needed here too.
951  	 */
952  	free_node = binder_dec_node_nilocked(node, 0, 1);
953  	binder_node_inner_unlock(node);
954  	if (free_node)
955  		binder_free_node(node);
956  }
957  
958  static void binder_put_node(struct binder_node *node)
959  {
960  	binder_dec_node_tmpref(node);
961  }
962  
963  static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
964  						 u32 desc, bool need_strong_ref)
965  {
966  	struct rb_node *n = proc->refs_by_desc.rb_node;
967  	struct binder_ref *ref;
968  
969  	while (n) {
970  		ref = rb_entry(n, struct binder_ref, rb_node_desc);
971  
972  		if (desc < ref->data.desc) {
973  			n = n->rb_left;
974  		} else if (desc > ref->data.desc) {
975  			n = n->rb_right;
976  		} else if (need_strong_ref && !ref->data.strong) {
977  			binder_user_error("tried to use weak ref as strong ref\n");
978  			return NULL;
979  		} else {
980  			return ref;
981  		}
982  	}
983  	return NULL;
984  }
985  
986  /**
987   * binder_get_ref_for_node_olocked() - get the ref associated with given node
988   * @proc:	binder_proc that owns the ref
989   * @node:	binder_node of target
990   * @new_ref:	newly allocated binder_ref to be initialized or %NULL
991   *
992   * Look up the ref for the given node and return it if it exists
993   *
994   * If it doesn't exist and the caller provides a newly allocated
995   * ref, initialize the fields of the newly allocated ref and insert
996   * into the given proc rb_trees and node refs list.
997   *
998   * Return:	the ref for node. It is possible that another thread
999   *		allocated/initialized the ref first in which case the
1000   *		returned ref would be different than the passed-in
1001   *		new_ref. new_ref must be kfree'd by the caller in
1002   *		this case.
1003   */
1004  static struct binder_ref *binder_get_ref_for_node_olocked(
1005  					struct binder_proc *proc,
1006  					struct binder_node *node,
1007  					struct binder_ref *new_ref)
1008  {
1009  	struct binder_context *context = proc->context;
1010  	struct rb_node **p = &proc->refs_by_node.rb_node;
1011  	struct rb_node *parent = NULL;
1012  	struct binder_ref *ref;
1013  	struct rb_node *n;
1014  
1015  	while (*p) {
1016  		parent = *p;
1017  		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1018  
1019  		if (node < ref->node)
1020  			p = &(*p)->rb_left;
1021  		else if (node > ref->node)
1022  			p = &(*p)->rb_right;
1023  		else
1024  			return ref;
1025  	}
1026  	if (!new_ref)
1027  		return NULL;
1028  
1029  	binder_stats_created(BINDER_STAT_REF);
1030  	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1031  	new_ref->proc = proc;
1032  	new_ref->node = node;
1033  	rb_link_node(&new_ref->rb_node_node, parent, p);
1034  	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1035  
1036  	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1037  	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1038  		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1039  		if (ref->data.desc > new_ref->data.desc)
1040  			break;
1041  		new_ref->data.desc = ref->data.desc + 1;
1042  	}
1043  
1044  	p = &proc->refs_by_desc.rb_node;
1045  	while (*p) {
1046  		parent = *p;
1047  		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1048  
1049  		if (new_ref->data.desc < ref->data.desc)
1050  			p = &(*p)->rb_left;
1051  		else if (new_ref->data.desc > ref->data.desc)
1052  			p = &(*p)->rb_right;
1053  		else
1054  			BUG();
1055  	}
1056  	rb_link_node(&new_ref->rb_node_desc, parent, p);
1057  	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1058  
1059  	binder_node_lock(node);
1060  	hlist_add_head(&new_ref->node_entry, &node->refs);
1061  
1062  	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063  		     "%d new ref %d desc %d for node %d\n",
1064  		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1065  		      node->debug_id);
1066  	binder_node_unlock(node);
1067  	return new_ref;
1068  }
1069  
1070  static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1071  {
1072  	bool delete_node = false;
1073  
1074  	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1075  		     "%d delete ref %d desc %d for node %d\n",
1076  		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1077  		      ref->node->debug_id);
1078  
1079  	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1080  	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1081  
1082  	binder_node_inner_lock(ref->node);
1083  	if (ref->data.strong)
1084  		binder_dec_node_nilocked(ref->node, 1, 1);
1085  
1086  	hlist_del(&ref->node_entry);
1087  	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1088  	binder_node_inner_unlock(ref->node);
1089  	/*
1090  	 * Clear ref->node unless we want the caller to free the node
1091  	 */
1092  	if (!delete_node) {
1093  		/*
1094  		 * The caller uses ref->node to determine
1095  		 * whether the node needs to be freed. Clear
1096  		 * it since the node is still alive.
1097  		 */
1098  		ref->node = NULL;
1099  	}
1100  
1101  	if (ref->death) {
1102  		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1103  			     "%d delete ref %d desc %d has death notification\n",
1104  			      ref->proc->pid, ref->data.debug_id,
1105  			      ref->data.desc);
1106  		binder_dequeue_work(ref->proc, &ref->death->work);
1107  		binder_stats_deleted(BINDER_STAT_DEATH);
1108  	}
1109  	binder_stats_deleted(BINDER_STAT_REF);
1110  }
1111  
1112  /**
1113   * binder_inc_ref_olocked() - increment the ref for given handle
1114   * @ref:         ref to be incremented
1115   * @strong:      if true, strong increment, else weak
1116   * @target_list: list to queue node work on
1117   *
1118   * Increment the ref. @ref->proc->outer_lock must be held on entry
1119   *
1120   * Return: 0, if successful, else errno
1121   */
1122  static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1123  				  struct list_head *target_list)
1124  {
1125  	int ret;
1126  
1127  	if (strong) {
1128  		if (ref->data.strong == 0) {
1129  			ret = binder_inc_node(ref->node, 1, 1, target_list);
1130  			if (ret)
1131  				return ret;
1132  		}
1133  		ref->data.strong++;
1134  	} else {
1135  		if (ref->data.weak == 0) {
1136  			ret = binder_inc_node(ref->node, 0, 1, target_list);
1137  			if (ret)
1138  				return ret;
1139  		}
1140  		ref->data.weak++;
1141  	}
1142  	return 0;
1143  }
1144  
1145  /**
1146   * binder_dec_ref_olocked() - dec the ref for given handle
1147   * @ref:	ref to be decremented
1148   * @strong:	if true, strong decrement, else weak
1149   *
1150   * Decrement the ref. @ref->proc->outer_lock must be held on entry
1151   *
1152   * Return: true if ref is cleaned up and ready to be freed
1153   */
1154  static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1155  {
1156  	if (strong) {
1157  		if (ref->data.strong == 0) {
1158  			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1159  					  ref->proc->pid, ref->data.debug_id,
1160  					  ref->data.desc, ref->data.strong,
1161  					  ref->data.weak);
1162  			return false;
1163  		}
1164  		ref->data.strong--;
1165  		if (ref->data.strong == 0)
1166  			binder_dec_node(ref->node, strong, 1);
1167  	} else {
1168  		if (ref->data.weak == 0) {
1169  			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1170  					  ref->proc->pid, ref->data.debug_id,
1171  					  ref->data.desc, ref->data.strong,
1172  					  ref->data.weak);
1173  			return false;
1174  		}
1175  		ref->data.weak--;
1176  	}
1177  	if (ref->data.strong == 0 && ref->data.weak == 0) {
1178  		binder_cleanup_ref_olocked(ref);
1179  		return true;
1180  	}
1181  	return false;
1182  }
1183  
1184  /**
1185   * binder_get_node_from_ref() - get the node from the given proc/desc
1186   * @proc:	proc containing the ref
1187   * @desc:	the handle associated with the ref
1188   * @need_strong_ref: if true, only return node if ref is strong
1189   * @rdata:	the id/refcount data for the ref
1190   *
1191   * Given a proc and ref handle, return the associated binder_node
1192   *
1193   * Return: a binder_node or NULL if not found or not strong when strong required
1194   */
1195  static struct binder_node *binder_get_node_from_ref(
1196  		struct binder_proc *proc,
1197  		u32 desc, bool need_strong_ref,
1198  		struct binder_ref_data *rdata)
1199  {
1200  	struct binder_node *node;
1201  	struct binder_ref *ref;
1202  
1203  	binder_proc_lock(proc);
1204  	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1205  	if (!ref)
1206  		goto err_no_ref;
1207  	node = ref->node;
1208  	/*
1209  	 * Take an implicit reference on the node to ensure
1210  	 * it stays alive until the call to binder_put_node()
1211  	 */
1212  	binder_inc_node_tmpref(node);
1213  	if (rdata)
1214  		*rdata = ref->data;
1215  	binder_proc_unlock(proc);
1216  
1217  	return node;
1218  
1219  err_no_ref:
1220  	binder_proc_unlock(proc);
1221  	return NULL;
1222  }
1223  
1224  /**
1225   * binder_free_ref() - free the binder_ref
1226   * @ref:	ref to free
1227   *
1228   * Free the binder_ref. Free the binder_node indicated by ref->node
1229   * (if non-NULL) and the binder_ref_death indicated by ref->death.
1230   */
1231  static void binder_free_ref(struct binder_ref *ref)
1232  {
1233  	if (ref->node)
1234  		binder_free_node(ref->node);
1235  	kfree(ref->death);
1236  	kfree(ref);
1237  }
1238  
1239  /**
1240   * binder_update_ref_for_handle() - inc/dec the ref for given handle
1241   * @proc:	proc containing the ref
1242   * @desc:	the handle associated with the ref
1243   * @increment:	true=inc reference, false=dec reference
1244   * @strong:	true=strong reference, false=weak reference
1245   * @rdata:	the id/refcount data for the ref
1246   *
1247   * Given a proc and ref handle, increment or decrement the ref
1248   * according to "increment" arg.
1249   *
1250   * Return: 0 if successful, else errno
1251   */
1252  static int binder_update_ref_for_handle(struct binder_proc *proc,
1253  		uint32_t desc, bool increment, bool strong,
1254  		struct binder_ref_data *rdata)
1255  {
1256  	int ret = 0;
1257  	struct binder_ref *ref;
1258  	bool delete_ref = false;
1259  
1260  	binder_proc_lock(proc);
1261  	ref = binder_get_ref_olocked(proc, desc, strong);
1262  	if (!ref) {
1263  		ret = -EINVAL;
1264  		goto err_no_ref;
1265  	}
1266  	if (increment)
1267  		ret = binder_inc_ref_olocked(ref, strong, NULL);
1268  	else
1269  		delete_ref = binder_dec_ref_olocked(ref, strong);
1270  
1271  	if (rdata)
1272  		*rdata = ref->data;
1273  	binder_proc_unlock(proc);
1274  
1275  	if (delete_ref)
1276  		binder_free_ref(ref);
1277  	return ret;
1278  
1279  err_no_ref:
1280  	binder_proc_unlock(proc);
1281  	return ret;
1282  }
1283  
1284  /**
1285   * binder_dec_ref_for_handle() - dec the ref for given handle
1286   * @proc:	proc containing the ref
1287   * @desc:	the handle associated with the ref
1288   * @strong:	true=strong reference, false=weak reference
1289   * @rdata:	the id/refcount data for the ref
1290   *
1291   * Just calls binder_update_ref_for_handle() to decrement the ref.
1292   *
1293   * Return: 0 if successful, else errno
1294   */
1295  static int binder_dec_ref_for_handle(struct binder_proc *proc,
1296  		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1297  {
1298  	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1299  }
1300  
1301  
1302  /**
1303   * binder_inc_ref_for_node() - increment the ref for given proc/node
1304   * @proc:	 proc containing the ref
1305   * @node:	 target node
1306   * @strong:	 true=strong reference, false=weak reference
1307   * @target_list: worklist to use if node is incremented
1308   * @rdata:	 the id/refcount data for the ref
1309   *
1310   * Given a proc and node, increment the ref. Create the ref if it
1311   * doesn't already exist
1312   *
1313   * Return: 0 if successful, else errno
1314   */
1315  static int binder_inc_ref_for_node(struct binder_proc *proc,
1316  			struct binder_node *node,
1317  			bool strong,
1318  			struct list_head *target_list,
1319  			struct binder_ref_data *rdata)
1320  {
1321  	struct binder_ref *ref;
1322  	struct binder_ref *new_ref = NULL;
1323  	int ret = 0;
1324  
1325  	binder_proc_lock(proc);
1326  	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1327  	if (!ref) {
1328  		binder_proc_unlock(proc);
1329  		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1330  		if (!new_ref)
1331  			return -ENOMEM;
1332  		binder_proc_lock(proc);
1333  		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1334  	}
1335  	ret = binder_inc_ref_olocked(ref, strong, target_list);
1336  	*rdata = ref->data;
1337  	binder_proc_unlock(proc);
1338  	if (new_ref && ref != new_ref)
1339  		/*
1340  		 * Another thread created the ref first so
1341  		 * free the one we allocated
1342  		 */
1343  		kfree(new_ref);
1344  	return ret;
1345  }
1346  
1347  static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1348  					   struct binder_transaction *t)
1349  {
1350  	BUG_ON(!target_thread);
1351  	assert_spin_locked(&target_thread->proc->inner_lock);
1352  	BUG_ON(target_thread->transaction_stack != t);
1353  	BUG_ON(target_thread->transaction_stack->from != target_thread);
1354  	target_thread->transaction_stack =
1355  		target_thread->transaction_stack->from_parent;
1356  	t->from = NULL;
1357  }
1358  
1359  /**
1360   * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1361   * @thread:	thread to decrement
1362   *
1363   * A thread needs to be kept alive while being used to create or
1364   * handle a transaction. binder_get_txn_from() is used to safely
1365   * extract t->from from a binder_transaction and keep the thread
1366   * indicated by t->from from being freed. When done with that
1367   * binder_thread, this function is called to decrement the
1368   * tmp_ref and free if appropriate (thread has been released
1369   * and no transaction being processed by the driver)
1370   */
1371  static void binder_thread_dec_tmpref(struct binder_thread *thread)
1372  {
1373  	/*
1374  	 * atomic is used to protect the counter value while
1375  	 * it cannot reach zero or thread->is_dead is false
1376  	 */
1377  	binder_inner_proc_lock(thread->proc);
1378  	atomic_dec(&thread->tmp_ref);
1379  	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1380  		binder_inner_proc_unlock(thread->proc);
1381  		binder_free_thread(thread);
1382  		return;
1383  	}
1384  	binder_inner_proc_unlock(thread->proc);
1385  }
1386  
1387  /**
1388   * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1389   * @proc:	proc to decrement
1390   *
1391   * A binder_proc needs to be kept alive while being used to create or
1392   * handle a transaction. proc->tmp_ref is incremented when
1393   * creating a new transaction or the binder_proc is currently in-use
1394   * by threads that are being released. When done with the binder_proc,
1395   * this function is called to decrement the counter and free the
1396   * proc if appropriate (proc has been released, all threads have
1397   * been released and not currently in-use to process a transaction).
1398   */
1399  static void binder_proc_dec_tmpref(struct binder_proc *proc)
1400  {
1401  	binder_inner_proc_lock(proc);
1402  	proc->tmp_ref--;
1403  	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1404  			!proc->tmp_ref) {
1405  		binder_inner_proc_unlock(proc);
1406  		binder_free_proc(proc);
1407  		return;
1408  	}
1409  	binder_inner_proc_unlock(proc);
1410  }
1411  
1412  /**
1413   * binder_get_txn_from() - safely extract the "from" thread in transaction
1414   * @t:	binder transaction for t->from
1415   *
1416   * Atomically return the "from" thread and increment the tmp_ref
1417   * count for the thread to ensure it stays alive until
1418   * binder_thread_dec_tmpref() is called.
1419   *
1420   * Return: the value of t->from
1421   */
1422  static struct binder_thread *binder_get_txn_from(
1423  		struct binder_transaction *t)
1424  {
1425  	struct binder_thread *from;
1426  
1427  	spin_lock(&t->lock);
1428  	from = t->from;
1429  	if (from)
1430  		atomic_inc(&from->tmp_ref);
1431  	spin_unlock(&t->lock);
1432  	return from;
1433  }
1434  
1435  /**
1436   * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1437   * @t:	binder transaction for t->from
1438   *
1439   * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1440   * to guarantee that the thread cannot be released while operating on it.
1441   * The caller must call binder_inner_proc_unlock() to release the inner lock
1442   * as well as call binder_thread_dec_tmpref() to release the reference.
1443   *
1444   * Return: the value of t->from
1445   */
1446  static struct binder_thread *binder_get_txn_from_and_acq_inner(
1447  		struct binder_transaction *t)
1448  	__acquires(&t->from->proc->inner_lock)
1449  {
1450  	struct binder_thread *from;
1451  
1452  	from = binder_get_txn_from(t);
1453  	if (!from) {
1454  		__acquire(&from->proc->inner_lock);
1455  		return NULL;
1456  	}
1457  	binder_inner_proc_lock(from->proc);
1458  	if (t->from) {
1459  		BUG_ON(from != t->from);
1460  		return from;
1461  	}
1462  	binder_inner_proc_unlock(from->proc);
1463  	__acquire(&from->proc->inner_lock);
1464  	binder_thread_dec_tmpref(from);
1465  	return NULL;
1466  }
1467  
1468  /**
1469   * binder_free_txn_fixups() - free unprocessed fd fixups
1470   * @t:	binder transaction whose fd fixups are to be freed
1471   *
1472   * If the transaction is being torn down prior to being
1473   * processed by the target process, free all of the
1474   * fd fixups and fput the file structs. It is safe to
1475   * call this function after the fixups have been
1476   * processed -- in that case, the list will be empty.
1477   */
1478  static void binder_free_txn_fixups(struct binder_transaction *t)
1479  {
1480  	struct binder_txn_fd_fixup *fixup, *tmp;
1481  
1482  	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1483  		fput(fixup->file);
1484  		list_del(&fixup->fixup_entry);
1485  		kfree(fixup);
1486  	}
1487  }
1488  
1489  static void binder_txn_latency_free(struct binder_transaction *t)
1490  {
1491  	int from_proc, from_thread, to_proc, to_thread;
1492  
1493  	spin_lock(&t->lock);
1494  	from_proc = t->from ? t->from->proc->pid : 0;
1495  	from_thread = t->from ? t->from->pid : 0;
1496  	to_proc = t->to_proc ? t->to_proc->pid : 0;
1497  	to_thread = t->to_thread ? t->to_thread->pid : 0;
1498  	spin_unlock(&t->lock);
1499  
1500  	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1501  }
1502  
1503  static void binder_free_transaction(struct binder_transaction *t)
1504  {
1505  	struct binder_proc *target_proc = t->to_proc;
1506  
1507  	if (target_proc) {
1508  		binder_inner_proc_lock(target_proc);
1509  		target_proc->outstanding_txns--;
1510  		if (target_proc->outstanding_txns < 0)
1511  			pr_warn("%s: Unexpected outstanding_txns %d\n",
1512  				__func__, target_proc->outstanding_txns);
1513  		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1514  			wake_up_interruptible_all(&target_proc->freeze_wait);
1515  		if (t->buffer)
1516  			t->buffer->transaction = NULL;
1517  		binder_inner_proc_unlock(target_proc);
1518  	}
1519  	if (trace_binder_txn_latency_free_enabled())
1520  		binder_txn_latency_free(t);
1521  	/*
1522  	 * If the transaction has no target_proc, then
1523  	 * t->buffer->transaction has already been cleared.
1524  	 */
1525  	binder_free_txn_fixups(t);
1526  	kfree(t);
1527  	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1528  }
1529  
1530  static void binder_send_failed_reply(struct binder_transaction *t,
1531  				     uint32_t error_code)
1532  {
1533  	struct binder_thread *target_thread;
1534  	struct binder_transaction *next;
1535  
1536  	BUG_ON(t->flags & TF_ONE_WAY);
1537  	while (1) {
1538  		target_thread = binder_get_txn_from_and_acq_inner(t);
1539  		if (target_thread) {
1540  			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1541  				     "send failed reply for transaction %d to %d:%d\n",
1542  				      t->debug_id,
1543  				      target_thread->proc->pid,
1544  				      target_thread->pid);
1545  
1546  			binder_pop_transaction_ilocked(target_thread, t);
1547  			if (target_thread->reply_error.cmd == BR_OK) {
1548  				target_thread->reply_error.cmd = error_code;
1549  				binder_enqueue_thread_work_ilocked(
1550  					target_thread,
1551  					&target_thread->reply_error.work);
1552  				wake_up_interruptible(&target_thread->wait);
1553  			} else {
1554  				/*
1555  				 * Cannot get here for normal operation, but
1556  				 * we can if multiple synchronous transactions
1557  				 * are sent without blocking for responses.
1558  				 * Just ignore the 2nd error in this case.
1559  				 */
1560  				pr_warn("Unexpected reply error: %u\n",
1561  					target_thread->reply_error.cmd);
1562  			}
1563  			binder_inner_proc_unlock(target_thread->proc);
1564  			binder_thread_dec_tmpref(target_thread);
1565  			binder_free_transaction(t);
1566  			return;
1567  		}
1568  		__release(&target_thread->proc->inner_lock);
1569  		next = t->from_parent;
1570  
1571  		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1572  			     "send failed reply for transaction %d, target dead\n",
1573  			     t->debug_id);
1574  
1575  		binder_free_transaction(t);
1576  		if (next == NULL) {
1577  			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1578  				     "reply failed, no target thread at root\n");
1579  			return;
1580  		}
1581  		t = next;
1582  		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1583  			     "reply failed, no target thread -- retry %d\n",
1584  			      t->debug_id);
1585  	}
1586  }
1587  
1588  /**
1589   * binder_cleanup_transaction() - cleans up undelivered transaction
1590   * @t:		transaction that needs to be cleaned up
1591   * @reason:	reason the transaction wasn't delivered
1592   * @error_code:	error to return to caller (if synchronous call)
1593   */
1594  static void binder_cleanup_transaction(struct binder_transaction *t,
1595  				       const char *reason,
1596  				       uint32_t error_code)
1597  {
1598  	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1599  		binder_send_failed_reply(t, error_code);
1600  	} else {
1601  		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1602  			"undelivered transaction %d, %s\n",
1603  			t->debug_id, reason);
1604  		binder_free_transaction(t);
1605  	}
1606  }
1607  
1608  /**
1609   * binder_get_object() - gets object and checks for valid metadata
1610   * @proc:	binder_proc owning the buffer
1611   * @u:		sender's user pointer to base of buffer
1612   * @buffer:	binder_buffer that we're parsing.
1613   * @offset:	offset in the @buffer at which to validate an object.
1614   * @object:	struct binder_object to read into
1615   *
1616   * Copy the binder object at the given offset into @object. If @u is
1617   * provided then the copy is from the sender's buffer. If not, then
1618   * it is copied from the target's @buffer.
1619   *
1620   * Return:	If there's a valid metadata object at @offset, the
1621   *		size of that object. Otherwise, it returns zero. The object
1622   *		is read into the struct binder_object pointed to by @object.
1623   */
1624  static size_t binder_get_object(struct binder_proc *proc,
1625  				const void __user *u,
1626  				struct binder_buffer *buffer,
1627  				unsigned long offset,
1628  				struct binder_object *object)
1629  {
1630  	size_t read_size;
1631  	struct binder_object_header *hdr;
1632  	size_t object_size = 0;
1633  
1634  	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1635  	if (offset > buffer->data_size || read_size < sizeof(*hdr))
1636  		return 0;
1637  	if (u) {
1638  		if (copy_from_user(object, u + offset, read_size))
1639  			return 0;
1640  	} else {
1641  		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1642  						  offset, read_size))
1643  			return 0;
1644  	}
1645  
1646  	/* Ok, now see if we read a complete object. */
1647  	hdr = &object->hdr;
1648  	switch (hdr->type) {
1649  	case BINDER_TYPE_BINDER:
1650  	case BINDER_TYPE_WEAK_BINDER:
1651  	case BINDER_TYPE_HANDLE:
1652  	case BINDER_TYPE_WEAK_HANDLE:
1653  		object_size = sizeof(struct flat_binder_object);
1654  		break;
1655  	case BINDER_TYPE_FD:
1656  		object_size = sizeof(struct binder_fd_object);
1657  		break;
1658  	case BINDER_TYPE_PTR:
1659  		object_size = sizeof(struct binder_buffer_object);
1660  		break;
1661  	case BINDER_TYPE_FDA:
1662  		object_size = sizeof(struct binder_fd_array_object);
1663  		break;
1664  	default:
1665  		return 0;
1666  	}
1667  	if (offset <= buffer->data_size - object_size &&
1668  	    buffer->data_size >= object_size)
1669  		return object_size;
1670  	else
1671  		return 0;
1672  }
1673  
1674  /**
1675   * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1676   * @proc:	binder_proc owning the buffer
1677   * @b:		binder_buffer containing the object
1678   * @object:	struct binder_object to read into
1679   * @index:	index in offset array at which the binder_buffer_object is
1680   *		located
1681   * @start_offset: points to the start of the offset array
1682   * @object_offsetp: offset of @object read from @b
1683   * @num_valid:	the number of valid offsets in the offset array
1684   *
1685   * Return:	If @index is within the valid range of the offset array
1686   *		described by @start_offset and @num_valid, and if there's a valid
1687   *		binder_buffer_object at the offset found in index @index
1688   *		of the offset array, that object is returned. Otherwise,
1689   *		%NULL is returned.
1690   *		Note that the offset found in index @index itself is not
1691   *		verified; this function assumes that @num_valid elements
1692   *		from @start_offset were previously verified to have valid offsets.
1693   *		If @object_offsetp is non-NULL, then the offset within
1694   *		@b is written to it.
1695   */
1696  static struct binder_buffer_object *binder_validate_ptr(
1697  						struct binder_proc *proc,
1698  						struct binder_buffer *b,
1699  						struct binder_object *object,
1700  						binder_size_t index,
1701  						binder_size_t start_offset,
1702  						binder_size_t *object_offsetp,
1703  						binder_size_t num_valid)
1704  {
1705  	size_t object_size;
1706  	binder_size_t object_offset;
1707  	unsigned long buffer_offset;
1708  
1709  	if (index >= num_valid)
1710  		return NULL;
1711  
1712  	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1713  	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1714  					  b, buffer_offset,
1715  					  sizeof(object_offset)))
1716  		return NULL;
1717  	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1718  	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1719  		return NULL;
1720  	if (object_offsetp)
1721  		*object_offsetp = object_offset;
1722  
1723  	return &object->bbo;
1724  }
1725  
1726  /**
1727   * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1728   * @proc:		binder_proc owning the buffer
1729   * @b:			transaction buffer
1730   * @objects_start_offset: offset to start of objects buffer
1731   * @buffer_obj_offset:	offset to the binder_buffer_object in which the fixup occurs
1732   * @fixup_offset:	start offset in @b to fix up
1733   * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1734   * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1735   *
1736   * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
1737   *			allowed.
1738   *
1739   * For safety reasons, we only allow fixups inside a buffer to happen
1740   * at increasing offsets; additionally, we only allow fixup on the last
1741   * buffer object that was verified, or one of its parents.
1742   *
1743   * Example of what is allowed:
1744   *
1745   * A
1746   *   B (parent = A, offset = 0)
1747   *   C (parent = A, offset = 16)
1748   *     D (parent = C, offset = 0)
1749   *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1750   *
1751   * Examples of what is not allowed:
1752   *
1753   * Decreasing offsets within the same parent:
1754   * A
1755   *   C (parent = A, offset = 16)
1756   *   B (parent = A, offset = 0) // decreasing offset within A
1757   *
1758   * Referring to a parent that wasn't the last object or any of its parents:
1759   * A
1760   *   B (parent = A, offset = 0)
1761   *   C (parent = A, offset = 0)
1762   *   C (parent = A, offset = 16)
1763   *     D (parent = B, offset = 0) // B is not the last object (C) or any of C's parents
1764   */
1765  static bool binder_validate_fixup(struct binder_proc *proc,
1766  				  struct binder_buffer *b,
1767  				  binder_size_t objects_start_offset,
1768  				  binder_size_t buffer_obj_offset,
1769  				  binder_size_t fixup_offset,
1770  				  binder_size_t last_obj_offset,
1771  				  binder_size_t last_min_offset)
1772  {
1773  	if (!last_obj_offset) {
1774  		/* No previously verified object; nothing to fix up against */
1775  		return false;
1776  	}
1777  
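	/*
	 * Walk up the parent chain starting at the last verified buffer
	 * object; the object being fixed up must appear on this chain.
	 */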
1778  	while (last_obj_offset != buffer_obj_offset) {
1779  		unsigned long buffer_offset;
1780  		struct binder_object last_object;
1781  		struct binder_buffer_object *last_bbo;
1782  		size_t object_size = binder_get_object(proc, NULL, b,
1783  						       last_obj_offset,
1784  						       &last_object);
1785  		if (object_size != sizeof(*last_bbo))
1786  			return false;
1787  
1788  		last_bbo = &last_object.bbo;
1789  		/*
1790  		 * Safe to retrieve the parent of last_obj, since it
1791  		 * was already previously verified by the driver.
1792  		 */
1793  		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1794  			return false;
1795  		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1796  		buffer_offset = objects_start_offset +
1797  			sizeof(binder_size_t) * last_bbo->parent;
1798  		if (binder_alloc_copy_from_buffer(&proc->alloc,
1799  						  &last_obj_offset,
1800  						  b, buffer_offset,
1801  						  sizeof(last_obj_offset)))
1802  			return false;
1803  	}
1804  	return (fixup_offset >= last_min_offset);
1805  }
1806  
1807  /**
1808   * struct binder_task_work_cb - for deferred close
1809   *
1810   * @twork:                callback_head for task work
1811   * @file:                 file to close
1812   *
1813   * Structure to pass task work to be handled after
1814   * returning from binder_ioctl() via task_work_add().
1815   */
1816  struct binder_task_work_cb {
1817  	struct callback_head twork;
1818  	struct file *file;
1819  };
1820  
1821  /**
1822   * binder_do_fd_close() - release file for a deferred fd close
1823   * @twork:	callback head for task work
1824   *
1825   * It is not safe to call ksys_close() during the binder_ioctl()
1826   * function if there is a chance that binder's own file descriptor
1827   * might be closed. This is to meet the requirements for using
1828   * fdget() (see comments for __fget_light()). Therefore use
1829   * task_work_add() to schedule the close operation once we have
1830   * returned from binder_ioctl(). This function is the callback
1831   * for that mechanism and drops the final reference (fput()) on
1832   * the file whose descriptor was detached in binder_deferred_fd_close().
1833   */
1834  static void binder_do_fd_close(struct callback_head *twork)
1835  {
1836  	struct binder_task_work_cb *twcb = container_of(twork,
1837  			struct binder_task_work_cb, twork);
1838  
1839  	fput(twcb->file);
1840  	kfree(twcb);
1841  }
1842  
1843  /**
1844   * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1845   * @fd:		file-descriptor to close
1846   *
1847   * See comments in binder_do_fd_close(). This function is used to schedule
1848   * a file-descriptor to be closed after returning from binder_ioctl().
1849   */
1850  static void binder_deferred_fd_close(int fd)
1851  {
1852  	struct binder_task_work_cb *twcb;
1853  
1854  	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1855  	if (!twcb)
1856  		return;
1857  	init_task_work(&twcb->twork, binder_do_fd_close);
1858  	close_fd_get_file(fd, &twcb->file);
1859  	if (twcb->file) {
1860  		filp_close(twcb->file, current->files);
1861  		task_work_add(current, &twcb->twork, TWA_RESUME);
1862  	} else {
1863  		kfree(twcb);
1864  	}
1865  }
1866  
1867  static void binder_transaction_buffer_release(struct binder_proc *proc,
1868  					      struct binder_thread *thread,
1869  					      struct binder_buffer *buffer,
1870  					      binder_size_t failed_at,
1871  					      bool is_failure)
1872  {
1873  	int debug_id = buffer->debug_id;
1874  	binder_size_t off_start_offset, buffer_offset, off_end_offset;
1875  
1876  	binder_debug(BINDER_DEBUG_TRANSACTION,
1877  		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1878  		     proc->pid, buffer->debug_id,
1879  		     buffer->data_size, buffer->offsets_size,
1880  		     (unsigned long long)failed_at);
1881  
1882  	if (buffer->target_node)
1883  		binder_dec_node(buffer->target_node, 1, 0);
1884  
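	/*
	 * For a failed transaction only the objects up to failed_at were
	 * processed, so limit the release walk to that offset; otherwise
	 * walk the entire offsets array.
	 */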
1885  	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1886  	off_end_offset = is_failure && failed_at ? failed_at :
1887  				off_start_offset + buffer->offsets_size;
1888  	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1889  	     buffer_offset += sizeof(binder_size_t)) {
1890  		struct binder_object_header *hdr;
1891  		size_t object_size = 0;
1892  		struct binder_object object;
1893  		binder_size_t object_offset;
1894  
1895  		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1896  						   buffer, buffer_offset,
1897  						   sizeof(object_offset)))
1898  			object_size = binder_get_object(proc, NULL, buffer,
1899  							object_offset, &object);
1900  		if (object_size == 0) {
1901  			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1902  			       debug_id, (u64)object_offset, buffer->data_size);
1903  			continue;
1904  		}
1905  		hdr = &object.hdr;
1906  		switch (hdr->type) {
1907  		case BINDER_TYPE_BINDER:
1908  		case BINDER_TYPE_WEAK_BINDER: {
1909  			struct flat_binder_object *fp;
1910  			struct binder_node *node;
1911  
1912  			fp = to_flat_binder_object(hdr);
1913  			node = binder_get_node(proc, fp->binder);
1914  			if (node == NULL) {
1915  				pr_err("transaction release %d bad node %016llx\n",
1916  				       debug_id, (u64)fp->binder);
1917  				break;
1918  			}
1919  			binder_debug(BINDER_DEBUG_TRANSACTION,
1920  				     "        node %d u%016llx\n",
1921  				     node->debug_id, (u64)node->ptr);
1922  			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1923  					0);
1924  			binder_put_node(node);
1925  		} break;
1926  		case BINDER_TYPE_HANDLE:
1927  		case BINDER_TYPE_WEAK_HANDLE: {
1928  			struct flat_binder_object *fp;
1929  			struct binder_ref_data rdata;
1930  			int ret;
1931  
1932  			fp = to_flat_binder_object(hdr);
1933  			ret = binder_dec_ref_for_handle(proc, fp->handle,
1934  				hdr->type == BINDER_TYPE_HANDLE, &rdata);
1935  
1936  			if (ret) {
1937  				pr_err("transaction release %d bad handle %d, ret = %d\n",
1938  				 debug_id, fp->handle, ret);
1939  				break;
1940  			}
1941  			binder_debug(BINDER_DEBUG_TRANSACTION,
1942  				     "        ref %d desc %d\n",
1943  				     rdata.debug_id, rdata.desc);
1944  		} break;
1945  
1946  		case BINDER_TYPE_FD: {
1947  			/*
1948  			 * No need to close the file here since user-space
1949  			 * closes it for successfully delivered
1950  			 * transactions. For transactions that weren't
1951  			 * delivered, the new fd was never allocated so
1952  			 * there is no need to close and the fput on the
1953  			 * file is done when the transaction is torn
1954  			 * down.
1955  			 */
1956  		} break;
1957  		case BINDER_TYPE_PTR:
1958  			/*
1959  			 * Nothing to do here, this will get cleaned up when the
1960  			 * transaction buffer gets freed
1961  			 */
1962  			break;
1963  		case BINDER_TYPE_FDA: {
1964  			struct binder_fd_array_object *fda;
1965  			struct binder_buffer_object *parent;
1966  			struct binder_object ptr_object;
1967  			binder_size_t fda_offset;
1968  			size_t fd_index;
1969  			binder_size_t fd_buf_size;
1970  			binder_size_t num_valid;
1971  
1972  			if (is_failure) {
1973  				/*
1974  				 * The fd fixups have not been applied so no
1975  				 * fds need to be closed.
1976  				 */
1977  				continue;
1978  			}
1979  
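			/*
			 * Only offsets before the current one have already
			 * been processed, so only those can legitimately be
			 * the fd array's parent.
			 */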
1980  			num_valid = (buffer_offset - off_start_offset) /
1981  						sizeof(binder_size_t);
1982  			fda = to_binder_fd_array_object(hdr);
1983  			parent = binder_validate_ptr(proc, buffer, &ptr_object,
1984  						     fda->parent,
1985  						     off_start_offset,
1986  						     NULL,
1987  						     num_valid);
1988  			if (!parent) {
1989  				pr_err("transaction release %d bad parent offset\n",
1990  				       debug_id);
1991  				continue;
1992  			}
1993  			fd_buf_size = sizeof(u32) * fda->num_fds;
1994  			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1995  				pr_err("transaction release %d invalid number of fds (%lld)\n",
1996  				       debug_id, (u64)fda->num_fds);
1997  				continue;
1998  			}
1999  			if (fd_buf_size > parent->length ||
2000  			    fda->parent_offset > parent->length - fd_buf_size) {
2001  				/* No space for all file descriptors here. */
2002  				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2003  				       debug_id, (u64)fda->num_fds);
2004  				continue;
2005  			}
2006  			/*
2007  			 * the source data for binder_buffer_object is visible
2008  			 * to user-space and the @buffer element is the user
2009  			 * pointer to the buffer_object containing the fd_array.
2010  			 * Convert the address to an offset relative to
2011  			 * the base of the transaction buffer.
2012  			 */
2013  			fda_offset =
2014  			    (parent->buffer - (uintptr_t)buffer->user_data) +
2015  			    fda->parent_offset;
2016  			for (fd_index = 0; fd_index < fda->num_fds;
2017  			     fd_index++) {
2018  				u32 fd;
2019  				int err;
2020  				binder_size_t offset = fda_offset +
2021  					fd_index * sizeof(fd);
2022  
2023  				err = binder_alloc_copy_from_buffer(
2024  						&proc->alloc, &fd, buffer,
2025  						offset, sizeof(fd));
2026  				WARN_ON(err);
2027  				if (!err) {
2028  					binder_deferred_fd_close(fd);
2029  					/*
2030  					 * Need to make sure the thread goes
2031  					 * back to userspace to complete the
2032  					 * deferred close
2033  					 */
2034  					if (thread)
2035  						thread->looper_need_return = true;
2036  				}
2037  			}
2038  		} break;
2039  		default:
2040  			pr_err("transaction release %d bad object type %x\n",
2041  				debug_id, hdr->type);
2042  			break;
2043  		}
2044  	}
2045  }
2046  
2047  static int binder_translate_binder(struct flat_binder_object *fp,
2048  				   struct binder_transaction *t,
2049  				   struct binder_thread *thread)
2050  {
2051  	struct binder_node *node;
2052  	struct binder_proc *proc = thread->proc;
2053  	struct binder_proc *target_proc = t->to_proc;
2054  	struct binder_ref_data rdata;
2055  	int ret = 0;
2056  
2057  	node = binder_get_node(proc, fp->binder);
2058  	if (!node) {
2059  		node = binder_new_node(proc, fp);
2060  		if (!node)
2061  			return -ENOMEM;
2062  	}
2063  	if (fp->cookie != node->cookie) {
2064  		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2065  				  proc->pid, thread->pid, (u64)fp->binder,
2066  				  node->debug_id, (u64)fp->cookie,
2067  				  (u64)node->cookie);
2068  		ret = -EINVAL;
2069  		goto done;
2070  	}
2071  	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2072  		ret = -EPERM;
2073  		goto done;
2074  	}
2075  
2076  	ret = binder_inc_ref_for_node(target_proc, node,
2077  			fp->hdr.type == BINDER_TYPE_BINDER,
2078  			&thread->todo, &rdata);
2079  	if (ret)
2080  		goto done;
2081  
2082  	if (fp->hdr.type == BINDER_TYPE_BINDER)
2083  		fp->hdr.type = BINDER_TYPE_HANDLE;
2084  	else
2085  		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2086  	fp->binder = 0;
2087  	fp->handle = rdata.desc;
2088  	fp->cookie = 0;
2089  
2090  	trace_binder_transaction_node_to_ref(t, node, &rdata);
2091  	binder_debug(BINDER_DEBUG_TRANSACTION,
2092  		     "        node %d u%016llx -> ref %d desc %d\n",
2093  		     node->debug_id, (u64)node->ptr,
2094  		     rdata.debug_id, rdata.desc);
2095  done:
2096  	binder_put_node(node);
2097  	return ret;
2098  }
2099  
2100  static int binder_translate_handle(struct flat_binder_object *fp,
2101  				   struct binder_transaction *t,
2102  				   struct binder_thread *thread)
2103  {
2104  	struct binder_proc *proc = thread->proc;
2105  	struct binder_proc *target_proc = t->to_proc;
2106  	struct binder_node *node;
2107  	struct binder_ref_data src_rdata;
2108  	int ret = 0;
2109  
2110  	node = binder_get_node_from_ref(proc, fp->handle,
2111  			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2112  	if (!node) {
2113  		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2114  				  proc->pid, thread->pid, fp->handle);
2115  		return -EINVAL;
2116  	}
2117  	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2118  		ret = -EPERM;
2119  		goto done;
2120  	}
2121  
2122  	binder_node_lock(node);
2123  	if (node->proc == target_proc) {
2124  		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2125  			fp->hdr.type = BINDER_TYPE_BINDER;
2126  		else
2127  			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2128  		fp->binder = node->ptr;
2129  		fp->cookie = node->cookie;
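		/*
		 * A dead node has no proc; the __acquire/__release below are
		 * sparse annotations only, no lock is actually taken then.
		 */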
2130  		if (node->proc)
2131  			binder_inner_proc_lock(node->proc);
2132  		else
2133  			__acquire(&node->proc->inner_lock);
2134  		binder_inc_node_nilocked(node,
2135  					 fp->hdr.type == BINDER_TYPE_BINDER,
2136  					 0, NULL);
2137  		if (node->proc)
2138  			binder_inner_proc_unlock(node->proc);
2139  		else
2140  			__release(&node->proc->inner_lock);
2141  		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2142  		binder_debug(BINDER_DEBUG_TRANSACTION,
2143  			     "        ref %d desc %d -> node %d u%016llx\n",
2144  			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2145  			     (u64)node->ptr);
2146  		binder_node_unlock(node);
2147  	} else {
2148  		struct binder_ref_data dest_rdata;
2149  
2150  		binder_node_unlock(node);
2151  		ret = binder_inc_ref_for_node(target_proc, node,
2152  				fp->hdr.type == BINDER_TYPE_HANDLE,
2153  				NULL, &dest_rdata);
2154  		if (ret)
2155  			goto done;
2156  
2157  		fp->binder = 0;
2158  		fp->handle = dest_rdata.desc;
2159  		fp->cookie = 0;
2160  		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2161  						    &dest_rdata);
2162  		binder_debug(BINDER_DEBUG_TRANSACTION,
2163  			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2164  			     src_rdata.debug_id, src_rdata.desc,
2165  			     dest_rdata.debug_id, dest_rdata.desc,
2166  			     node->debug_id);
2167  	}
2168  done:
2169  	binder_put_node(node);
2170  	return ret;
2171  }
2172  
2173  static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2174  			       struct binder_transaction *t,
2175  			       struct binder_thread *thread,
2176  			       struct binder_transaction *in_reply_to)
2177  {
2178  	struct binder_proc *proc = thread->proc;
2179  	struct binder_proc *target_proc = t->to_proc;
2180  	struct binder_txn_fd_fixup *fixup;
2181  	struct file *file;
2182  	int ret = 0;
2183  	bool target_allows_fd;
2184  
2185  	if (in_reply_to)
2186  		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2187  	else
2188  		target_allows_fd = t->buffer->target_node->accept_fds;
2189  	if (!target_allows_fd) {
2190  		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2191  				  proc->pid, thread->pid,
2192  				  in_reply_to ? "reply" : "transaction",
2193  				  fd);
2194  		ret = -EPERM;
2195  		goto err_fd_not_accepted;
2196  	}
2197  
2198  	file = fget(fd);
2199  	if (!file) {
2200  		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2201  				  proc->pid, thread->pid, fd);
2202  		ret = -EBADF;
2203  		goto err_fget;
2204  	}
2205  	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2206  	if (ret < 0) {
2207  		ret = -EPERM;
2208  		goto err_security;
2209  	}
2210  
2211  	/*
2212  	 * Add fixup record for this transaction. The allocation
2213  	 * of the fd in the target needs to be done from a
2214  	 * target thread.
2215  	 */
2216  	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2217  	if (!fixup) {
2218  		ret = -ENOMEM;
2219  		goto err_alloc;
2220  	}
2221  	fixup->file = file;
2222  	fixup->offset = fd_offset;
2223  	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2224  	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2225  
2226  	return ret;
2227  
2228  err_alloc:
2229  err_security:
2230  	fput(file);
2231  err_fget:
2232  err_fd_not_accepted:
2233  	return ret;
2234  }
2235  
2236  /**
2237   * struct binder_ptr_fixup - data to be fixed-up in target buffer
2238   * @offset:	offset in target buffer to fixup
2239   * @skip_size:	bytes to skip in copy (fixup will be written later)
2240   * @fixup_data:	data to write at fixup offset
2241   * @node:	list node
2242   *
2243   * This is used for the pointer fixup list (pf) which is created and consumed
2244   * during binder_transaction() and is only accessed locally. No
2245   * locking is necessary.
2246   *
2247   * The list is ordered by @offset.
2248   */
2249  struct binder_ptr_fixup {
2250  	binder_size_t offset;
2251  	size_t skip_size;
2252  	binder_uintptr_t fixup_data;
2253  	struct list_head node;
2254  };
2255  
2256  /**
2257   * struct binder_sg_copy - scatter-gather data to be copied
2258   * @offset:		offset in target buffer
2259   * @sender_uaddr:	user address in source buffer
2260   * @length:		bytes to copy
2261   * @node:		list node
2262   *
2263   * This is used for the sg copy list (sgc) which is created and consumed
2264   * during binder_transaction() and is only accessed locally. No
2265   * locking is necessary.
2266   *
2267   * The list is ordered by @offset.
2268   */
2269  struct binder_sg_copy {
2270  	binder_size_t offset;
2271  	const void __user *sender_uaddr;
2272  	size_t length;
2273  	struct list_head node;
2274  };
2275  
2276  /**
2277   * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2278   * @alloc:	binder_alloc associated with @buffer
2279   * @buffer:	binder buffer in target process
2280   * @sgc_head:	list_head of scatter-gather copy list
2281   * @pf_head:	list_head of pointer fixup list
2282   *
2283   * Processes all elements of @sgc_head, applying fixups from @pf_head
2284   * and copying the scatter-gather data from the source process' user
2285   * buffer to the target's buffer. It is expected that the list creation
2286   * and processing all occurs during binder_transaction() so these lists
2287   * are only accessed in local context.
2288   *
2289   * Return: 0=success, else -errno
2290   */
2291  static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2292  					 struct binder_buffer *buffer,
2293  					 struct list_head *sgc_head,
2294  					 struct list_head *pf_head)
2295  {
2296  	int ret = 0;
2297  	struct binder_sg_copy *sgc, *tmpsgc;
2298  	struct binder_ptr_fixup *pf =
2299  		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2300  					 node);
2301  
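	/*
	 * Both lists are ordered by target buffer offset, so they can be
	 * walked in lock step: copy scatter-gather data up to the next
	 * fixup offset, then apply (or skip) that fixup and continue.
	 */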
2302  	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2303  		size_t bytes_copied = 0;
2304  
2305  		while (bytes_copied < sgc->length) {
2306  			size_t copy_size;
2307  			size_t bytes_left = sgc->length - bytes_copied;
2308  			size_t offset = sgc->offset + bytes_copied;
2309  
2310  			/*
2311  			 * We copy up to the fixup (pointed to by pf)
2312  			 */
2313  			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2314  				       : bytes_left;
2315  			if (!ret && copy_size)
2316  				ret = binder_alloc_copy_user_to_buffer(
2317  						alloc, buffer,
2318  						offset,
2319  						sgc->sender_uaddr + bytes_copied,
2320  						copy_size);
2321  			bytes_copied += copy_size;
2322  			if (copy_size != bytes_left) {
2323  				BUG_ON(!pf);
2324  				/* we stopped at a fixup offset */
2325  				if (pf->skip_size) {
2326  					/*
2327  					 * we are just skipping. This is for
2328  					 * BINDER_TYPE_FDA where the translated
2329  					 * fds will be fixed up when we get
2330  					 * to target context.
2331  					 */
2332  					bytes_copied += pf->skip_size;
2333  				} else {
2334  					/* apply the fixup indicated by pf */
2335  					if (!ret)
2336  						ret = binder_alloc_copy_to_buffer(
2337  							alloc, buffer,
2338  							pf->offset,
2339  							&pf->fixup_data,
2340  							sizeof(pf->fixup_data));
2341  					bytes_copied += sizeof(pf->fixup_data);
2342  				}
2343  				list_del(&pf->node);
2344  				kfree(pf);
2345  				pf = list_first_entry_or_null(pf_head,
2346  						struct binder_ptr_fixup, node);
2347  			}
2348  		}
2349  		list_del(&sgc->node);
2350  		kfree(sgc);
2351  	}
2352  	BUG_ON(!list_empty(pf_head));
2353  	BUG_ON(!list_empty(sgc_head));
2354  
2355  	return ret > 0 ? -EINVAL : ret;
2356  }
2357  
2358  /**
2359   * binder_cleanup_deferred_txn_lists() - free specified lists
2360   * @sgc_head:	list_head of scatter-gather copy list
2361   * @pf_head:	list_head of pointer fixup list
2362   *
2363   * Called to clean up @sgc_head and @pf_head if there is an
2364   * error.
2365   */
2366  static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2367  					      struct list_head *pf_head)
2368  {
2369  	struct binder_sg_copy *sgc, *tmpsgc;
2370  	struct binder_ptr_fixup *pf, *tmppf;
2371  
2372  	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2373  		list_del(&sgc->node);
2374  		kfree(sgc);
2375  	}
2376  	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2377  		list_del(&pf->node);
2378  		kfree(pf);
2379  	}
2380  }
2381  
2382  /**
2383   * binder_defer_copy() - queue a scatter-gather buffer for copy
2384   * @sgc_head:		list_head of scatter-gather copy list
2385   * @offset:		binder buffer offset in target process
2386   * @sender_uaddr:	user address in source process
2387   * @length:		bytes to copy
2388   *
2389   * Specify a scatter-gather block to be copied. The actual copy must
2390   * be deferred until all the needed fixups are identified and queued.
2391   * Then the copy and fixups are done together so un-translated values
2392   * from the source are never visible in the target buffer.
2393   *
2394   * We are guaranteed that repeated calls to this function will have
2395   * monotonically increasing @offset values so the list will naturally
2396   * be ordered.
2397   *
2398   * Return: 0=success, else -errno
2399   */
2400  static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2401  			     const void __user *sender_uaddr, size_t length)
2402  {
2403  	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2404  
2405  	if (!bc)
2406  		return -ENOMEM;
2407  
2408  	bc->offset = offset;
2409  	bc->sender_uaddr = sender_uaddr;
2410  	bc->length = length;
2411  	INIT_LIST_HEAD(&bc->node);
2412  
2413  	/*
2414  	 * We are guaranteed that the deferred copies are in-order
2415  	 * so just add to the tail.
2416  	 */
2417  	list_add_tail(&bc->node, sgc_head);
2418  
2419  	return 0;
2420  }
2421  
2422  /**
2423   * binder_add_fixup() - queue a fixup to be applied to sg copy
2424   * @pf_head:	list_head of binder ptr fixup list
2425   * @offset:	binder buffer offset in target process
2426   * @fixup:	bytes to be copied for fixup
2427   * @skip_size:	bytes to skip when copying (fixup will be applied later)
2428   *
2429   * Add the specified fixup to a list ordered by @offset. When copying
2430   * the scatter-gather buffers, the fixup will be copied instead of
2431   * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2432   * will be applied later (in target process context), so we just skip
2433   * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2434   * value in @fixup.
2435   *
2436   * This function is called *mostly* in @offset order, but there are
2437   * exceptions. Since out-of-order inserts are relatively uncommon,
2438   * we insert the new element by searching backward from the tail of
2439   * the list.
2440   *
2441   * Return: 0=success, else -errno
2442   */
2443  static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2444  			    binder_uintptr_t fixup, size_t skip_size)
2445  {
2446  	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2447  	struct binder_ptr_fixup *tmppf;
2448  
2449  	if (!pf)
2450  		return -ENOMEM;
2451  
2452  	pf->offset = offset;
2453  	pf->fixup_data = fixup;
2454  	pf->skip_size = skip_size;
2455  	INIT_LIST_HEAD(&pf->node);
2456  
2457  	/* Fixups are *mostly* added in-order, but there are some
2458  	 * exceptions. Look backwards through list for insertion point.
2459  	 */
2460  	list_for_each_entry_reverse(tmppf, pf_head, node) {
2461  		if (tmppf->offset < pf->offset) {
2462  			list_add(&pf->node, &tmppf->node);
2463  			return 0;
2464  		}
2465  	}
2466  	/*
2467  	 * if we get here, then the new offset is the lowest so
2468  	 * insert at the head
2469  	 */
2470  	list_add(&pf->node, pf_head);
2471  	return 0;
2472  }
2473  
2474  static int binder_translate_fd_array(struct list_head *pf_head,
2475  				     struct binder_fd_array_object *fda,
2476  				     const void __user *sender_ubuffer,
2477  				     struct binder_buffer_object *parent,
2478  				     struct binder_buffer_object *sender_uparent,
2479  				     struct binder_transaction *t,
2480  				     struct binder_thread *thread,
2481  				     struct binder_transaction *in_reply_to)
2482  {
2483  	binder_size_t fdi, fd_buf_size;
2484  	binder_size_t fda_offset;
2485  	const void __user *sender_ufda_base;
2486  	struct binder_proc *proc = thread->proc;
2487  	int ret;
2488  
2489  	fd_buf_size = sizeof(u32) * fda->num_fds;
2490  	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2491  		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2492  				  proc->pid, thread->pid, (u64)fda->num_fds);
2493  		return -EINVAL;
2494  	}
2495  	if (fd_buf_size > parent->length ||
2496  	    fda->parent_offset > parent->length - fd_buf_size) {
2497  		/* No space for all file descriptors here. */
2498  		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2499  				  proc->pid, thread->pid, (u64)fda->num_fds);
2500  		return -EINVAL;
2501  	}
2502  	/*
2503  	 * the source data for binder_buffer_object is visible
2504  	 * to user-space and the @buffer element is the user
2505  	 * pointer to the buffer_object containing the fd_array.
2506  	 * Convert the address to an offset relative to
2507  	 * the base of the transaction buffer.
2508  	 */
2509  	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2510  		fda->parent_offset;
2511  	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2512  				fda->parent_offset;
2513  
2514  	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2515  	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2516  		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2517  				  proc->pid, thread->pid);
2518  		return -EINVAL;
2519  	}
2520  	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2521  	if (ret)
2522  		return ret;
2523  
2524  	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2525  		u32 fd;
2526  		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2527  		binder_size_t sender_uoffset = fdi * sizeof(fd);
2528  
2529  		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2530  		if (!ret)
2531  			ret = binder_translate_fd(fd, offset, t, thread,
2532  						  in_reply_to);
2533  		if (ret)
2534  			return ret > 0 ? -EINVAL : ret;
2535  	}
2536  	return 0;
2537  }
2538  
2539  static int binder_fixup_parent(struct list_head *pf_head,
2540  			       struct binder_transaction *t,
2541  			       struct binder_thread *thread,
2542  			       struct binder_buffer_object *bp,
2543  			       binder_size_t off_start_offset,
2544  			       binder_size_t num_valid,
2545  			       binder_size_t last_fixup_obj_off,
2546  			       binder_size_t last_fixup_min_off)
2547  {
2548  	struct binder_buffer_object *parent;
2549  	struct binder_buffer *b = t->buffer;
2550  	struct binder_proc *proc = thread->proc;
2551  	struct binder_proc *target_proc = t->to_proc;
2552  	struct binder_object object;
2553  	binder_size_t buffer_offset;
2554  	binder_size_t parent_offset;
2555  
2556  	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2557  		return 0;
2558  
2559  	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2560  				     off_start_offset, &parent_offset,
2561  				     num_valid);
2562  	if (!parent) {
2563  		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2564  				  proc->pid, thread->pid);
2565  		return -EINVAL;
2566  	}
2567  
2568  	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2569  				   parent_offset, bp->parent_offset,
2570  				   last_fixup_obj_off,
2571  				   last_fixup_min_off)) {
2572  		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2573  				  proc->pid, thread->pid);
2574  		return -EINVAL;
2575  	}
2576  
2577  	if (parent->length < sizeof(binder_uintptr_t) ||
2578  	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2579  		/* No space for a pointer here! */
2580  		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2581  				  proc->pid, thread->pid);
2582  		return -EINVAL;
2583  	}
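	/*
	 * Translate the parent's user-space buffer pointer plus the fixup
	 * offset into an offset within the target transaction buffer and
	 * queue the pointer fixup there.
	 */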
2584  	buffer_offset = bp->parent_offset +
2585  			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2586  	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2587  }
2588  
2589  /**
2590   * binder_proc_transaction() - sends a transaction to a process and wakes it up
2591   * @t:		transaction to send
2592   * @proc:	process to send the transaction to
2593   * @thread:	thread in @proc to send the transaction to (may be NULL)
2594   *
2595   * This function queues a transaction to the specified process. It will try
2596   * to find a thread in the target process to handle the transaction and
2597   * wake it up. If no thread is found, the work is queued to the proc
2598   * waitqueue.
2599   *
2600   * If the @thread parameter is not NULL, the transaction is always queued
2601   * to the waitlist of that specific thread.
2602   *
2603   * Return:	0 if the transaction was successfully queued
2604   *		BR_DEAD_REPLY if the target process or thread is dead
2605   *		BR_FROZEN_REPLY if the target process or thread is frozen
2606   */
2607  static int binder_proc_transaction(struct binder_transaction *t,
2608  				    struct binder_proc *proc,
2609  				    struct binder_thread *thread)
2610  {
2611  	struct binder_node *node = t->buffer->target_node;
2612  	bool oneway = !!(t->flags & TF_ONE_WAY);
2613  	bool pending_async = false;
2614  
2615  	BUG_ON(!node);
2616  	binder_node_lock(node);
2617  	if (oneway) {
2618  		BUG_ON(thread);
2619  		if (node->has_async_transaction)
2620  			pending_async = true;
2621  		else
2622  			node->has_async_transaction = true;
2623  	}
2624  
2625  	binder_inner_proc_lock(proc);
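	/*
	 * While the target is frozen, note whether a sync or async
	 * transaction was attempted; user space can read these flags
	 * back via BINDER_GET_FROZEN_INFO.
	 */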
2626  	if (proc->is_frozen) {
2627  		proc->sync_recv |= !oneway;
2628  		proc->async_recv |= oneway;
2629  	}
2630  
2631  	if ((proc->is_frozen && !oneway) || proc->is_dead ||
2632  			(thread && thread->is_dead)) {
2633  		binder_inner_proc_unlock(proc);
2634  		binder_node_unlock(node);
2635  		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2636  	}
2637  
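	/*
	 * Pick a destination: an explicit or selected thread gets the work
	 * on its own todo list, otherwise it goes to the proc todo; oneway
	 * work is serialized through node->async_todo while another async
	 * transaction on this node is still pending.
	 */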
2638  	if (!thread && !pending_async)
2639  		thread = binder_select_thread_ilocked(proc);
2640  
2641  	if (thread)
2642  		binder_enqueue_thread_work_ilocked(thread, &t->work);
2643  	else if (!pending_async)
2644  		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2645  	else
2646  		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2647  
2648  	if (!pending_async)
2649  		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2650  
2651  	proc->outstanding_txns++;
2652  	binder_inner_proc_unlock(proc);
2653  	binder_node_unlock(node);
2654  
2655  	return 0;
2656  }
2657  
2658  /**
2659   * binder_get_node_refs_for_txn() - Get required refs on node for txn
2660   * @node:         struct binder_node for which to get refs
2661   * @procp:        returns @node->proc if valid
2662   * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2663   *
2664   * User-space normally keeps the node alive when creating a transaction
2665   * since it has a reference to the target. The local strong ref keeps it
2666   * alive if the sending process dies before the target process processes
2667   * the transaction. If the source process is malicious or has a reference
2668   * counting bug, relying on the local strong ref can fail.
2669   *
2670   * Since user-space can cause the local strong ref to go away, we also take
2671   * a tmpref on the node to ensure it survives while we are constructing
2672   * the transaction. We also need a tmpref on the proc while we are
2673   * constructing the transaction, so we take that here as well.
2674   *
2675   * Return: The target_node with refs taken, or NULL if @node->proc is NULL
2676   * (the target proc has died), in which case @error is set to BR_DEAD_REPLY.
2677   * Also sets @procp when the node is valid.
2678   */
2679  static struct binder_node *binder_get_node_refs_for_txn(
2680  		struct binder_node *node,
2681  		struct binder_proc **procp,
2682  		uint32_t *error)
2683  {
2684  	struct binder_node *target_node = NULL;
2685  
2686  	binder_node_inner_lock(node);
2687  	if (node->proc) {
2688  		target_node = node;
2689  		binder_inc_node_nilocked(node, 1, 0, NULL);
2690  		binder_inc_node_tmpref_ilocked(node);
2691  		node->proc->tmp_ref++;
2692  		*procp = node->proc;
2693  	} else
2694  		*error = BR_DEAD_REPLY;
2695  	binder_node_inner_unlock(node);
2696  
2697  	return target_node;
2698  }
2699  
2700  static void binder_transaction(struct binder_proc *proc,
2701  			       struct binder_thread *thread,
2702  			       struct binder_transaction_data *tr, int reply,
2703  			       binder_size_t extra_buffers_size)
2704  {
2705  	int ret;
2706  	struct binder_transaction *t;
2707  	struct binder_work *w;
2708  	struct binder_work *tcomplete;
2709  	binder_size_t buffer_offset = 0;
2710  	binder_size_t off_start_offset, off_end_offset;
2711  	binder_size_t off_min;
2712  	binder_size_t sg_buf_offset, sg_buf_end_offset;
2713  	binder_size_t user_offset = 0;
2714  	struct binder_proc *target_proc = NULL;
2715  	struct binder_thread *target_thread = NULL;
2716  	struct binder_node *target_node = NULL;
2717  	struct binder_transaction *in_reply_to = NULL;
2718  	struct binder_transaction_log_entry *e;
2719  	uint32_t return_error = 0;
2720  	uint32_t return_error_param = 0;
2721  	uint32_t return_error_line = 0;
2722  	binder_size_t last_fixup_obj_off = 0;
2723  	binder_size_t last_fixup_min_off = 0;
2724  	struct binder_context *context = proc->context;
2725  	int t_debug_id = atomic_inc_return(&binder_last_id);
2726  	char *secctx = NULL;
2727  	u32 secctx_sz = 0;
2728  	struct list_head sgc_head;
2729  	struct list_head pf_head;
2730  	const void __user *user_buffer = (const void __user *)
2731  				(uintptr_t)tr->data.ptr.buffer;
2732  	INIT_LIST_HEAD(&sgc_head);
2733  	INIT_LIST_HEAD(&pf_head);
2734  
2735  	e = binder_transaction_log_add(&binder_transaction_log);
2736  	e->debug_id = t_debug_id;
2737  	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2738  	e->from_proc = proc->pid;
2739  	e->from_thread = thread->pid;
2740  	e->target_handle = tr->target.handle;
2741  	e->data_size = tr->data_size;
2742  	e->offsets_size = tr->offsets_size;
2743  	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2744  
2745  	if (reply) {
2746  		binder_inner_proc_lock(proc);
2747  		in_reply_to = thread->transaction_stack;
2748  		if (in_reply_to == NULL) {
2749  			binder_inner_proc_unlock(proc);
2750  			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2751  					  proc->pid, thread->pid);
2752  			return_error = BR_FAILED_REPLY;
2753  			return_error_param = -EPROTO;
2754  			return_error_line = __LINE__;
2755  			goto err_empty_call_stack;
2756  		}
2757  		if (in_reply_to->to_thread != thread) {
2758  			spin_lock(&in_reply_to->lock);
2759  			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2760  				proc->pid, thread->pid, in_reply_to->debug_id,
2761  				in_reply_to->to_proc ?
2762  				in_reply_to->to_proc->pid : 0,
2763  				in_reply_to->to_thread ?
2764  				in_reply_to->to_thread->pid : 0);
2765  			spin_unlock(&in_reply_to->lock);
2766  			binder_inner_proc_unlock(proc);
2767  			return_error = BR_FAILED_REPLY;
2768  			return_error_param = -EPROTO;
2769  			return_error_line = __LINE__;
2770  			in_reply_to = NULL;
2771  			goto err_bad_call_stack;
2772  		}
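		/* Pop the transaction being replied to off this thread's stack */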
2773  		thread->transaction_stack = in_reply_to->to_parent;
2774  		binder_inner_proc_unlock(proc);
2775  		binder_set_nice(in_reply_to->saved_priority);
2776  		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2777  		if (target_thread == NULL) {
2778  			/* annotation for sparse */
2779  			__release(&target_thread->proc->inner_lock);
2780  			return_error = BR_DEAD_REPLY;
2781  			return_error_line = __LINE__;
2782  			goto err_dead_binder;
2783  		}
2784  		if (target_thread->transaction_stack != in_reply_to) {
2785  			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2786  				proc->pid, thread->pid,
2787  				target_thread->transaction_stack ?
2788  				target_thread->transaction_stack->debug_id : 0,
2789  				in_reply_to->debug_id);
2790  			binder_inner_proc_unlock(target_thread->proc);
2791  			return_error = BR_FAILED_REPLY;
2792  			return_error_param = -EPROTO;
2793  			return_error_line = __LINE__;
2794  			in_reply_to = NULL;
2795  			target_thread = NULL;
2796  			goto err_dead_binder;
2797  		}
2798  		target_proc = target_thread->proc;
2799  		target_proc->tmp_ref++;
2800  		binder_inner_proc_unlock(target_thread->proc);
2801  	} else {
2802  		if (tr->target.handle) {
2803  			struct binder_ref *ref;
2804  
2805  			/*
2806  			 * There must already be a strong ref
2807  			 * on this node. If so, do a strong
2808  			 * increment on the node to ensure it
2809  			 * stays alive until the transaction is
2810  			 * done.
2811  			 */
2812  			binder_proc_lock(proc);
2813  			ref = binder_get_ref_olocked(proc, tr->target.handle,
2814  						     true);
2815  			if (ref) {
2816  				target_node = binder_get_node_refs_for_txn(
2817  						ref->node, &target_proc,
2818  						&return_error);
2819  			} else {
2820  				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2821  						  proc->pid, thread->pid, tr->target.handle);
2822  				return_error = BR_FAILED_REPLY;
2823  			}
2824  			binder_proc_unlock(proc);
2825  		} else {
2826  			mutex_lock(&context->context_mgr_node_lock);
2827  			target_node = context->binder_context_mgr_node;
2828  			if (target_node)
2829  				target_node = binder_get_node_refs_for_txn(
2830  						target_node, &target_proc,
2831  						&return_error);
2832  			else
2833  				return_error = BR_DEAD_REPLY;
2834  			mutex_unlock(&context->context_mgr_node_lock);
2835  			if (target_node && target_proc->pid == proc->pid) {
2836  				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2837  						  proc->pid, thread->pid);
2838  				return_error = BR_FAILED_REPLY;
2839  				return_error_param = -EINVAL;
2840  				return_error_line = __LINE__;
2841  				goto err_invalid_target_handle;
2842  			}
2843  		}
2844  		if (!target_node) {
2845  			/*
2846  			 * return_error is set above
2847  			 */
2848  			return_error_param = -EINVAL;
2849  			return_error_line = __LINE__;
2850  			goto err_dead_binder;
2851  		}
2852  		e->to_node = target_node->debug_id;
2853  		if (WARN_ON(proc == target_proc)) {
2854  			return_error = BR_FAILED_REPLY;
2855  			return_error_param = -EINVAL;
2856  			return_error_line = __LINE__;
2857  			goto err_invalid_target_handle;
2858  		}
2859  		if (security_binder_transaction(proc->cred,
2860  						target_proc->cred) < 0) {
2861  			return_error = BR_FAILED_REPLY;
2862  			return_error_param = -EPERM;
2863  			return_error_line = __LINE__;
2864  			goto err_invalid_target_handle;
2865  		}
2866  		binder_inner_proc_lock(proc);
2867  
2868  		w = list_first_entry_or_null(&thread->todo,
2869  					     struct binder_work, entry);
2870  		if (!(tr->flags & TF_ONE_WAY) && w &&
2871  		    w->type == BINDER_WORK_TRANSACTION) {
2872  			/*
2873  			 * Do not allow new outgoing transaction from a
2874  			 * thread that has a transaction at the head of
2875  			 * its todo list. Only need to check the head
2876  			 * because binder_select_thread_ilocked picks a
2877  			 * thread from proc->waiting_threads to enqueue
2878  			 * the transaction, and nothing is queued to the
2879  			 * todo list while the thread is on waiting_threads.
2880  			 */
2881  			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2882  					  proc->pid, thread->pid);
2883  			binder_inner_proc_unlock(proc);
2884  			return_error = BR_FAILED_REPLY;
2885  			return_error_param = -EPROTO;
2886  			return_error_line = __LINE__;
2887  			goto err_bad_todo_list;
2888  		}
2889  
2890  		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2891  			struct binder_transaction *tmp;
2892  
2893  			tmp = thread->transaction_stack;
2894  			if (tmp->to_thread != thread) {
2895  				spin_lock(&tmp->lock);
2896  				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2897  					proc->pid, thread->pid, tmp->debug_id,
2898  					tmp->to_proc ? tmp->to_proc->pid : 0,
2899  					tmp->to_thread ?
2900  					tmp->to_thread->pid : 0);
2901  				spin_unlock(&tmp->lock);
2902  				binder_inner_proc_unlock(proc);
2903  				return_error = BR_FAILED_REPLY;
2904  				return_error_param = -EPROTO;
2905  				return_error_line = __LINE__;
2906  				goto err_bad_call_stack;
2907  			}
2908  			while (tmp) {
2909  				struct binder_thread *from;
2910  
2911  				spin_lock(&tmp->lock);
2912  				from = tmp->from;
2913  				if (from && from->proc == target_proc) {
2914  					atomic_inc(&from->tmp_ref);
2915  					target_thread = from;
2916  					spin_unlock(&tmp->lock);
2917  					break;
2918  				}
2919  				spin_unlock(&tmp->lock);
2920  				tmp = tmp->from_parent;
2921  			}
2922  		}
2923  		binder_inner_proc_unlock(proc);
2924  	}
2925  	if (target_thread)
2926  		e->to_thread = target_thread->pid;
2927  	e->to_proc = target_proc->pid;
2928  
2929  	/* TODO: reuse incoming transaction for reply */
2930  	t = kzalloc(sizeof(*t), GFP_KERNEL);
2931  	if (t == NULL) {
2932  		return_error = BR_FAILED_REPLY;
2933  		return_error_param = -ENOMEM;
2934  		return_error_line = __LINE__;
2935  		goto err_alloc_t_failed;
2936  	}
2937  	INIT_LIST_HEAD(&t->fd_fixups);
2938  	binder_stats_created(BINDER_STAT_TRANSACTION);
2939  	spin_lock_init(&t->lock);
2940  
2941  	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2942  	if (tcomplete == NULL) {
2943  		return_error = BR_FAILED_REPLY;
2944  		return_error_param = -ENOMEM;
2945  		return_error_line = __LINE__;
2946  		goto err_alloc_tcomplete_failed;
2947  	}
2948  	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2949  
2950  	t->debug_id = t_debug_id;
2951  
2952  	if (reply)
2953  		binder_debug(BINDER_DEBUG_TRANSACTION,
2954  			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2955  			     proc->pid, thread->pid, t->debug_id,
2956  			     target_proc->pid, target_thread->pid,
2957  			     (u64)tr->data.ptr.buffer,
2958  			     (u64)tr->data.ptr.offsets,
2959  			     (u64)tr->data_size, (u64)tr->offsets_size,
2960  			     (u64)extra_buffers_size);
2961  	else
2962  		binder_debug(BINDER_DEBUG_TRANSACTION,
2963  			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2964  			     proc->pid, thread->pid, t->debug_id,
2965  			     target_proc->pid, target_node->debug_id,
2966  			     (u64)tr->data.ptr.buffer,
2967  			     (u64)tr->data.ptr.offsets,
2968  			     (u64)tr->data_size, (u64)tr->offsets_size,
2969  			     (u64)extra_buffers_size);
2970  
2971  	if (!reply && !(tr->flags & TF_ONE_WAY))
2972  		t->from = thread;
2973  	else
2974  		t->from = NULL;
2975  	t->sender_euid = task_euid(proc->tsk);
2976  	t->to_proc = target_proc;
2977  	t->to_thread = target_thread;
2978  	t->code = tr->code;
2979  	t->flags = tr->flags;
2980  	t->priority = task_nice(current);
2981  
2982  	if (target_node && target_node->txn_security_ctx) {
2983  		u32 secid;
2984  		size_t added_size;
2985  
2986  		security_cred_getsecid(proc->cred, &secid);
2987  		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2988  		if (ret) {
2989  			return_error = BR_FAILED_REPLY;
2990  			return_error_param = ret;
2991  			return_error_line = __LINE__;
2992  			goto err_get_secctx_failed;
2993  		}
2994  		added_size = ALIGN(secctx_sz, sizeof(u64));
2995  		extra_buffers_size += added_size;
2996  		if (extra_buffers_size < added_size) {
2997  			/* integer overflow of extra_buffers_size */
2998  			return_error = BR_FAILED_REPLY;
2999  			return_error_param = -EINVAL;
3000  			return_error_line = __LINE__;
3001  			goto err_bad_extra_size;
3002  		}
3003  	}
3004  
3005  	trace_binder_transaction(reply, t, target_node);
3006  
3007  	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3008  		tr->offsets_size, extra_buffers_size,
3009  		!reply && (t->flags & TF_ONE_WAY), current->tgid);
3010  	if (IS_ERR(t->buffer)) {
3011  		/*
3012  		 * -ESRCH indicates VMA cleared. The target is dying.
3013  		 */
3014  		return_error_param = PTR_ERR(t->buffer);
3015  		return_error = return_error_param == -ESRCH ?
3016  			BR_DEAD_REPLY : BR_FAILED_REPLY;
3017  		return_error_line = __LINE__;
3018  		t->buffer = NULL;
3019  		goto err_binder_alloc_buf_failed;
3020  	}
3021  	if (secctx) {
3022  		int err;
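		/*
		 * The security context occupies the last ALIGN(secctx_sz, 8)
		 * bytes of the buffer (it was appended to extra_buffers_size
		 * above), so compute its offset from the end.
		 */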
3023  		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3024  				    ALIGN(tr->offsets_size, sizeof(void *)) +
3025  				    ALIGN(extra_buffers_size, sizeof(void *)) -
3026  				    ALIGN(secctx_sz, sizeof(u64));
3027  
3028  		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3029  		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3030  						  t->buffer, buf_offset,
3031  						  secctx, secctx_sz);
3032  		if (err) {
3033  			t->security_ctx = 0;
3034  			WARN_ON(1);
3035  		}
3036  		security_release_secctx(secctx, secctx_sz);
3037  		secctx = NULL;
3038  	}
3039  	t->buffer->debug_id = t->debug_id;
3040  	t->buffer->transaction = t;
3041  	t->buffer->target_node = target_node;
3042  	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3043  	trace_binder_transaction_alloc_buf(t->buffer);
3044  
3045  	if (binder_alloc_copy_user_to_buffer(
3046  				&target_proc->alloc,
3047  				t->buffer,
3048  				ALIGN(tr->data_size, sizeof(void *)),
3049  				(const void __user *)
3050  					(uintptr_t)tr->data.ptr.offsets,
3051  				tr->offsets_size)) {
3052  		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3053  				proc->pid, thread->pid);
3054  		return_error = BR_FAILED_REPLY;
3055  		return_error_param = -EFAULT;
3056  		return_error_line = __LINE__;
3057  		goto err_copy_data_failed;
3058  	}
3059  	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3060  		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3061  				proc->pid, thread->pid, (u64)tr->offsets_size);
3062  		return_error = BR_FAILED_REPLY;
3063  		return_error_param = -EINVAL;
3064  		return_error_line = __LINE__;
3065  		goto err_bad_offset;
3066  	}
3067  	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3068  		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3069  				  proc->pid, thread->pid,
3070  				  (u64)extra_buffers_size);
3071  		return_error = BR_FAILED_REPLY;
3072  		return_error_param = -EINVAL;
3073  		return_error_line = __LINE__;
3074  		goto err_bad_offset;
3075  	}
3076  	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3077  	buffer_offset = off_start_offset;
3078  	off_end_offset = off_start_offset + tr->offsets_size;
3079  	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3080  	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3081  		ALIGN(secctx_sz, sizeof(u64));
3082  	off_min = 0;
3083  	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3084  	     buffer_offset += sizeof(binder_size_t)) {
3085  		struct binder_object_header *hdr;
3086  		size_t object_size;
3087  		struct binder_object object;
3088  		binder_size_t object_offset;
3089  		binder_size_t copy_size;
3090  
3091  		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3092  						  &object_offset,
3093  						  t->buffer,
3094  						  buffer_offset,
3095  						  sizeof(object_offset))) {
3096  			return_error = BR_FAILED_REPLY;
3097  			return_error_param = -EINVAL;
3098  			return_error_line = __LINE__;
3099  			goto err_bad_offset;
3100  		}
3101  
3102  		/*
3103  		 * Copy the source user buffer up to the next object
3104  		 * that will be processed.
3105  		 */
3106  		copy_size = object_offset - user_offset;
3107  		if (copy_size && (user_offset > object_offset ||
3108  				binder_alloc_copy_user_to_buffer(
3109  					&target_proc->alloc,
3110  					t->buffer, user_offset,
3111  					user_buffer + user_offset,
3112  					copy_size))) {
3113  			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3114  					proc->pid, thread->pid);
3115  			return_error = BR_FAILED_REPLY;
3116  			return_error_param = -EFAULT;
3117  			return_error_line = __LINE__;
3118  			goto err_copy_data_failed;
3119  		}
3120  		object_size = binder_get_object(target_proc, user_buffer,
3121  				t->buffer, object_offset, &object);
3122  		if (object_size == 0 || object_offset < off_min) {
3123  			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3124  					  proc->pid, thread->pid,
3125  					  (u64)object_offset,
3126  					  (u64)off_min,
3127  					  (u64)t->buffer->data_size);
3128  			return_error = BR_FAILED_REPLY;
3129  			return_error_param = -EINVAL;
3130  			return_error_line = __LINE__;
3131  			goto err_bad_offset;
3132  		}
3133  		/*
3134  		 * Set offset to the next buffer fragment to be
3135  		 * copied
3136  		 */
3137  		user_offset = object_offset + object_size;
3138  
3139  		hdr = &object.hdr;
3140  		off_min = object_offset + object_size;
3141  		switch (hdr->type) {
3142  		case BINDER_TYPE_BINDER:
3143  		case BINDER_TYPE_WEAK_BINDER: {
3144  			struct flat_binder_object *fp;
3145  
3146  			fp = to_flat_binder_object(hdr);
3147  			ret = binder_translate_binder(fp, t, thread);
3148  
3149  			if (ret < 0 ||
3150  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3151  							t->buffer,
3152  							object_offset,
3153  							fp, sizeof(*fp))) {
3154  				return_error = BR_FAILED_REPLY;
3155  				return_error_param = ret;
3156  				return_error_line = __LINE__;
3157  				goto err_translate_failed;
3158  			}
3159  		} break;
3160  		case BINDER_TYPE_HANDLE:
3161  		case BINDER_TYPE_WEAK_HANDLE: {
3162  			struct flat_binder_object *fp;
3163  
3164  			fp = to_flat_binder_object(hdr);
3165  			ret = binder_translate_handle(fp, t, thread);
3166  			if (ret < 0 ||
3167  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3168  							t->buffer,
3169  							object_offset,
3170  							fp, sizeof(*fp))) {
3171  				return_error = BR_FAILED_REPLY;
3172  				return_error_param = ret;
3173  				return_error_line = __LINE__;
3174  				goto err_translate_failed;
3175  			}
3176  		} break;
3177  
3178  		case BINDER_TYPE_FD: {
3179  			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3180  			binder_size_t fd_offset = object_offset +
3181  				(uintptr_t)&fp->fd - (uintptr_t)fp;
3182  			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3183  						      thread, in_reply_to);
3184  
3185  			fp->pad_binder = 0;
3186  			if (ret < 0 ||
3187  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3188  							t->buffer,
3189  							object_offset,
3190  							fp, sizeof(*fp))) {
3191  				return_error = BR_FAILED_REPLY;
3192  				return_error_param = ret;
3193  				return_error_line = __LINE__;
3194  				goto err_translate_failed;
3195  			}
3196  		} break;
3197  		case BINDER_TYPE_FDA: {
3198  			struct binder_object ptr_object;
3199  			binder_size_t parent_offset;
3200  			struct binder_object user_object;
3201  			size_t user_parent_size;
3202  			struct binder_fd_array_object *fda =
3203  				to_binder_fd_array_object(hdr);
3204  			size_t num_valid = (buffer_offset - off_start_offset) /
3205  						sizeof(binder_size_t);
3206  			struct binder_buffer_object *parent =
3207  				binder_validate_ptr(target_proc, t->buffer,
3208  						    &ptr_object, fda->parent,
3209  						    off_start_offset,
3210  						    &parent_offset,
3211  						    num_valid);
3212  			if (!parent) {
3213  				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3214  						  proc->pid, thread->pid);
3215  				return_error = BR_FAILED_REPLY;
3216  				return_error_param = -EINVAL;
3217  				return_error_line = __LINE__;
3218  				goto err_bad_parent;
3219  			}
3220  			if (!binder_validate_fixup(target_proc, t->buffer,
3221  						   off_start_offset,
3222  						   parent_offset,
3223  						   fda->parent_offset,
3224  						   last_fixup_obj_off,
3225  						   last_fixup_min_off)) {
3226  				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3227  						  proc->pid, thread->pid);
3228  				return_error = BR_FAILED_REPLY;
3229  				return_error_param = -EINVAL;
3230  				return_error_line = __LINE__;
3231  				goto err_bad_parent;
3232  			}
3233  			/*
3234  			 * We need to read the user version of the parent
3235  			 * object to get the original user offset
3236  			 */
3237  			user_parent_size =
3238  				binder_get_object(proc, user_buffer, t->buffer,
3239  						  parent_offset, &user_object);
3240  			if (user_parent_size != sizeof(user_object.bbo)) {
3241  				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3242  						  proc->pid, thread->pid,
3243  						  user_parent_size,
3244  						  sizeof(user_object.bbo));
3245  				return_error = BR_FAILED_REPLY;
3246  				return_error_param = -EINVAL;
3247  				return_error_line = __LINE__;
3248  				goto err_bad_parent;
3249  			}
3250  			ret = binder_translate_fd_array(&pf_head, fda,
3251  							user_buffer, parent,
3252  							&user_object.bbo, t,
3253  							thread, in_reply_to);
3254  			if (!ret)
3255  				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3256  								  t->buffer,
3257  								  object_offset,
3258  								  fda, sizeof(*fda));
3259  			if (ret) {
3260  				return_error = BR_FAILED_REPLY;
3261  				return_error_param = ret > 0 ? -EINVAL : ret;
3262  				return_error_line = __LINE__;
3263  				goto err_translate_failed;
3264  			}
3265  			last_fixup_obj_off = parent_offset;
3266  			last_fixup_min_off =
3267  				fda->parent_offset + sizeof(u32) * fda->num_fds;
3268  		} break;
3269  		case BINDER_TYPE_PTR: {
3270  			struct binder_buffer_object *bp =
3271  				to_binder_buffer_object(hdr);
3272  			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3273  			size_t num_valid;
3274  
3275  			if (bp->length > buf_left) {
3276  				binder_user_error("%d:%d got transaction with too large buffer\n",
3277  						  proc->pid, thread->pid);
3278  				return_error = BR_FAILED_REPLY;
3279  				return_error_param = -EINVAL;
3280  				return_error_line = __LINE__;
3281  				goto err_bad_offset;
3282  			}
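			/*
			 * The sg buffer contents are not copied here; the
			 * copy is deferred and performed together with any
			 * recorded fixups in binder_do_deferred_txn_copies()
			 * once all objects have been processed.
			 */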
3283  			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3284  				(const void __user *)(uintptr_t)bp->buffer,
3285  				bp->length);
3286  			if (ret) {
3287  				return_error = BR_FAILED_REPLY;
3288  				return_error_param = ret;
3289  				return_error_line = __LINE__;
3290  				goto err_translate_failed;
3291  			}
3292  			/* Fixup buffer pointer to target proc address space */
3293  			bp->buffer = (uintptr_t)
3294  				t->buffer->user_data + sg_buf_offset;
3295  			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3296  
3297  			num_valid = (buffer_offset - off_start_offset) /
3298  					sizeof(binder_size_t);
3299  			ret = binder_fixup_parent(&pf_head, t,
3300  						  thread, bp,
3301  						  off_start_offset,
3302  						  num_valid,
3303  						  last_fixup_obj_off,
3304  						  last_fixup_min_off);
3305  			if (ret < 0 ||
3306  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3307  							t->buffer,
3308  							object_offset,
3309  							bp, sizeof(*bp))) {
3310  				return_error = BR_FAILED_REPLY;
3311  				return_error_param = ret;
3312  				return_error_line = __LINE__;
3313  				goto err_translate_failed;
3314  			}
3315  			last_fixup_obj_off = object_offset;
3316  			last_fixup_min_off = 0;
3317  		} break;
3318  		default:
3319  			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3320  				proc->pid, thread->pid, hdr->type);
3321  			return_error = BR_FAILED_REPLY;
3322  			return_error_param = -EINVAL;
3323  			return_error_line = __LINE__;
3324  			goto err_bad_object_type;
3325  		}
3326  	}
3327  	/* Done processing objects, copy the rest of the buffer */
3328  	if (binder_alloc_copy_user_to_buffer(
3329  				&target_proc->alloc,
3330  				t->buffer, user_offset,
3331  				user_buffer + user_offset,
3332  				tr->data_size - user_offset)) {
3333  		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3334  				proc->pid, thread->pid);
3335  		return_error = BR_FAILED_REPLY;
3336  		return_error_param = -EFAULT;
3337  		return_error_line = __LINE__;
3338  		goto err_copy_data_failed;
3339  	}
3340  
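	/*
	 * All objects have been processed; perform the buffer copies and
	 * pointer fixups that were deferred above (sgc_head/pf_head).
	 */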
3341  	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3342  					    &sgc_head, &pf_head);
3343  	if (ret) {
3344  		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3345  				  proc->pid, thread->pid);
3346  		return_error = BR_FAILED_REPLY;
3347  		return_error_param = ret;
3348  		return_error_line = __LINE__;
3349  		goto err_copy_data_failed;
3350  	}
3351  	if (t->buffer->oneway_spam_suspect)
3352  		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3353  	else
3354  		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3355  	t->work.type = BINDER_WORK_TRANSACTION;
3356  
3357  	if (reply) {
3358  		binder_enqueue_thread_work(thread, tcomplete);
3359  		binder_inner_proc_lock(target_proc);
3360  		if (target_thread->is_dead) {
3361  			return_error = BR_DEAD_REPLY;
3362  			binder_inner_proc_unlock(target_proc);
3363  			goto err_dead_proc_or_thread;
3364  		}
3365  		BUG_ON(t->buffer->async_transaction != 0);
3366  		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3367  		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3368  		target_proc->outstanding_txns++;
3369  		binder_inner_proc_unlock(target_proc);
3370  		wake_up_interruptible_sync(&target_thread->wait);
3371  		binder_free_transaction(in_reply_to);
3372  	} else if (!(t->flags & TF_ONE_WAY)) {
3373  		BUG_ON(t->buffer->async_transaction != 0);
3374  		binder_inner_proc_lock(proc);
3375  		/*
3376  		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3377  		 * userspace immediately; this allows the target process to
3378  		 * immediately start processing this transaction, reducing
3379  		 * latency. We will then return the TRANSACTION_COMPLETE when
3380  		 * the target replies (or there is an error).
3381  		 */
3382  		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3383  		t->need_reply = 1;
3384  		t->from_parent = thread->transaction_stack;
3385  		thread->transaction_stack = t;
3386  		binder_inner_proc_unlock(proc);
3387  		return_error = binder_proc_transaction(t,
3388  				target_proc, target_thread);
3389  		if (return_error) {
3390  			binder_inner_proc_lock(proc);
3391  			binder_pop_transaction_ilocked(thread, t);
3392  			binder_inner_proc_unlock(proc);
3393  			goto err_dead_proc_or_thread;
3394  		}
3395  	} else {
3396  		BUG_ON(target_node == NULL);
3397  		BUG_ON(t->buffer->async_transaction != 1);
3398  		binder_enqueue_thread_work(thread, tcomplete);
3399  		return_error = binder_proc_transaction(t, target_proc, NULL);
3400  		if (return_error)
3401  			goto err_dead_proc_or_thread;
3402  	}
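	/* drop temporary refs taken while setting up the transaction */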
3403  	if (target_thread)
3404  		binder_thread_dec_tmpref(target_thread);
3405  	binder_proc_dec_tmpref(target_proc);
3406  	if (target_node)
3407  		binder_dec_node_tmpref(target_node);
3408  	/*
3409  	 * write barrier to synchronize with initialization
3410  	 * of log entry
3411  	 */
3412  	smp_wmb();
3413  	WRITE_ONCE(e->debug_id_done, t_debug_id);
3414  	return;
3415  
3416  err_dead_proc_or_thread:
3417  	return_error_line = __LINE__;
3418  	binder_dequeue_work(proc, tcomplete);
3419  err_translate_failed:
3420  err_bad_object_type:
3421  err_bad_offset:
3422  err_bad_parent:
3423  err_copy_data_failed:
3424  	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3425  	binder_free_txn_fixups(t);
3426  	trace_binder_transaction_failed_buffer_release(t->buffer);
3427  	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3428  					  buffer_offset, true);
3429  	if (target_node)
3430  		binder_dec_node_tmpref(target_node);
3431  	target_node = NULL;
3432  	t->buffer->transaction = NULL;
3433  	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3434  err_binder_alloc_buf_failed:
3435  err_bad_extra_size:
3436  	if (secctx)
3437  		security_release_secctx(secctx, secctx_sz);
3438  err_get_secctx_failed:
3439  	kfree(tcomplete);
3440  	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3441  err_alloc_tcomplete_failed:
3442  	if (trace_binder_txn_latency_free_enabled())
3443  		binder_txn_latency_free(t);
3444  	kfree(t);
3445  	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3446  err_alloc_t_failed:
3447  err_bad_todo_list:
3448  err_bad_call_stack:
3449  err_empty_call_stack:
3450  err_dead_binder:
3451  err_invalid_target_handle:
3452  	if (target_thread)
3453  		binder_thread_dec_tmpref(target_thread);
3454  	if (target_proc)
3455  		binder_proc_dec_tmpref(target_proc);
3456  	if (target_node) {
3457  		binder_dec_node(target_node, 1, 0);
3458  		binder_dec_node_tmpref(target_node);
3459  	}
3460  
3461  	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3462  		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3463  		     proc->pid, thread->pid, return_error, return_error_param,
3464  		     (u64)tr->data_size, (u64)tr->offsets_size,
3465  		     return_error_line);
3466  
3467  	{
3468  		struct binder_transaction_log_entry *fe;
3469  
3470  		e->return_error = return_error;
3471  		e->return_error_param = return_error_param;
3472  		e->return_error_line = return_error_line;
3473  		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3474  		*fe = *e;
3475  		/*
3476  		 * write barrier to synchronize with initialization
3477  		 * of log entry
3478  		 */
3479  		smp_wmb();
3480  		WRITE_ONCE(e->debug_id_done, t_debug_id);
3481  		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3482  	}
3483  
3484  	BUG_ON(thread->return_error.cmd != BR_OK);
3485  	if (in_reply_to) {
3486  		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3487  		binder_enqueue_thread_work(thread, &thread->return_error.work);
3488  		binder_send_failed_reply(in_reply_to, return_error);
3489  	} else {
3490  		thread->return_error.cmd = return_error;
3491  		binder_enqueue_thread_work(thread, &thread->return_error.work);
3492  	}
3493  }
3494  
/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns the buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
3506  static void
3507  binder_free_buf(struct binder_proc *proc,
3508  		struct binder_thread *thread,
3509  		struct binder_buffer *buffer, bool is_failure)
3510  {
3511  	binder_inner_proc_lock(proc);
3512  	if (buffer->transaction) {
3513  		buffer->transaction->buffer = NULL;
3514  		buffer->transaction = NULL;
3515  	}
3516  	binder_inner_proc_unlock(proc);
3517  	if (buffer->async_transaction && buffer->target_node) {
3518  		struct binder_node *buf_node;
3519  		struct binder_work *w;
3520  
3521  		buf_node = buffer->target_node;
3522  		binder_node_inner_lock(buf_node);
3523  		BUG_ON(!buf_node->has_async_transaction);
3524  		BUG_ON(buf_node->proc != proc);
3525  		w = binder_dequeue_work_head_ilocked(
3526  				&buf_node->async_todo);
3527  		if (!w) {
3528  			buf_node->has_async_transaction = false;
3529  		} else {
3530  			binder_enqueue_work_ilocked(
3531  					w, &proc->todo);
3532  			binder_wakeup_proc_ilocked(proc);
3533  		}
3534  		binder_node_inner_unlock(buf_node);
3535  	}
3536  	trace_binder_transaction_buffer_release(buffer);
3537  	binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3538  	binder_alloc_free_buf(&proc->alloc, buffer);
3539  }
3540  
3541  static int binder_thread_write(struct binder_proc *proc,
3542  			struct binder_thread *thread,
3543  			binder_uintptr_t binder_buffer, size_t size,
3544  			binder_size_t *consumed)
3545  {
3546  	uint32_t cmd;
3547  	struct binder_context *context = proc->context;
3548  	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3549  	void __user *ptr = buffer + *consumed;
3550  	void __user *end = buffer + size;
3551  
3552  	while (ptr < end && thread->return_error.cmd == BR_OK) {
3553  		int ret;
3554  
3555  		if (get_user(cmd, (uint32_t __user *)ptr))
3556  			return -EFAULT;
3557  		ptr += sizeof(uint32_t);
3558  		trace_binder_command(cmd);
3559  		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3560  			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3561  			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3562  			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3563  		}
3564  		switch (cmd) {
3565  		case BC_INCREFS:
3566  		case BC_ACQUIRE:
3567  		case BC_RELEASE:
3568  		case BC_DECREFS: {
3569  			uint32_t target;
3570  			const char *debug_string;
3571  			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3572  			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3573  			struct binder_ref_data rdata;
3574  
3575  			if (get_user(target, (uint32_t __user *)ptr))
3576  				return -EFAULT;
3577  
3578  			ptr += sizeof(uint32_t);
3579  			ret = -1;
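			/*
			 * An increment on target 0 refers to the context
			 * manager: take the reference directly on the context
			 * manager node.  Otherwise ret stays -1 and we fall
			 * back to the handle lookup below.
			 */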
3580  			if (increment && !target) {
3581  				struct binder_node *ctx_mgr_node;
3582  
3583  				mutex_lock(&context->context_mgr_node_lock);
3584  				ctx_mgr_node = context->binder_context_mgr_node;
3585  				if (ctx_mgr_node) {
3586  					if (ctx_mgr_node->proc == proc) {
3587  						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3588  								  proc->pid, thread->pid);
3589  						mutex_unlock(&context->context_mgr_node_lock);
3590  						return -EINVAL;
3591  					}
3592  					ret = binder_inc_ref_for_node(
3593  							proc, ctx_mgr_node,
3594  							strong, NULL, &rdata);
3595  				}
3596  				mutex_unlock(&context->context_mgr_node_lock);
3597  			}
3598  			if (ret)
3599  				ret = binder_update_ref_for_handle(
3600  						proc, target, increment, strong,
3601  						&rdata);
3602  			if (!ret && rdata.desc != target) {
3603  				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3604  					proc->pid, thread->pid,
3605  					target, rdata.desc);
3606  			}
3607  			switch (cmd) {
3608  			case BC_INCREFS:
3609  				debug_string = "IncRefs";
3610  				break;
3611  			case BC_ACQUIRE:
3612  				debug_string = "Acquire";
3613  				break;
3614  			case BC_RELEASE:
3615  				debug_string = "Release";
3616  				break;
3617  			case BC_DECREFS:
3618  			default:
3619  				debug_string = "DecRefs";
3620  				break;
3621  			}
3622  			if (ret) {
3623  				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3624  					proc->pid, thread->pid, debug_string,
3625  					strong, target, ret);
3626  				break;
3627  			}
3628  			binder_debug(BINDER_DEBUG_USER_REFS,
3629  				     "%d:%d %s ref %d desc %d s %d w %d\n",
3630  				     proc->pid, thread->pid, debug_string,
3631  				     rdata.debug_id, rdata.desc, rdata.strong,
3632  				     rdata.weak);
3633  			break;
3634  		}
3635  		case BC_INCREFS_DONE:
3636  		case BC_ACQUIRE_DONE: {
3637  			binder_uintptr_t node_ptr;
3638  			binder_uintptr_t cookie;
3639  			struct binder_node *node;
3640  			bool free_node;
3641  
3642  			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3643  				return -EFAULT;
3644  			ptr += sizeof(binder_uintptr_t);
3645  			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3646  				return -EFAULT;
3647  			ptr += sizeof(binder_uintptr_t);
3648  			node = binder_get_node(proc, node_ptr);
3649  			if (node == NULL) {
3650  				binder_user_error("%d:%d %s u%016llx no match\n",
3651  					proc->pid, thread->pid,
3652  					cmd == BC_INCREFS_DONE ?
3653  					"BC_INCREFS_DONE" :
3654  					"BC_ACQUIRE_DONE",
3655  					(u64)node_ptr);
3656  				break;
3657  			}
3658  			if (cookie != node->cookie) {
3659  				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3660  					proc->pid, thread->pid,
3661  					cmd == BC_INCREFS_DONE ?
3662  					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3663  					(u64)node_ptr, node->debug_id,
3664  					(u64)cookie, (u64)node->cookie);
3665  				binder_put_node(node);
3666  				break;
3667  			}
3668  			binder_node_inner_lock(node);
3669  			if (cmd == BC_ACQUIRE_DONE) {
3670  				if (node->pending_strong_ref == 0) {
3671  					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3672  						proc->pid, thread->pid,
3673  						node->debug_id);
3674  					binder_node_inner_unlock(node);
3675  					binder_put_node(node);
3676  					break;
3677  				}
3678  				node->pending_strong_ref = 0;
3679  			} else {
3680  				if (node->pending_weak_ref == 0) {
3681  					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3682  						proc->pid, thread->pid,
3683  						node->debug_id);
3684  					binder_node_inner_unlock(node);
3685  					binder_put_node(node);
3686  					break;
3687  				}
3688  				node->pending_weak_ref = 0;
3689  			}
3690  			free_node = binder_dec_node_nilocked(node,
3691  					cmd == BC_ACQUIRE_DONE, 0);
3692  			WARN_ON(free_node);
3693  			binder_debug(BINDER_DEBUG_USER_REFS,
3694  				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3695  				     proc->pid, thread->pid,
3696  				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3697  				     node->debug_id, node->local_strong_refs,
3698  				     node->local_weak_refs, node->tmp_refs);
3699  			binder_node_inner_unlock(node);
3700  			binder_put_node(node);
3701  			break;
3702  		}
3703  		case BC_ATTEMPT_ACQUIRE:
3704  			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3705  			return -EINVAL;
3706  		case BC_ACQUIRE_RESULT:
3707  			pr_err("BC_ACQUIRE_RESULT not supported\n");
3708  			return -EINVAL;
3709  
3710  		case BC_FREE_BUFFER: {
3711  			binder_uintptr_t data_ptr;
3712  			struct binder_buffer *buffer;
3713  
3714  			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3715  				return -EFAULT;
3716  			ptr += sizeof(binder_uintptr_t);
3717  
3718  			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3719  							      data_ptr);
3720  			if (IS_ERR_OR_NULL(buffer)) {
3721  				if (PTR_ERR(buffer) == -EPERM) {
3722  					binder_user_error(
3723  						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3724  						proc->pid, thread->pid,
3725  						(u64)data_ptr);
3726  				} else {
3727  					binder_user_error(
3728  						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3729  						proc->pid, thread->pid,
3730  						(u64)data_ptr);
3731  				}
3732  				break;
3733  			}
3734  			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3735  				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3736  				     proc->pid, thread->pid, (u64)data_ptr,
3737  				     buffer->debug_id,
3738  				     buffer->transaction ? "active" : "finished");
3739  			binder_free_buf(proc, thread, buffer, false);
3740  			break;
3741  		}
3742  
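		/*
		 * The _SG variants carry an extra buffers_size for
		 * scatter-gather payloads; it is passed as the last
		 * argument to binder_transaction().
		 */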
3743  		case BC_TRANSACTION_SG:
3744  		case BC_REPLY_SG: {
3745  			struct binder_transaction_data_sg tr;
3746  
3747  			if (copy_from_user(&tr, ptr, sizeof(tr)))
3748  				return -EFAULT;
3749  			ptr += sizeof(tr);
3750  			binder_transaction(proc, thread, &tr.transaction_data,
3751  					   cmd == BC_REPLY_SG, tr.buffers_size);
3752  			break;
3753  		}
3754  		case BC_TRANSACTION:
3755  		case BC_REPLY: {
3756  			struct binder_transaction_data tr;
3757  
3758  			if (copy_from_user(&tr, ptr, sizeof(tr)))
3759  				return -EFAULT;
3760  			ptr += sizeof(tr);
3761  			binder_transaction(proc, thread, &tr,
3762  					   cmd == BC_REPLY, 0);
3763  			break;
3764  		}
3765  
3766  		case BC_REGISTER_LOOPER:
3767  			binder_debug(BINDER_DEBUG_THREADS,
3768  				     "%d:%d BC_REGISTER_LOOPER\n",
3769  				     proc->pid, thread->pid);
3770  			binder_inner_proc_lock(proc);
3771  			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3772  				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3773  				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3774  					proc->pid, thread->pid);
3775  			} else if (proc->requested_threads == 0) {
3776  				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3777  				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3778  					proc->pid, thread->pid);
3779  			} else {
3780  				proc->requested_threads--;
3781  				proc->requested_threads_started++;
3782  			}
3783  			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3784  			binder_inner_proc_unlock(proc);
3785  			break;
3786  		case BC_ENTER_LOOPER:
3787  			binder_debug(BINDER_DEBUG_THREADS,
3788  				     "%d:%d BC_ENTER_LOOPER\n",
3789  				     proc->pid, thread->pid);
3790  			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3791  				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3792  				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3793  					proc->pid, thread->pid);
3794  			}
3795  			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3796  			break;
3797  		case BC_EXIT_LOOPER:
3798  			binder_debug(BINDER_DEBUG_THREADS,
3799  				     "%d:%d BC_EXIT_LOOPER\n",
3800  				     proc->pid, thread->pid);
3801  			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3802  			break;
3803  
3804  		case BC_REQUEST_DEATH_NOTIFICATION:
3805  		case BC_CLEAR_DEATH_NOTIFICATION: {
3806  			uint32_t target;
3807  			binder_uintptr_t cookie;
3808  			struct binder_ref *ref;
3809  			struct binder_ref_death *death = NULL;
3810  
3811  			if (get_user(target, (uint32_t __user *)ptr))
3812  				return -EFAULT;
3813  			ptr += sizeof(uint32_t);
3814  			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3815  				return -EFAULT;
3816  			ptr += sizeof(binder_uintptr_t);
3817  			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3818  				/*
3819  				 * Allocate memory for death notification
3820  				 * before taking lock
3821  				 */
3822  				death = kzalloc(sizeof(*death), GFP_KERNEL);
3823  				if (death == NULL) {
3824  					WARN_ON(thread->return_error.cmd !=
3825  						BR_OK);
3826  					thread->return_error.cmd = BR_ERROR;
3827  					binder_enqueue_thread_work(
3828  						thread,
3829  						&thread->return_error.work);
3830  					binder_debug(
3831  						BINDER_DEBUG_FAILED_TRANSACTION,
3832  						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3833  						proc->pid, thread->pid);
3834  					break;
3835  				}
3836  			}
3837  			binder_proc_lock(proc);
3838  			ref = binder_get_ref_olocked(proc, target, false);
3839  			if (ref == NULL) {
3840  				binder_user_error("%d:%d %s invalid ref %d\n",
3841  					proc->pid, thread->pid,
3842  					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3843  					"BC_REQUEST_DEATH_NOTIFICATION" :
3844  					"BC_CLEAR_DEATH_NOTIFICATION",
3845  					target);
3846  				binder_proc_unlock(proc);
3847  				kfree(death);
3848  				break;
3849  			}
3850  
3851  			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3852  				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3853  				     proc->pid, thread->pid,
3854  				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3855  				     "BC_REQUEST_DEATH_NOTIFICATION" :
3856  				     "BC_CLEAR_DEATH_NOTIFICATION",
3857  				     (u64)cookie, ref->data.debug_id,
3858  				     ref->data.desc, ref->data.strong,
3859  				     ref->data.weak, ref->node->debug_id);
3860  
3861  			binder_node_lock(ref->node);
3862  			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3863  				if (ref->death) {
3864  					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3865  						proc->pid, thread->pid);
3866  					binder_node_unlock(ref->node);
3867  					binder_proc_unlock(proc);
3868  					kfree(death);
3869  					break;
3870  				}
3871  				binder_stats_created(BINDER_STAT_DEATH);
3872  				INIT_LIST_HEAD(&death->work.entry);
3873  				death->cookie = cookie;
3874  				ref->death = death;
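				/*
				 * If the node's owning process is already
				 * dead, queue the DEAD_BINDER work right away
				 * rather than waiting for a future death.
				 */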
3875  				if (ref->node->proc == NULL) {
3876  					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3877  
3878  					binder_inner_proc_lock(proc);
3879  					binder_enqueue_work_ilocked(
3880  						&ref->death->work, &proc->todo);
3881  					binder_wakeup_proc_ilocked(proc);
3882  					binder_inner_proc_unlock(proc);
3883  				}
3884  			} else {
3885  				if (ref->death == NULL) {
3886  					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3887  						proc->pid, thread->pid);
3888  					binder_node_unlock(ref->node);
3889  					binder_proc_unlock(proc);
3890  					break;
3891  				}
3892  				death = ref->death;
3893  				if (death->cookie != cookie) {
3894  					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3895  						proc->pid, thread->pid,
3896  						(u64)death->cookie,
3897  						(u64)cookie);
3898  					binder_node_unlock(ref->node);
3899  					binder_proc_unlock(proc);
3900  					break;
3901  				}
3902  				ref->death = NULL;
3903  				binder_inner_proc_lock(proc);
3904  				if (list_empty(&death->work.entry)) {
3905  					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3906  					if (thread->looper &
3907  					    (BINDER_LOOPER_STATE_REGISTERED |
3908  					     BINDER_LOOPER_STATE_ENTERED))
3909  						binder_enqueue_thread_work_ilocked(
3910  								thread,
3911  								&death->work);
3912  					else {
3913  						binder_enqueue_work_ilocked(
3914  								&death->work,
3915  								&proc->todo);
3916  						binder_wakeup_proc_ilocked(
3917  								proc);
3918  					}
3919  				} else {
3920  					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3921  					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3922  				}
3923  				binder_inner_proc_unlock(proc);
3924  			}
3925  			binder_node_unlock(ref->node);
3926  			binder_proc_unlock(proc);
3927  		} break;
3928  		case BC_DEAD_BINDER_DONE: {
3929  			struct binder_work *w;
3930  			binder_uintptr_t cookie;
3931  			struct binder_ref_death *death = NULL;
3932  
3933  			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3934  				return -EFAULT;
3935  
3936  			ptr += sizeof(cookie);
3937  			binder_inner_proc_lock(proc);
3938  			list_for_each_entry(w, &proc->delivered_death,
3939  					    entry) {
3940  				struct binder_ref_death *tmp_death =
3941  					container_of(w,
3942  						     struct binder_ref_death,
3943  						     work);
3944  
3945  				if (tmp_death->cookie == cookie) {
3946  					death = tmp_death;
3947  					break;
3948  				}
3949  			}
3950  			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3951  				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3952  				     proc->pid, thread->pid, (u64)cookie,
3953  				     death);
3954  			if (death == NULL) {
3955  				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3956  					proc->pid, thread->pid, (u64)cookie);
3957  				binder_inner_proc_unlock(proc);
3958  				break;
3959  			}
3960  			binder_dequeue_work_ilocked(&death->work);
3961  			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3962  				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3963  				if (thread->looper &
3964  					(BINDER_LOOPER_STATE_REGISTERED |
3965  					 BINDER_LOOPER_STATE_ENTERED))
3966  					binder_enqueue_thread_work_ilocked(
3967  						thread, &death->work);
3968  				else {
3969  					binder_enqueue_work_ilocked(
3970  							&death->work,
3971  							&proc->todo);
3972  					binder_wakeup_proc_ilocked(proc);
3973  				}
3974  			}
3975  			binder_inner_proc_unlock(proc);
3976  		} break;
3977  
3978  		default:
3979  			pr_err("%d:%d unknown command %d\n",
3980  			       proc->pid, thread->pid, cmd);
3981  			return -EINVAL;
3982  		}
3983  		*consumed = ptr - buffer;
3984  	}
3985  	return 0;
3986  }
3987  
3988  static void binder_stat_br(struct binder_proc *proc,
3989  			   struct binder_thread *thread, uint32_t cmd)
3990  {
3991  	trace_binder_return(cmd);
3992  	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3993  		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3994  		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3995  		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3996  	}
3997  }
3998  
3999  static int binder_put_node_cmd(struct binder_proc *proc,
4000  			       struct binder_thread *thread,
4001  			       void __user **ptrp,
4002  			       binder_uintptr_t node_ptr,
4003  			       binder_uintptr_t node_cookie,
4004  			       int node_debug_id,
4005  			       uint32_t cmd, const char *cmd_name)
4006  {
4007  	void __user *ptr = *ptrp;
4008  
4009  	if (put_user(cmd, (uint32_t __user *)ptr))
4010  		return -EFAULT;
4011  	ptr += sizeof(uint32_t);
4012  
4013  	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4014  		return -EFAULT;
4015  	ptr += sizeof(binder_uintptr_t);
4016  
4017  	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4018  		return -EFAULT;
4019  	ptr += sizeof(binder_uintptr_t);
4020  
4021  	binder_stat_br(proc, thread, cmd);
4022  	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4023  		     proc->pid, thread->pid, cmd_name, node_debug_id,
4024  		     (u64)node_ptr, (u64)node_cookie);
4025  
4026  	*ptrp = ptr;
4027  	return 0;
4028  }
4029  
4030  static int binder_wait_for_work(struct binder_thread *thread,
4031  				bool do_proc_work)
4032  {
4033  	DEFINE_WAIT(wait);
4034  	struct binder_proc *proc = thread->proc;
4035  	int ret = 0;
4036  
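	/*
	 * Let the freezer skip this task while it is blocked here so that a
	 * thread sleeping on binder work does not hold up a system freeze;
	 * freezer_count() below re-enables freezing for this task.
	 */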
4037  	freezer_do_not_count();
4038  	binder_inner_proc_lock(proc);
4039  	for (;;) {
4040  		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4041  		if (binder_has_work_ilocked(thread, do_proc_work))
4042  			break;
4043  		if (do_proc_work)
4044  			list_add(&thread->waiting_thread_node,
4045  				 &proc->waiting_threads);
4046  		binder_inner_proc_unlock(proc);
4047  		schedule();
4048  		binder_inner_proc_lock(proc);
4049  		list_del_init(&thread->waiting_thread_node);
4050  		if (signal_pending(current)) {
4051  			ret = -EINTR;
4052  			break;
4053  		}
4054  	}
4055  	finish_wait(&thread->wait, &wait);
4056  	binder_inner_proc_unlock(proc);
4057  	freezer_count();
4058  
4059  	return ret;
4060  }
4061  
/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and scheduling a
 * deferred close for any fds that have already been installed.
 */
4076  static int binder_apply_fd_fixups(struct binder_proc *proc,
4077  				  struct binder_transaction *t)
4078  {
4079  	struct binder_txn_fd_fixup *fixup, *tmp;
4080  	int ret = 0;
4081  
4082  	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4083  		int fd = get_unused_fd_flags(O_CLOEXEC);
4084  
4085  		if (fd < 0) {
4086  			binder_debug(BINDER_DEBUG_TRANSACTION,
4087  				     "failed fd fixup txn %d fd %d\n",
4088  				     t->debug_id, fd);
4089  			ret = -ENOMEM;
4090  			break;
4091  		}
4092  		binder_debug(BINDER_DEBUG_TRANSACTION,
4093  			     "fd fixup txn %d fd %d\n",
4094  			     t->debug_id, fd);
4095  		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4096  		fd_install(fd, fixup->file);
4097  		fixup->file = NULL;
4098  		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4099  						fixup->offset, &fd,
4100  						sizeof(u32))) {
4101  			ret = -EINVAL;
4102  			break;
4103  		}
4104  	}
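	/*
	 * Always walk the list to free the fixups: fput() any file that was
	 * not installed and, if we failed part-way, schedule a deferred close
	 * for fds that were already installed above.
	 */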
4105  	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4106  		if (fixup->file) {
4107  			fput(fixup->file);
4108  		} else if (ret) {
4109  			u32 fd;
4110  			int err;
4111  
4112  			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4113  							    t->buffer,
4114  							    fixup->offset,
4115  							    sizeof(fd));
4116  			WARN_ON(err);
4117  			if (!err)
4118  				binder_deferred_fd_close(fd);
4119  		}
4120  		list_del(&fixup->fixup_entry);
4121  		kfree(fixup);
4122  	}
4123  
4124  	return ret;
4125  }
4126  
4127  static int binder_thread_read(struct binder_proc *proc,
4128  			      struct binder_thread *thread,
4129  			      binder_uintptr_t binder_buffer, size_t size,
4130  			      binder_size_t *consumed, int non_block)
4131  {
4132  	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4133  	void __user *ptr = buffer + *consumed;
4134  	void __user *end = buffer + size;
4135  
4136  	int ret = 0;
4137  	int wait_for_proc_work;
4138  
4139  	if (*consumed == 0) {
4140  		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4141  			return -EFAULT;
4142  		ptr += sizeof(uint32_t);
4143  	}
4144  
4145  retry:
4146  	binder_inner_proc_lock(proc);
4147  	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4148  	binder_inner_proc_unlock(proc);
4149  
4150  	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4151  
4152  	trace_binder_wait_for_work(wait_for_proc_work,
4153  				   !!thread->transaction_stack,
4154  				   !binder_worklist_empty(proc, &thread->todo));
4155  	if (wait_for_proc_work) {
4156  		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4157  					BINDER_LOOPER_STATE_ENTERED))) {
4158  			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4159  				proc->pid, thread->pid, thread->looper);
4160  			wait_event_interruptible(binder_user_error_wait,
4161  						 binder_stop_on_user_error < 2);
4162  		}
4163  		binder_set_nice(proc->default_priority);
4164  	}
4165  
4166  	if (non_block) {
4167  		if (!binder_has_work(thread, wait_for_proc_work))
4168  			ret = -EAGAIN;
4169  	} else {
4170  		ret = binder_wait_for_work(thread, wait_for_proc_work);
4171  	}
4172  
4173  	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4174  
4175  	if (ret)
4176  		return ret;
4177  
4178  	while (1) {
4179  		uint32_t cmd;
4180  		struct binder_transaction_data_secctx tr;
4181  		struct binder_transaction_data *trd = &tr.transaction_data;
4182  		struct binder_work *w = NULL;
4183  		struct list_head *list = NULL;
4184  		struct binder_transaction *t = NULL;
4185  		struct binder_thread *t_from;
4186  		size_t trsize = sizeof(*trd);
4187  
4188  		binder_inner_proc_lock(proc);
4189  		if (!binder_worklist_empty_ilocked(&thread->todo))
4190  			list = &thread->todo;
4191  		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4192  			   wait_for_proc_work)
4193  			list = &proc->todo;
4194  		else {
4195  			binder_inner_proc_unlock(proc);
4196  
4197  			/* no data added */
4198  			if (ptr - buffer == 4 && !thread->looper_need_return)
4199  				goto retry;
4200  			break;
4201  		}
4202  
4203  		if (end - ptr < sizeof(tr) + 4) {
4204  			binder_inner_proc_unlock(proc);
4205  			break;
4206  		}
4207  		w = binder_dequeue_work_head_ilocked(list);
4208  		if (binder_worklist_empty_ilocked(&thread->todo))
4209  			thread->process_todo = false;
4210  
4211  		switch (w->type) {
4212  		case BINDER_WORK_TRANSACTION: {
4213  			binder_inner_proc_unlock(proc);
4214  			t = container_of(w, struct binder_transaction, work);
4215  		} break;
4216  		case BINDER_WORK_RETURN_ERROR: {
4217  			struct binder_error *e = container_of(
4218  					w, struct binder_error, work);
4219  
4220  			WARN_ON(e->cmd == BR_OK);
4221  			binder_inner_proc_unlock(proc);
4222  			if (put_user(e->cmd, (uint32_t __user *)ptr))
4223  				return -EFAULT;
4224  			cmd = e->cmd;
4225  			e->cmd = BR_OK;
4226  			ptr += sizeof(uint32_t);
4227  
4228  			binder_stat_br(proc, thread, cmd);
4229  		} break;
4230  		case BINDER_WORK_TRANSACTION_COMPLETE:
4231  		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4232  			if (proc->oneway_spam_detection_enabled &&
4233  				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4234  				cmd = BR_ONEWAY_SPAM_SUSPECT;
4235  			else
4236  				cmd = BR_TRANSACTION_COMPLETE;
4237  			binder_inner_proc_unlock(proc);
4238  			kfree(w);
4239  			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4240  			if (put_user(cmd, (uint32_t __user *)ptr))
4241  				return -EFAULT;
4242  			ptr += sizeof(uint32_t);
4243  
4244  			binder_stat_br(proc, thread, cmd);
4245  			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4246  				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4247  				     proc->pid, thread->pid);
4248  		} break;
4249  		case BINDER_WORK_NODE: {
4250  			struct binder_node *node = container_of(w, struct binder_node, work);
4251  			int strong, weak;
4252  			binder_uintptr_t node_ptr = node->ptr;
4253  			binder_uintptr_t node_cookie = node->cookie;
4254  			int node_debug_id = node->debug_id;
4255  			int has_weak_ref;
4256  			int has_strong_ref;
4257  			void __user *orig_ptr = ptr;
4258  
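			/*
			 * Compare the node's current need for strong/weak
			 * references with what userspace already holds
			 * (has_strong_ref/has_weak_ref) and emit
			 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS for
			 * any transition.
			 */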
4259  			BUG_ON(proc != node->proc);
4260  			strong = node->internal_strong_refs ||
4261  					node->local_strong_refs;
4262  			weak = !hlist_empty(&node->refs) ||
4263  					node->local_weak_refs ||
4264  					node->tmp_refs || strong;
4265  			has_strong_ref = node->has_strong_ref;
4266  			has_weak_ref = node->has_weak_ref;
4267  
4268  			if (weak && !has_weak_ref) {
4269  				node->has_weak_ref = 1;
4270  				node->pending_weak_ref = 1;
4271  				node->local_weak_refs++;
4272  			}
4273  			if (strong && !has_strong_ref) {
4274  				node->has_strong_ref = 1;
4275  				node->pending_strong_ref = 1;
4276  				node->local_strong_refs++;
4277  			}
4278  			if (!strong && has_strong_ref)
4279  				node->has_strong_ref = 0;
4280  			if (!weak && has_weak_ref)
4281  				node->has_weak_ref = 0;
4282  			if (!weak && !strong) {
4283  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4284  					     "%d:%d node %d u%016llx c%016llx deleted\n",
4285  					     proc->pid, thread->pid,
4286  					     node_debug_id,
4287  					     (u64)node_ptr,
4288  					     (u64)node_cookie);
4289  				rb_erase(&node->rb_node, &proc->nodes);
4290  				binder_inner_proc_unlock(proc);
4291  				binder_node_lock(node);
4292  				/*
4293  				 * Acquire the node lock before freeing the
4294  				 * node to serialize with other threads that
4295  				 * may have been holding the node lock while
4296  				 * decrementing this node (avoids race where
4297  				 * this thread frees while the other thread
4298  				 * is unlocking the node after the final
4299  				 * decrement)
4300  				 */
4301  				binder_node_unlock(node);
4302  				binder_free_node(node);
4303  			} else
4304  				binder_inner_proc_unlock(proc);
4305  
4306  			if (weak && !has_weak_ref)
4307  				ret = binder_put_node_cmd(
4308  						proc, thread, &ptr, node_ptr,
4309  						node_cookie, node_debug_id,
4310  						BR_INCREFS, "BR_INCREFS");
4311  			if (!ret && strong && !has_strong_ref)
4312  				ret = binder_put_node_cmd(
4313  						proc, thread, &ptr, node_ptr,
4314  						node_cookie, node_debug_id,
4315  						BR_ACQUIRE, "BR_ACQUIRE");
4316  			if (!ret && !strong && has_strong_ref)
4317  				ret = binder_put_node_cmd(
4318  						proc, thread, &ptr, node_ptr,
4319  						node_cookie, node_debug_id,
4320  						BR_RELEASE, "BR_RELEASE");
4321  			if (!ret && !weak && has_weak_ref)
4322  				ret = binder_put_node_cmd(
4323  						proc, thread, &ptr, node_ptr,
4324  						node_cookie, node_debug_id,
4325  						BR_DECREFS, "BR_DECREFS");
4326  			if (orig_ptr == ptr)
4327  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4328  					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4329  					     proc->pid, thread->pid,
4330  					     node_debug_id,
4331  					     (u64)node_ptr,
4332  					     (u64)node_cookie);
4333  			if (ret)
4334  				return ret;
4335  		} break;
4336  		case BINDER_WORK_DEAD_BINDER:
4337  		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4338  		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4339  			struct binder_ref_death *death;
4340  			uint32_t cmd;
4341  			binder_uintptr_t cookie;
4342  
4343  			death = container_of(w, struct binder_ref_death, work);
4344  			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4345  				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4346  			else
4347  				cmd = BR_DEAD_BINDER;
4348  			cookie = death->cookie;
4349  
4350  			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4351  				     "%d:%d %s %016llx\n",
4352  				      proc->pid, thread->pid,
4353  				      cmd == BR_DEAD_BINDER ?
4354  				      "BR_DEAD_BINDER" :
4355  				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4356  				      (u64)cookie);
4357  			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4358  				binder_inner_proc_unlock(proc);
4359  				kfree(death);
4360  				binder_stats_deleted(BINDER_STAT_DEATH);
4361  			} else {
4362  				binder_enqueue_work_ilocked(
4363  						w, &proc->delivered_death);
4364  				binder_inner_proc_unlock(proc);
4365  			}
4366  			if (put_user(cmd, (uint32_t __user *)ptr))
4367  				return -EFAULT;
4368  			ptr += sizeof(uint32_t);
4369  			if (put_user(cookie,
4370  				     (binder_uintptr_t __user *)ptr))
4371  				return -EFAULT;
4372  			ptr += sizeof(binder_uintptr_t);
4373  			binder_stat_br(proc, thread, cmd);
4374  			if (cmd == BR_DEAD_BINDER)
4375  				goto done; /* DEAD_BINDER notifications can cause transactions */
4376  		} break;
4377  		default:
4378  			binder_inner_proc_unlock(proc);
4379  			pr_err("%d:%d: bad work type %d\n",
4380  			       proc->pid, thread->pid, w->type);
4381  			break;
4382  		}
4383  
4384  		if (!t)
4385  			continue;
4386  
4387  		BUG_ON(t->buffer == NULL);
4388  		if (t->buffer->target_node) {
4389  			struct binder_node *target_node = t->buffer->target_node;
4390  
4391  			trd->target.ptr = target_node->ptr;
4392  			trd->cookie =  target_node->cookie;
4393  			t->saved_priority = task_nice(current);
4394  			if (t->priority < target_node->min_priority &&
4395  			    !(t->flags & TF_ONE_WAY))
4396  				binder_set_nice(t->priority);
4397  			else if (!(t->flags & TF_ONE_WAY) ||
4398  				 t->saved_priority > target_node->min_priority)
4399  				binder_set_nice(target_node->min_priority);
4400  			cmd = BR_TRANSACTION;
4401  		} else {
4402  			trd->target.ptr = 0;
4403  			trd->cookie = 0;
4404  			cmd = BR_REPLY;
4405  		}
4406  		trd->code = t->code;
4407  		trd->flags = t->flags;
4408  		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4409  
4410  		t_from = binder_get_txn_from(t);
4411  		if (t_from) {
4412  			struct task_struct *sender = t_from->proc->tsk;
4413  
4414  			trd->sender_pid =
4415  				task_tgid_nr_ns(sender,
4416  						task_active_pid_ns(current));
4417  		} else {
4418  			trd->sender_pid = 0;
4419  		}
4420  
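		/*
		 * Install the fds carried by this transaction now that we are
		 * running in the receiving process; on failure the transaction
		 * is cleaned up below.
		 */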
4421  		ret = binder_apply_fd_fixups(proc, t);
4422  		if (ret) {
4423  			struct binder_buffer *buffer = t->buffer;
4424  			bool oneway = !!(t->flags & TF_ONE_WAY);
4425  			int tid = t->debug_id;
4426  
4427  			if (t_from)
4428  				binder_thread_dec_tmpref(t_from);
4429  			buffer->transaction = NULL;
4430  			binder_cleanup_transaction(t, "fd fixups failed",
4431  						   BR_FAILED_REPLY);
4432  			binder_free_buf(proc, thread, buffer, true);
4433  			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4434  				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4435  				     proc->pid, thread->pid,
4436  				     oneway ? "async " :
4437  					(cmd == BR_REPLY ? "reply " : ""),
4438  				     tid, BR_FAILED_REPLY, ret, __LINE__);
4439  			if (cmd == BR_REPLY) {
4440  				cmd = BR_FAILED_REPLY;
4441  				if (put_user(cmd, (uint32_t __user *)ptr))
4442  					return -EFAULT;
4443  				ptr += sizeof(uint32_t);
4444  				binder_stat_br(proc, thread, cmd);
4445  				break;
4446  			}
4447  			continue;
4448  		}
4449  		trd->data_size = t->buffer->data_size;
4450  		trd->offsets_size = t->buffer->offsets_size;
4451  		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4452  		trd->data.ptr.offsets = trd->data.ptr.buffer +
4453  					ALIGN(t->buffer->data_size,
4454  					    sizeof(void *));
4455  
4456  		tr.secctx = t->security_ctx;
4457  		if (t->security_ctx) {
4458  			cmd = BR_TRANSACTION_SEC_CTX;
4459  			trsize = sizeof(tr);
4460  		}
4461  		if (put_user(cmd, (uint32_t __user *)ptr)) {
4462  			if (t_from)
4463  				binder_thread_dec_tmpref(t_from);
4464  
4465  			binder_cleanup_transaction(t, "put_user failed",
4466  						   BR_FAILED_REPLY);
4467  
4468  			return -EFAULT;
4469  		}
4470  		ptr += sizeof(uint32_t);
4471  		if (copy_to_user(ptr, &tr, trsize)) {
4472  			if (t_from)
4473  				binder_thread_dec_tmpref(t_from);
4474  
4475  			binder_cleanup_transaction(t, "copy_to_user failed",
4476  						   BR_FAILED_REPLY);
4477  
4478  			return -EFAULT;
4479  		}
4480  		ptr += trsize;
4481  
4482  		trace_binder_transaction_received(t);
4483  		binder_stat_br(proc, thread, cmd);
4484  		binder_debug(BINDER_DEBUG_TRANSACTION,
4485  			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4486  			     proc->pid, thread->pid,
4487  			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4488  				(cmd == BR_TRANSACTION_SEC_CTX) ?
4489  				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4490  			     t->debug_id, t_from ? t_from->proc->pid : 0,
4491  			     t_from ? t_from->pid : 0, cmd,
4492  			     t->buffer->data_size, t->buffer->offsets_size,
4493  			     (u64)trd->data.ptr.buffer,
4494  			     (u64)trd->data.ptr.offsets);
4495  
4496  		if (t_from)
4497  			binder_thread_dec_tmpref(t_from);
4498  		t->buffer->allow_user_free = 1;
4499  		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4500  			binder_inner_proc_lock(thread->proc);
4501  			t->to_parent = thread->transaction_stack;
4502  			t->to_thread = thread;
4503  			thread->transaction_stack = t;
4504  			binder_inner_proc_unlock(thread->proc);
4505  		} else {
4506  			binder_free_transaction(t);
4507  		}
4508  		break;
4509  	}
4510  
4511  done:
4512  
4513  	*consumed = ptr - buffer;
4514  	binder_inner_proc_lock(proc);
4515  	if (proc->requested_threads == 0 &&
4516  	    list_empty(&thread->proc->waiting_threads) &&
4517  	    proc->requested_threads_started < proc->max_threads &&
4518  	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out */) {
4521  		proc->requested_threads++;
4522  		binder_inner_proc_unlock(proc);
4523  		binder_debug(BINDER_DEBUG_THREADS,
4524  			     "%d:%d BR_SPAWN_LOOPER\n",
4525  			     proc->pid, thread->pid);
4526  		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4527  			return -EFAULT;
4528  		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4529  	} else
4530  		binder_inner_proc_unlock(proc);
4531  	return 0;
4532  }
4533  
4534  static void binder_release_work(struct binder_proc *proc,
4535  				struct list_head *list)
4536  {
4537  	struct binder_work *w;
4538  	enum binder_work_type wtype;
4539  
4540  	while (1) {
4541  		binder_inner_proc_lock(proc);
4542  		w = binder_dequeue_work_head_ilocked(list);
4543  		wtype = w ? w->type : 0;
4544  		binder_inner_proc_unlock(proc);
4545  		if (!w)
4546  			return;
4547  
4548  		switch (wtype) {
4549  		case BINDER_WORK_TRANSACTION: {
4550  			struct binder_transaction *t;
4551  
4552  			t = container_of(w, struct binder_transaction, work);
4553  
4554  			binder_cleanup_transaction(t, "process died.",
4555  						   BR_DEAD_REPLY);
4556  		} break;
4557  		case BINDER_WORK_RETURN_ERROR: {
4558  			struct binder_error *e = container_of(
4559  					w, struct binder_error, work);
4560  
4561  			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4562  				"undelivered TRANSACTION_ERROR: %u\n",
4563  				e->cmd);
4564  		} break;
4565  		case BINDER_WORK_TRANSACTION_COMPLETE: {
4566  			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4567  				"undelivered TRANSACTION_COMPLETE\n");
4568  			kfree(w);
4569  			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4570  		} break;
4571  		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4572  		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4573  			struct binder_ref_death *death;
4574  
4575  			death = container_of(w, struct binder_ref_death, work);
4576  			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4577  				"undelivered death notification, %016llx\n",
4578  				(u64)death->cookie);
4579  			kfree(death);
4580  			binder_stats_deleted(BINDER_STAT_DEATH);
4581  		} break;
4582  		case BINDER_WORK_NODE:
4583  			break;
4584  		default:
4585  			pr_err("unexpected work type, %d, not freed\n",
4586  			       wtype);
4587  			break;
4588  		}
4589  	}
4590  
4591  }
4592  
4593  static struct binder_thread *binder_get_thread_ilocked(
4594  		struct binder_proc *proc, struct binder_thread *new_thread)
4595  {
4596  	struct binder_thread *thread = NULL;
4597  	struct rb_node *parent = NULL;
4598  	struct rb_node **p = &proc->threads.rb_node;
4599  
4600  	while (*p) {
4601  		parent = *p;
4602  		thread = rb_entry(parent, struct binder_thread, rb_node);
4603  
4604  		if (current->pid < thread->pid)
4605  			p = &(*p)->rb_left;
4606  		else if (current->pid > thread->pid)
4607  			p = &(*p)->rb_right;
4608  		else
4609  			return thread;
4610  	}
4611  	if (!new_thread)
4612  		return NULL;
4613  	thread = new_thread;
4614  	binder_stats_created(BINDER_STAT_THREAD);
4615  	thread->proc = proc;
4616  	thread->pid = current->pid;
4617  	atomic_set(&thread->tmp_ref, 0);
4618  	init_waitqueue_head(&thread->wait);
4619  	INIT_LIST_HEAD(&thread->todo);
4620  	rb_link_node(&thread->rb_node, parent, p);
4621  	rb_insert_color(&thread->rb_node, &proc->threads);
4622  	thread->looper_need_return = true;
4623  	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4624  	thread->return_error.cmd = BR_OK;
4625  	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4626  	thread->reply_error.cmd = BR_OK;
4627  	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4628  	return thread;
4629  }
4630  
4631  static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4632  {
4633  	struct binder_thread *thread;
4634  	struct binder_thread *new_thread;
4635  
4636  	binder_inner_proc_lock(proc);
4637  	thread = binder_get_thread_ilocked(proc, NULL);
4638  	binder_inner_proc_unlock(proc);
4639  	if (!thread) {
4640  		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4641  		if (new_thread == NULL)
4642  			return NULL;
4643  		binder_inner_proc_lock(proc);
4644  		thread = binder_get_thread_ilocked(proc, new_thread);
4645  		binder_inner_proc_unlock(proc);
4646  		if (thread != new_thread)
4647  			kfree(new_thread);
4648  	}
4649  	return thread;
4650  }
4651  
4652  static void binder_free_proc(struct binder_proc *proc)
4653  {
4654  	struct binder_device *device;
4655  
4656  	BUG_ON(!list_empty(&proc->todo));
4657  	BUG_ON(!list_empty(&proc->delivered_death));
4658  	if (proc->outstanding_txns)
4659  		pr_warn("%s: Unexpected outstanding_txns %d\n",
4660  			__func__, proc->outstanding_txns);
4661  	device = container_of(proc->context, struct binder_device, context);
4662  	if (refcount_dec_and_test(&device->ref)) {
4663  		kfree(proc->context->name);
4664  		kfree(device);
4665  	}
4666  	binder_alloc_deferred_release(&proc->alloc);
4667  	put_task_struct(proc->tsk);
4668  	put_cred(proc->cred);
4669  	binder_stats_deleted(BINDER_STAT_PROC);
4670  	kfree(proc);
4671  }
4672  
4673  static void binder_free_thread(struct binder_thread *thread)
4674  {
4675  	BUG_ON(!list_empty(&thread->todo));
4676  	binder_stats_deleted(BINDER_STAT_THREAD);
4677  	binder_proc_dec_tmpref(thread->proc);
4678  	kfree(thread);
4679  }
4680  
4681  static int binder_thread_release(struct binder_proc *proc,
4682  				 struct binder_thread *thread)
4683  {
4684  	struct binder_transaction *t;
4685  	struct binder_transaction *send_reply = NULL;
4686  	int active_transactions = 0;
4687  	struct binder_transaction *last_t = NULL;
4688  
4689  	binder_inner_proc_lock(thread->proc);
4690  	/*
4691  	 * take a ref on the proc so it survives
4692  	 * after we remove this thread from proc->threads.
4693  	 * The corresponding dec is when we actually
4694  	 * free the thread in binder_free_thread()
4695  	 */
4696  	proc->tmp_ref++;
4697  	/*
4698  	 * take a ref on this thread to ensure it
4699  	 * survives while we are releasing it
4700  	 */
4701  	atomic_inc(&thread->tmp_ref);
4702  	rb_erase(&thread->rb_node, &proc->threads);
4703  	t = thread->transaction_stack;
4704  	if (t) {
4705  		spin_lock(&t->lock);
4706  		if (t->to_thread == thread)
4707  			send_reply = t;
4708  	} else {
4709  		__acquire(&t->lock);
4710  	}
4711  	thread->is_dead = true;
4712  
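	/*
	 * Walk the thread's transaction stack and detach this thread from
	 * each entry: for incoming transactions clear the target thread/proc
	 * and buffer linkage, for outgoing ones clear the sender.
	 */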
4713  	while (t) {
4714  		last_t = t;
4715  		active_transactions++;
4716  		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4717  			     "release %d:%d transaction %d %s, still active\n",
4718  			      proc->pid, thread->pid,
4719  			     t->debug_id,
4720  			     (t->to_thread == thread) ? "in" : "out");
4721  
4722  		if (t->to_thread == thread) {
4723  			thread->proc->outstanding_txns--;
4724  			t->to_proc = NULL;
4725  			t->to_thread = NULL;
4726  			if (t->buffer) {
4727  				t->buffer->transaction = NULL;
4728  				t->buffer = NULL;
4729  			}
4730  			t = t->to_parent;
4731  		} else if (t->from == thread) {
4732  			t->from = NULL;
4733  			t = t->from_parent;
4734  		} else
4735  			BUG();
4736  		spin_unlock(&last_t->lock);
4737  		if (t)
4738  			spin_lock(&t->lock);
4739  		else
4740  			__acquire(&t->lock);
4741  	}
4742  	/* annotation for sparse, lock not acquired in last iteration above */
4743  	__release(&t->lock);
4744  
4745  	/*
4746  	 * If this thread used poll, make sure we remove the waitqueue from any
4747  	 * poll data structures holding it.
4748  	 */
4749  	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4750  		wake_up_pollfree(&thread->wait);
4751  
4752  	binder_inner_proc_unlock(thread->proc);
4753  
4754  	/*
4755  	 * This is needed to avoid races between wake_up_pollfree() above and
4756  	 * someone else removing the last entry from the queue for other reasons
4757  	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4758  	 * descriptor being closed).  Such other users hold an RCU read lock, so
4759  	 * we can be sure they're done after we call synchronize_rcu().
4760  	 */
4761  	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4762  		synchronize_rcu();
4763  
4764  	if (send_reply)
4765  		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4766  	binder_release_work(proc, &thread->todo);
4767  	binder_thread_dec_tmpref(thread);
4768  	return active_transactions;
4769  }
4770  
4771  static __poll_t binder_poll(struct file *filp,
4772  				struct poll_table_struct *wait)
4773  {
4774  	struct binder_proc *proc = filp->private_data;
4775  	struct binder_thread *thread = NULL;
4776  	bool wait_for_proc_work;
4777  
4778  	thread = binder_get_thread(proc);
4779  	if (!thread)
4780  		return POLLERR;
4781  
4782  	binder_inner_proc_lock(thread->proc);
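	/*
	 * Record that this thread used poll so that binder_thread_release()
	 * knows it must pollfree the wait queue before freeing the thread.
	 */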
4783  	thread->looper |= BINDER_LOOPER_STATE_POLL;
4784  	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4785  
4786  	binder_inner_proc_unlock(thread->proc);
4787  
4788  	poll_wait(filp, &thread->wait, wait);
4789  
4790  	if (binder_has_work(thread, wait_for_proc_work))
4791  		return EPOLLIN;
4792  
4793  	return 0;
4794  }
4795  
4796  static int binder_ioctl_write_read(struct file *filp,
4797  				unsigned int cmd, unsigned long arg,
4798  				struct binder_thread *thread)
4799  {
4800  	int ret = 0;
4801  	struct binder_proc *proc = filp->private_data;
4802  	unsigned int size = _IOC_SIZE(cmd);
4803  	void __user *ubuf = (void __user *)arg;
4804  	struct binder_write_read bwr;
4805  
4806  	if (size != sizeof(struct binder_write_read)) {
4807  		ret = -EINVAL;
4808  		goto out;
4809  	}
4810  	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4811  		ret = -EFAULT;
4812  		goto out;
4813  	}
4814  	binder_debug(BINDER_DEBUG_READ_WRITE,
4815  		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4816  		     proc->pid, thread->pid,
4817  		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4818  		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4819  
4820  	if (bwr.write_size > 0) {
4821  		ret = binder_thread_write(proc, thread,
4822  					  bwr.write_buffer,
4823  					  bwr.write_size,
4824  					  &bwr.write_consumed);
4825  		trace_binder_write_done(ret);
4826  		if (ret < 0) {
4827  			bwr.read_consumed = 0;
4828  			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4829  				ret = -EFAULT;
4830  			goto out;
4831  		}
4832  	}
4833  	if (bwr.read_size > 0) {
4834  		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4835  					 bwr.read_size,
4836  					 &bwr.read_consumed,
4837  					 filp->f_flags & O_NONBLOCK);
4838  		trace_binder_read_done(ret);
4839  		binder_inner_proc_lock(proc);
4840  		if (!binder_worklist_empty_ilocked(&proc->todo))
4841  			binder_wakeup_proc_ilocked(proc);
4842  		binder_inner_proc_unlock(proc);
4843  		if (ret < 0) {
4844  			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4845  				ret = -EFAULT;
4846  			goto out;
4847  		}
4848  	}
4849  	binder_debug(BINDER_DEBUG_READ_WRITE,
4850  		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4851  		     proc->pid, thread->pid,
4852  		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4853  		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4854  	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4855  		ret = -EFAULT;
4856  		goto out;
4857  	}
4858  out:
4859  	return ret;
4860  }
4861  
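/*
 * Illustrative sketch only, not part of the driver: how a service manager
 * typically registers itself as context manager. The plain ioctl takes no
 * object; the _EXT variant passes a flat_binder_object so flags such as
 * FLAT_BINDER_FLAG_TXN_SECURITY_CTX can be requested. "binder_fd" is a
 * hypothetical, already-open descriptor.
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &obj) < 0)
 *		// older kernels: fall back to the flag-less variant
 *		ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 */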
4862  static int binder_ioctl_set_ctx_mgr(struct file *filp,
4863  				    struct flat_binder_object *fbo)
4864  {
4865  	int ret = 0;
4866  	struct binder_proc *proc = filp->private_data;
4867  	struct binder_context *context = proc->context;
4868  	struct binder_node *new_node;
4869  	kuid_t curr_euid = current_euid();
4870  
4871  	mutex_lock(&context->context_mgr_node_lock);
4872  	if (context->binder_context_mgr_node) {
4873  		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4874  		ret = -EBUSY;
4875  		goto out;
4876  	}
4877  	ret = security_binder_set_context_mgr(proc->cred);
4878  	if (ret < 0)
4879  		goto out;
4880  	if (uid_valid(context->binder_context_mgr_uid)) {
4881  		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4882  			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4883  			       from_kuid(&init_user_ns, curr_euid),
4884  			       from_kuid(&init_user_ns,
4885  					 context->binder_context_mgr_uid));
4886  			ret = -EPERM;
4887  			goto out;
4888  		}
4889  	} else {
4890  		context->binder_context_mgr_uid = curr_euid;
4891  	}
4892  	new_node = binder_new_node(proc, fbo);
4893  	if (!new_node) {
4894  		ret = -ENOMEM;
4895  		goto out;
4896  	}
4897  	binder_node_lock(new_node);
4898  	new_node->local_weak_refs++;
4899  	new_node->local_strong_refs++;
4900  	new_node->has_strong_ref = 1;
4901  	new_node->has_weak_ref = 1;
4902  	context->binder_context_mgr_node = new_node;
4903  	binder_node_unlock(new_node);
4904  	binder_put_node(new_node);
4905  out:
4906  	mutex_unlock(&context->context_mgr_node_lock);
4907  	return ret;
4908  }
4909  
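/*
 * Illustrative sketch only, not part of the driver: the context manager can
 * query the reference counts behind one of its handles. All fields except
 * "handle" must be zero on input. "ctx_mgr_fd" and "h" are hypothetical.
 *
 *	struct binder_node_info_for_ref info = { .handle = h };
 *
 *	if (ioctl(ctx_mgr_fd, BINDER_GET_NODE_INFO_FOR_REF, &info) == 0)
 *		printf("strong %u weak %u\n", info.strong_count,
 *		       info.weak_count);
 */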
4910  static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4911  		struct binder_node_info_for_ref *info)
4912  {
4913  	struct binder_node *node;
4914  	struct binder_context *context = proc->context;
4915  	__u32 handle = info->handle;
4916  
4917  	if (info->strong_count || info->weak_count || info->reserved1 ||
4918  	    info->reserved2 || info->reserved3) {
4919  		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4920  				  proc->pid);
4921  		return -EINVAL;
4922  	}
4923  
4924  	/* This ioctl may only be used by the context manager */
4925  	mutex_lock(&context->context_mgr_node_lock);
4926  	if (!context->binder_context_mgr_node ||
4927  		context->binder_context_mgr_node->proc != proc) {
4928  		mutex_unlock(&context->context_mgr_node_lock);
4929  		return -EPERM;
4930  	}
4931  	mutex_unlock(&context->context_mgr_node_lock);
4932  
4933  	node = binder_get_node_from_ref(proc, handle, true, NULL);
4934  	if (!node)
4935  		return -EINVAL;
4936  
4937  	info->strong_count = node->local_strong_refs +
4938  		node->internal_strong_refs;
4939  	info->weak_count = node->local_weak_refs;
4940  
4941  	binder_put_node(node);
4942  
4943  	return 0;
4944  }
4945  
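/*
 * Illustrative sketch only, not part of the driver: BINDER_GET_NODE_DEBUG_INFO
 * acts as a cursor; each call returns the first node whose ptr is strictly
 * greater than the ptr passed in, and an all-zero result marks the end. A
 * hypothetical walk over the calling process's own nodes:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	do {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr)
 *			printf("node u%016llx c%016llx hs %u hw %u\n",
 *			       (unsigned long long)info.ptr,
 *			       (unsigned long long)info.cookie,
 *			       info.has_strong_ref, info.has_weak_ref);
 *	} while (info.ptr);
 */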
4946  static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4947  				struct binder_node_debug_info *info)
4948  {
4949  	struct rb_node *n;
4950  	binder_uintptr_t ptr = info->ptr;
4951  
4952  	memset(info, 0, sizeof(*info));
4953  
4954  	binder_inner_proc_lock(proc);
4955  	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4956  		struct binder_node *node = rb_entry(n, struct binder_node,
4957  						    rb_node);
4958  		if (node->ptr > ptr) {
4959  			info->ptr = node->ptr;
4960  			info->cookie = node->cookie;
4961  			info->has_strong_ref = node->has_strong_ref;
4962  			info->has_weak_ref = node->has_weak_ref;
4963  			break;
4964  		}
4965  	}
4966  	binder_inner_proc_unlock(proc);
4967  
4968  	return 0;
4969  }
4970  
4971  static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4972  {
4973  	struct rb_node *n;
4974  	struct binder_thread *thread;
4975  
4976  	if (proc->outstanding_txns > 0)
4977  		return true;
4978  
4979  	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4980  		thread = rb_entry(n, struct binder_thread, rb_node);
4981  		if (thread->transaction_stack)
4982  			return true;
4983  	}
4984  	return false;
4985  }
4986  
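/*
 * Illustrative sketch only, not part of the driver: freezing a process from a
 * hypothetical userspace freezer. With a non-zero timeout the driver waits
 * for outstanding transactions to drain and returns -EAGAIN if some are still
 * pending, so callers commonly retry or roll back. "binder_fd" and
 * "target_pid" are hypothetical; includes and error handling are elided.
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN) {
 *		// target still has transactions in flight: retry later,
 *		// or thaw it again as shown here
 *		info.enable = 0;
 *		ioctl(binder_fd, BINDER_FREEZE, &info);
 *	}
 */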
4987  static int binder_ioctl_freeze(struct binder_freeze_info *info,
4988  			       struct binder_proc *target_proc)
4989  {
4990  	int ret = 0;
4991  
4992  	if (!info->enable) {
4993  		binder_inner_proc_lock(target_proc);
4994  		target_proc->sync_recv = false;
4995  		target_proc->async_recv = false;
4996  		target_proc->is_frozen = false;
4997  		binder_inner_proc_unlock(target_proc);
4998  		return 0;
4999  	}
5000  
5001  	/*
5002  	 * Freezing the target. Prevent new transactions by
5003  	 * setting frozen state. If timeout specified, wait
5004  	 * for transactions to drain.
5005  	 */
5006  	binder_inner_proc_lock(target_proc);
5007  	target_proc->sync_recv = false;
5008  	target_proc->async_recv = false;
5009  	target_proc->is_frozen = true;
5010  	binder_inner_proc_unlock(target_proc);
5011  
5012  	if (info->timeout_ms > 0)
5013  		ret = wait_event_interruptible_timeout(
5014  			target_proc->freeze_wait,
5015  			(!target_proc->outstanding_txns),
5016  			msecs_to_jiffies(info->timeout_ms));
5017  
5018  	/* Check pending transactions that wait for reply */
5019  	if (ret >= 0) {
5020  		binder_inner_proc_lock(target_proc);
5021  		if (binder_txns_pending_ilocked(target_proc))
5022  			ret = -EAGAIN;
5023  		binder_inner_proc_unlock(target_proc);
5024  	}
5025  
5026  	if (ret < 0) {
5027  		binder_inner_proc_lock(target_proc);
5028  		target_proc->is_frozen = false;
5029  		binder_inner_proc_unlock(target_proc);
5030  	}
5031  
5032  	return ret;
5033  }
5034  
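/*
 * Illustrative sketch only, not part of the driver: querying whether a frozen
 * process was contacted. In the returned info, bit 0 of sync_recv means a
 * synchronous transaction was attempted while the target was frozen, bit 1
 * means transactions are still pending, and bit 0 of async_recv means a
 * oneway transaction was attempted. "binder_fd" and "target_pid" are
 * hypothetical.
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *
 *	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		int got_sync = info.sync_recv & 1;
 *		int txns_pending = info.sync_recv & 2;
 *		int got_async = info.async_recv & 1;
 *		// decide whether the freeze should be rolled back
 *	}
 */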
5035  static int binder_ioctl_get_freezer_info(
5036  				struct binder_frozen_status_info *info)
5037  {
5038  	struct binder_proc *target_proc;
5039  	bool found = false;
5040  	__u32 txns_pending;
5041  
5042  	info->sync_recv = 0;
5043  	info->async_recv = 0;
5044  
5045  	mutex_lock(&binder_procs_lock);
5046  	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5047  		if (target_proc->pid == info->pid) {
5048  			found = true;
5049  			binder_inner_proc_lock(target_proc);
5050  			txns_pending = binder_txns_pending_ilocked(target_proc);
5051  			info->sync_recv |= target_proc->sync_recv |
5052  					(txns_pending << 1);
5053  			info->async_recv |= target_proc->async_recv;
5054  			binder_inner_proc_unlock(target_proc);
5055  		}
5056  	}
5057  	mutex_unlock(&binder_procs_lock);
5058  
5059  	if (!found)
5060  		return -EINVAL;
5061  
5062  	return 0;
5063  }
5064  
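/*
 * Illustrative sketch only, not part of the driver: the usual userspace
 * bring-up sequence before issuing transactions. The mapping size and thread
 * limit are hypothetical values; libbinder picks its own.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	if (fd < 0 ||
 *	    ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 *	// map the read-only transaction buffer (see binder_mmap())
 *	void *map = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 */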
5065  static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5066  {
5067  	int ret;
5068  	struct binder_proc *proc = filp->private_data;
5069  	struct binder_thread *thread;
5070  	unsigned int size = _IOC_SIZE(cmd);
5071  	void __user *ubuf = (void __user *)arg;
5072  
5073  	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5074  			proc->pid, current->pid, cmd, arg);*/
5075  
5076  	binder_selftest_alloc(&proc->alloc);
5077  
5078  	trace_binder_ioctl(cmd, arg);
5079  
5080  	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5081  	if (ret)
5082  		goto err_unlocked;
5083  
5084  	thread = binder_get_thread(proc);
5085  	if (thread == NULL) {
5086  		ret = -ENOMEM;
5087  		goto err;
5088  	}
5089  
5090  	switch (cmd) {
5091  	case BINDER_WRITE_READ:
5092  		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5093  		if (ret)
5094  			goto err;
5095  		break;
5096  	case BINDER_SET_MAX_THREADS: {
5097  		int max_threads;
5098  
5099  		if (copy_from_user(&max_threads, ubuf,
5100  				   sizeof(max_threads))) {
5101  			ret = -EINVAL;
5102  			goto err;
5103  		}
5104  		binder_inner_proc_lock(proc);
5105  		proc->max_threads = max_threads;
5106  		binder_inner_proc_unlock(proc);
5107  		break;
5108  	}
5109  	case BINDER_SET_CONTEXT_MGR_EXT: {
5110  		struct flat_binder_object fbo;
5111  
5112  		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5113  			ret = -EINVAL;
5114  			goto err;
5115  		}
5116  		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5117  		if (ret)
5118  			goto err;
5119  		break;
5120  	}
5121  	case BINDER_SET_CONTEXT_MGR:
5122  		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5123  		if (ret)
5124  			goto err;
5125  		break;
5126  	case BINDER_THREAD_EXIT:
5127  		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5128  			     proc->pid, thread->pid);
5129  		binder_thread_release(proc, thread);
5130  		thread = NULL;
5131  		break;
5132  	case BINDER_VERSION: {
5133  		struct binder_version __user *ver = ubuf;
5134  
5135  		if (size != sizeof(struct binder_version)) {
5136  			ret = -EINVAL;
5137  			goto err;
5138  		}
5139  		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5140  			     &ver->protocol_version)) {
5141  			ret = -EINVAL;
5142  			goto err;
5143  		}
5144  		break;
5145  	}
5146  	case BINDER_GET_NODE_INFO_FOR_REF: {
5147  		struct binder_node_info_for_ref info;
5148  
5149  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5150  			ret = -EFAULT;
5151  			goto err;
5152  		}
5153  
5154  		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5155  		if (ret < 0)
5156  			goto err;
5157  
5158  		if (copy_to_user(ubuf, &info, sizeof(info))) {
5159  			ret = -EFAULT;
5160  			goto err;
5161  		}
5162  
5163  		break;
5164  	}
5165  	case BINDER_GET_NODE_DEBUG_INFO: {
5166  		struct binder_node_debug_info info;
5167  
5168  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5169  			ret = -EFAULT;
5170  			goto err;
5171  		}
5172  
5173  		ret = binder_ioctl_get_node_debug_info(proc, &info);
5174  		if (ret < 0)
5175  			goto err;
5176  
5177  		if (copy_to_user(ubuf, &info, sizeof(info))) {
5178  			ret = -EFAULT;
5179  			goto err;
5180  		}
5181  		break;
5182  	}
5183  	case BINDER_FREEZE: {
5184  		struct binder_freeze_info info;
5185  		struct binder_proc **target_procs = NULL, *target_proc;
5186  		int target_procs_count = 0, i = 0;
5187  
5188  		ret = 0;
5189  
5190  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5191  			ret = -EFAULT;
5192  			goto err;
5193  		}
5194  
5195  		mutex_lock(&binder_procs_lock);
5196  		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5197  			if (target_proc->pid == info.pid)
5198  				target_procs_count++;
5199  		}
5200  
5201  		if (target_procs_count == 0) {
5202  			mutex_unlock(&binder_procs_lock);
5203  			ret = -EINVAL;
5204  			goto err;
5205  		}
5206  
5207  		target_procs = kcalloc(target_procs_count,
5208  				       sizeof(struct binder_proc *),
5209  				       GFP_KERNEL);
5210  
5211  		if (!target_procs) {
5212  			mutex_unlock(&binder_procs_lock);
5213  			ret = -ENOMEM;
5214  			goto err;
5215  		}
5216  
5217  		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5218  			if (target_proc->pid != info.pid)
5219  				continue;
5220  
5221  			binder_inner_proc_lock(target_proc);
5222  			target_proc->tmp_ref++;
5223  			binder_inner_proc_unlock(target_proc);
5224  
5225  			target_procs[i++] = target_proc;
5226  		}
5227  		mutex_unlock(&binder_procs_lock);
5228  
5229  		for (i = 0; i < target_procs_count; i++) {
5230  			if (ret >= 0)
5231  				ret = binder_ioctl_freeze(&info,
5232  							  target_procs[i]);
5233  
5234  			binder_proc_dec_tmpref(target_procs[i]);
5235  		}
5236  
5237  		kfree(target_procs);
5238  
5239  		if (ret < 0)
5240  			goto err;
5241  		break;
5242  	}
5243  	case BINDER_GET_FROZEN_INFO: {
5244  		struct binder_frozen_status_info info;
5245  
5246  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5247  			ret = -EFAULT;
5248  			goto err;
5249  		}
5250  
5251  		ret = binder_ioctl_get_freezer_info(&info);
5252  		if (ret < 0)
5253  			goto err;
5254  
5255  		if (copy_to_user(ubuf, &info, sizeof(info))) {
5256  			ret = -EFAULT;
5257  			goto err;
5258  		}
5259  		break;
5260  	}
5261  	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5262  		uint32_t enable;
5263  
5264  		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5265  			ret = -EFAULT;
5266  			goto err;
5267  		}
5268  		binder_inner_proc_lock(proc);
5269  		proc->oneway_spam_detection_enabled = (bool)enable;
5270  		binder_inner_proc_unlock(proc);
5271  		break;
5272  	}
5273  	default:
5274  		ret = -EINVAL;
5275  		goto err;
5276  	}
5277  	ret = 0;
5278  err:
5279  	if (thread)
5280  		thread->looper_need_return = false;
5281  	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5282  	if (ret && ret != -EINTR)
5283  		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5284  err_unlocked:
5285  	trace_binder_ioctl_done(ret);
5286  	return ret;
5287  }
5288  
5289  static void binder_vma_open(struct vm_area_struct *vma)
5290  {
5291  	struct binder_proc *proc = vma->vm_private_data;
5292  
5293  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5294  		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5295  		     proc->pid, vma->vm_start, vma->vm_end,
5296  		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5297  		     (unsigned long)pgprot_val(vma->vm_page_prot));
5298  }
5299  
5300  static void binder_vma_close(struct vm_area_struct *vma)
5301  {
5302  	struct binder_proc *proc = vma->vm_private_data;
5303  
5304  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5305  		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5306  		     proc->pid, vma->vm_start, vma->vm_end,
5307  		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5308  		     (unsigned long)pgprot_val(vma->vm_page_prot));
5309  	binder_alloc_vma_close(&proc->alloc);
5310  }
5311  
5312  static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5313  {
5314  	return VM_FAULT_SIGBUS;
5315  }
5316  
5317  static const struct vm_operations_struct binder_vm_ops = {
5318  	.open = binder_vma_open,
5319  	.close = binder_vma_close,
5320  	.fault = binder_vm_fault,
5321  };
5322  
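/*
 * Illustrative sketch only, not part of the driver: the mapping userspace is
 * expected to create. It must not be writable (FORBIDDEN_MMAP_FLAGS rejects
 * VM_WRITE, and VM_MAYWRITE is cleared so a later mprotect() to PROT_WRITE
 * also fails); the driver copies transaction data into it and userspace only
 * reads it. The size below is a hypothetical example.
 *
 *	void *buf = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 *	if (buf == MAP_FAILED)
 *		perror("mmap");	// a PROT_WRITE mapping is refused with -EPERM
 */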
5323  static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5324  {
5325  	struct binder_proc *proc = filp->private_data;
5326  
5327  	if (proc->tsk != current->group_leader)
5328  		return -EINVAL;
5329  
5330  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5331  		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5332  		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5333  		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5334  		     (unsigned long)pgprot_val(vma->vm_page_prot));
5335  
5336  	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5337  		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5338  		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5339  		return -EPERM;
5340  	}
5341  	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5342  	vma->vm_flags &= ~VM_MAYWRITE;
5343  
5344  	vma->vm_ops = &binder_vm_ops;
5345  	vma->vm_private_data = proc;
5346  
5347  	return binder_alloc_mmap_handler(&proc->alloc, vma);
5348  }
5349  
5350  static int binder_open(struct inode *nodp, struct file *filp)
5351  {
5352  	struct binder_proc *proc, *itr;
5353  	struct binder_device *binder_dev;
5354  	struct binderfs_info *info;
5355  	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5356  	bool existing_pid = false;
5357  
5358  	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5359  		     current->group_leader->pid, current->pid);
5360  
5361  	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5362  	if (proc == NULL)
5363  		return -ENOMEM;
5364  	spin_lock_init(&proc->inner_lock);
5365  	spin_lock_init(&proc->outer_lock);
5366  	get_task_struct(current->group_leader);
5367  	proc->tsk = current->group_leader;
5368  	proc->cred = get_cred(filp->f_cred);
5369  	INIT_LIST_HEAD(&proc->todo);
5370  	init_waitqueue_head(&proc->freeze_wait);
5371  	proc->default_priority = task_nice(current);
5372  	/* binderfs stashes devices in i_private */
5373  	if (is_binderfs_device(nodp)) {
5374  		binder_dev = nodp->i_private;
5375  		info = nodp->i_sb->s_fs_info;
5376  		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5377  	} else {
5378  		binder_dev = container_of(filp->private_data,
5379  					  struct binder_device, miscdev);
5380  	}
5381  	refcount_inc(&binder_dev->ref);
5382  	proc->context = &binder_dev->context;
5383  	binder_alloc_init(&proc->alloc);
5384  
5385  	binder_stats_created(BINDER_STAT_PROC);
5386  	proc->pid = current->group_leader->pid;
5387  	INIT_LIST_HEAD(&proc->delivered_death);
5388  	INIT_LIST_HEAD(&proc->waiting_threads);
5389  	filp->private_data = proc;
5390  
5391  	mutex_lock(&binder_procs_lock);
5392  	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5393  		if (itr->pid == proc->pid) {
5394  			existing_pid = true;
5395  			break;
5396  		}
5397  	}
5398  	hlist_add_head(&proc->proc_node, &binder_procs);
5399  	mutex_unlock(&binder_procs_lock);
5400  
5401  	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5402  		char strbuf[11];
5403  
5404  		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5405  		/*
5406  		 * proc debug entries are shared between contexts.
5407  		 * Only create for the first PID to avoid debugfs log spamming.
5408  		 * The printing code will print all contexts for a given PID
5409  		 * anyway, so this is not a problem.
5410  		 */
5411  		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5412  			binder_debugfs_dir_entry_proc,
5413  			(void *)(unsigned long)proc->pid,
5414  			&proc_fops);
5415  	}
5416  
5417  	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5418  		char strbuf[11];
5419  		struct dentry *binderfs_entry;
5420  
5421  		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5422  		/*
5423  		 * Similar to debugfs, the process specific log file is shared
5424  		 * between contexts. Only create for the first PID.
5425  		 * This is ok since same as debugfs, the log file will contain
5426  		 * information on all contexts of a given PID.
5427  		 */
5428  		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5429  			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5430  		if (!IS_ERR(binderfs_entry)) {
5431  			proc->binderfs_entry = binderfs_entry;
5432  		} else {
5433  			int error;
5434  
5435  			error = PTR_ERR(binderfs_entry);
5436  			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5437  				strbuf, error);
5438  		}
5439  	}
5440  
5441  	return 0;
5442  }
5443  
5444  static int binder_flush(struct file *filp, fl_owner_t id)
5445  {
5446  	struct binder_proc *proc = filp->private_data;
5447  
5448  	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5449  
5450  	return 0;
5451  }
5452  
5453  static void binder_deferred_flush(struct binder_proc *proc)
5454  {
5455  	struct rb_node *n;
5456  	int wake_count = 0;
5457  
5458  	binder_inner_proc_lock(proc);
5459  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5460  		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5461  
5462  		thread->looper_need_return = true;
5463  		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5464  			wake_up_interruptible(&thread->wait);
5465  			wake_count++;
5466  		}
5467  	}
5468  	binder_inner_proc_unlock(proc);
5469  
5470  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5471  		     "binder_flush: %d woke %d threads\n", proc->pid,
5472  		     wake_count);
5473  }
5474  
5475  static int binder_release(struct inode *nodp, struct file *filp)
5476  {
5477  	struct binder_proc *proc = filp->private_data;
5478  
5479  	debugfs_remove(proc->debugfs_entry);
5480  
5481  	if (proc->binderfs_entry) {
5482  		binderfs_remove_file(proc->binderfs_entry);
5483  		proc->binderfs_entry = NULL;
5484  	}
5485  
5486  	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5487  
5488  	return 0;
5489  }
5490  
5491  static int binder_node_release(struct binder_node *node, int refs)
5492  {
5493  	struct binder_ref *ref;
5494  	int death = 0;
5495  	struct binder_proc *proc = node->proc;
5496  
5497  	binder_release_work(proc, &node->async_todo);
5498  
5499  	binder_node_lock(node);
5500  	binder_inner_proc_lock(proc);
5501  	binder_dequeue_work_ilocked(&node->work);
5502  	/*
5503  	 * The caller must have taken a temporary ref on the node.
5504  	 */
5505  	BUG_ON(!node->tmp_refs);
5506  	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5507  		binder_inner_proc_unlock(proc);
5508  		binder_node_unlock(node);
5509  		binder_free_node(node);
5510  
5511  		return refs;
5512  	}
5513  
5514  	node->proc = NULL;
5515  	node->local_strong_refs = 0;
5516  	node->local_weak_refs = 0;
5517  	binder_inner_proc_unlock(proc);
5518  
5519  	spin_lock(&binder_dead_nodes_lock);
5520  	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5521  	spin_unlock(&binder_dead_nodes_lock);
5522  
5523  	hlist_for_each_entry(ref, &node->refs, node_entry) {
5524  		refs++;
5525  		/*
5526  		 * Need the node lock to synchronize
5527  		 * with new notification requests and the
5528  		 * inner lock to synchronize with queued
5529  		 * death notifications.
5530  		 */
5531  		binder_inner_proc_lock(ref->proc);
5532  		if (!ref->death) {
5533  			binder_inner_proc_unlock(ref->proc);
5534  			continue;
5535  		}
5536  
5537  		death++;
5538  
5539  		BUG_ON(!list_empty(&ref->death->work.entry));
5540  		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5541  		binder_enqueue_work_ilocked(&ref->death->work,
5542  					    &ref->proc->todo);
5543  		binder_wakeup_proc_ilocked(ref->proc);
5544  		binder_inner_proc_unlock(ref->proc);
5545  	}
5546  
5547  	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5548  		     "node %d now dead, refs %d, death %d\n",
5549  		     node->debug_id, refs, death);
5550  	binder_node_unlock(node);
5551  	binder_put_node(node);
5552  
5553  	return refs;
5554  }
5555  
5556  static void binder_deferred_release(struct binder_proc *proc)
5557  {
5558  	struct binder_context *context = proc->context;
5559  	struct rb_node *n;
5560  	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5561  
5562  	mutex_lock(&binder_procs_lock);
5563  	hlist_del(&proc->proc_node);
5564  	mutex_unlock(&binder_procs_lock);
5565  
5566  	mutex_lock(&context->context_mgr_node_lock);
5567  	if (context->binder_context_mgr_node &&
5568  	    context->binder_context_mgr_node->proc == proc) {
5569  		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5570  			     "%s: %d context_mgr_node gone\n",
5571  			     __func__, proc->pid);
5572  		context->binder_context_mgr_node = NULL;
5573  	}
5574  	mutex_unlock(&context->context_mgr_node_lock);
5575  	binder_inner_proc_lock(proc);
5576  	/*
5577  	 * Make sure proc stays alive after we
5578  	 * remove all the threads
5579  	 */
5580  	proc->tmp_ref++;
5581  
5582  	proc->is_dead = true;
5583  	proc->is_frozen = false;
5584  	proc->sync_recv = false;
5585  	proc->async_recv = false;
5586  	threads = 0;
5587  	active_transactions = 0;
5588  	while ((n = rb_first(&proc->threads))) {
5589  		struct binder_thread *thread;
5590  
5591  		thread = rb_entry(n, struct binder_thread, rb_node);
5592  		binder_inner_proc_unlock(proc);
5593  		threads++;
5594  		active_transactions += binder_thread_release(proc, thread);
5595  		binder_inner_proc_lock(proc);
5596  	}
5597  
5598  	nodes = 0;
5599  	incoming_refs = 0;
5600  	while ((n = rb_first(&proc->nodes))) {
5601  		struct binder_node *node;
5602  
5603  		node = rb_entry(n, struct binder_node, rb_node);
5604  		nodes++;
5605  		/*
5606  		 * take a temporary ref on the node before
5607  		 * calling binder_node_release() which will either
5608  		 * kfree() the node or call binder_put_node()
5609  		 */
5610  		binder_inc_node_tmpref_ilocked(node);
5611  		rb_erase(&node->rb_node, &proc->nodes);
5612  		binder_inner_proc_unlock(proc);
5613  		incoming_refs = binder_node_release(node, incoming_refs);
5614  		binder_inner_proc_lock(proc);
5615  	}
5616  	binder_inner_proc_unlock(proc);
5617  
5618  	outgoing_refs = 0;
5619  	binder_proc_lock(proc);
5620  	while ((n = rb_first(&proc->refs_by_desc))) {
5621  		struct binder_ref *ref;
5622  
5623  		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5624  		outgoing_refs++;
5625  		binder_cleanup_ref_olocked(ref);
5626  		binder_proc_unlock(proc);
5627  		binder_free_ref(ref);
5628  		binder_proc_lock(proc);
5629  	}
5630  	binder_proc_unlock(proc);
5631  
5632  	binder_release_work(proc, &proc->todo);
5633  	binder_release_work(proc, &proc->delivered_death);
5634  
5635  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5636  		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5637  		     __func__, proc->pid, threads, nodes, incoming_refs,
5638  		     outgoing_refs, active_transactions);
5639  
5640  	binder_proc_dec_tmpref(proc);
5641  }
5642  
5643  static void binder_deferred_func(struct work_struct *work)
5644  {
5645  	struct binder_proc *proc;
5646  
5647  	int defer;
5648  
5649  	do {
5650  		mutex_lock(&binder_deferred_lock);
5651  		if (!hlist_empty(&binder_deferred_list)) {
5652  			proc = hlist_entry(binder_deferred_list.first,
5653  					struct binder_proc, deferred_work_node);
5654  			hlist_del_init(&proc->deferred_work_node);
5655  			defer = proc->deferred_work;
5656  			proc->deferred_work = 0;
5657  		} else {
5658  			proc = NULL;
5659  			defer = 0;
5660  		}
5661  		mutex_unlock(&binder_deferred_lock);
5662  
5663  		if (defer & BINDER_DEFERRED_FLUSH)
5664  			binder_deferred_flush(proc);
5665  
5666  		if (defer & BINDER_DEFERRED_RELEASE)
5667  			binder_deferred_release(proc); /* frees proc */
5668  	} while (proc);
5669  }
5670  static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5671  
5672  static void
5673  binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5674  {
5675  	mutex_lock(&binder_deferred_lock);
5676  	proc->deferred_work |= defer;
5677  	if (hlist_unhashed(&proc->deferred_work_node)) {
5678  		hlist_add_head(&proc->deferred_work_node,
5679  				&binder_deferred_list);
5680  		schedule_work(&binder_deferred_work);
5681  	}
5682  	mutex_unlock(&binder_deferred_lock);
5683  }
5684  
5685  static void print_binder_transaction_ilocked(struct seq_file *m,
5686  					     struct binder_proc *proc,
5687  					     const char *prefix,
5688  					     struct binder_transaction *t)
5689  {
5690  	struct binder_proc *to_proc;
5691  	struct binder_buffer *buffer = t->buffer;
5692  
5693  	spin_lock(&t->lock);
5694  	to_proc = t->to_proc;
5695  	seq_printf(m,
5696  		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5697  		   prefix, t->debug_id, t,
5698  		   t->from ? t->from->proc->pid : 0,
5699  		   t->from ? t->from->pid : 0,
5700  		   to_proc ? to_proc->pid : 0,
5701  		   t->to_thread ? t->to_thread->pid : 0,
5702  		   t->code, t->flags, t->priority, t->need_reply);
5703  	spin_unlock(&t->lock);
5704  
5705  	if (proc != to_proc) {
5706  		/*
5707  		 * Can only safely deref buffer if we are holding the
5708  		 * correct proc inner lock for this node
5709  		 */
5710  		seq_puts(m, "\n");
5711  		return;
5712  	}
5713  
5714  	if (buffer == NULL) {
5715  		seq_puts(m, " buffer free\n");
5716  		return;
5717  	}
5718  	if (buffer->target_node)
5719  		seq_printf(m, " node %d", buffer->target_node->debug_id);
5720  	seq_printf(m, " size %zd:%zd data %pK\n",
5721  		   buffer->data_size, buffer->offsets_size,
5722  		   buffer->user_data);
5723  }
5724  
5725  static void print_binder_work_ilocked(struct seq_file *m,
5726  				     struct binder_proc *proc,
5727  				     const char *prefix,
5728  				     const char *transaction_prefix,
5729  				     struct binder_work *w)
5730  {
5731  	struct binder_node *node;
5732  	struct binder_transaction *t;
5733  
5734  	switch (w->type) {
5735  	case BINDER_WORK_TRANSACTION:
5736  		t = container_of(w, struct binder_transaction, work);
5737  		print_binder_transaction_ilocked(
5738  				m, proc, transaction_prefix, t);
5739  		break;
5740  	case BINDER_WORK_RETURN_ERROR: {
5741  		struct binder_error *e = container_of(
5742  				w, struct binder_error, work);
5743  
5744  		seq_printf(m, "%stransaction error: %u\n",
5745  			   prefix, e->cmd);
5746  	} break;
5747  	case BINDER_WORK_TRANSACTION_COMPLETE:
5748  		seq_printf(m, "%stransaction complete\n", prefix);
5749  		break;
5750  	case BINDER_WORK_NODE:
5751  		node = container_of(w, struct binder_node, work);
5752  		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5753  			   prefix, node->debug_id,
5754  			   (u64)node->ptr, (u64)node->cookie);
5755  		break;
5756  	case BINDER_WORK_DEAD_BINDER:
5757  		seq_printf(m, "%shas dead binder\n", prefix);
5758  		break;
5759  	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5760  		seq_printf(m, "%shas cleared dead binder\n", prefix);
5761  		break;
5762  	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5763  		seq_printf(m, "%shas cleared death notification\n", prefix);
5764  		break;
5765  	default:
5766  		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5767  		break;
5768  	}
5769  }
5770  
5771  static void print_binder_thread_ilocked(struct seq_file *m,
5772  					struct binder_thread *thread,
5773  					int print_always)
5774  {
5775  	struct binder_transaction *t;
5776  	struct binder_work *w;
5777  	size_t start_pos = m->count;
5778  	size_t header_pos;
5779  
5780  	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5781  			thread->pid, thread->looper,
5782  			thread->looper_need_return,
5783  			atomic_read(&thread->tmp_ref));
5784  	header_pos = m->count;
5785  	t = thread->transaction_stack;
5786  	while (t) {
5787  		if (t->from == thread) {
5788  			print_binder_transaction_ilocked(m, thread->proc,
5789  					"    outgoing transaction", t);
5790  			t = t->from_parent;
5791  		} else if (t->to_thread == thread) {
5792  			print_binder_transaction_ilocked(m, thread->proc,
5793  						 "    incoming transaction", t);
5794  			t = t->to_parent;
5795  		} else {
5796  			print_binder_transaction_ilocked(m, thread->proc,
5797  					"    bad transaction", t);
5798  			t = NULL;
5799  		}
5800  	}
5801  	list_for_each_entry(w, &thread->todo, entry) {
5802  		print_binder_work_ilocked(m, thread->proc, "    ",
5803  					  "    pending transaction", w);
5804  	}
5805  	if (!print_always && m->count == header_pos)
5806  		m->count = start_pos;
5807  }
5808  
5809  static void print_binder_node_nilocked(struct seq_file *m,
5810  				       struct binder_node *node)
5811  {
5812  	struct binder_ref *ref;
5813  	struct binder_work *w;
5814  	int count;
5815  
5816  	count = 0;
5817  	hlist_for_each_entry(ref, &node->refs, node_entry)
5818  		count++;
5819  
5820  	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5821  		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5822  		   node->has_strong_ref, node->has_weak_ref,
5823  		   node->local_strong_refs, node->local_weak_refs,
5824  		   node->internal_strong_refs, count, node->tmp_refs);
5825  	if (count) {
5826  		seq_puts(m, " proc");
5827  		hlist_for_each_entry(ref, &node->refs, node_entry)
5828  			seq_printf(m, " %d", ref->proc->pid);
5829  	}
5830  	seq_puts(m, "\n");
5831  	if (node->proc) {
5832  		list_for_each_entry(w, &node->async_todo, entry)
5833  			print_binder_work_ilocked(m, node->proc, "    ",
5834  					  "    pending async transaction", w);
5835  	}
5836  }
5837  
5838  static void print_binder_ref_olocked(struct seq_file *m,
5839  				     struct binder_ref *ref)
5840  {
5841  	binder_node_lock(ref->node);
5842  	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5843  		   ref->data.debug_id, ref->data.desc,
5844  		   ref->node->proc ? "" : "dead ",
5845  		   ref->node->debug_id, ref->data.strong,
5846  		   ref->data.weak, ref->death);
5847  	binder_node_unlock(ref->node);
5848  }
5849  
5850  static void print_binder_proc(struct seq_file *m,
5851  			      struct binder_proc *proc, int print_all)
5852  {
5853  	struct binder_work *w;
5854  	struct rb_node *n;
5855  	size_t start_pos = m->count;
5856  	size_t header_pos;
5857  	struct binder_node *last_node = NULL;
5858  
5859  	seq_printf(m, "proc %d\n", proc->pid);
5860  	seq_printf(m, "context %s\n", proc->context->name);
5861  	header_pos = m->count;
5862  
5863  	binder_inner_proc_lock(proc);
5864  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5865  		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5866  						rb_node), print_all);
5867  
5868  	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5869  		struct binder_node *node = rb_entry(n, struct binder_node,
5870  						    rb_node);
5871  		if (!print_all && !node->has_async_transaction)
5872  			continue;
5873  
5874  		/*
5875  		 * take a temporary reference on the node so it
5876  		 * survives and isn't removed from the tree
5877  		 * while we print it.
5878  		 */
5879  		binder_inc_node_tmpref_ilocked(node);
5880  		/* Need to drop inner lock to take node lock */
5881  		binder_inner_proc_unlock(proc);
5882  		if (last_node)
5883  			binder_put_node(last_node);
5884  		binder_node_inner_lock(node);
5885  		print_binder_node_nilocked(m, node);
5886  		binder_node_inner_unlock(node);
5887  		last_node = node;
5888  		binder_inner_proc_lock(proc);
5889  	}
5890  	binder_inner_proc_unlock(proc);
5891  	if (last_node)
5892  		binder_put_node(last_node);
5893  
5894  	if (print_all) {
5895  		binder_proc_lock(proc);
5896  		for (n = rb_first(&proc->refs_by_desc);
5897  		     n != NULL;
5898  		     n = rb_next(n))
5899  			print_binder_ref_olocked(m, rb_entry(n,
5900  							    struct binder_ref,
5901  							    rb_node_desc));
5902  		binder_proc_unlock(proc);
5903  	}
5904  	binder_alloc_print_allocated(m, &proc->alloc);
5905  	binder_inner_proc_lock(proc);
5906  	list_for_each_entry(w, &proc->todo, entry)
5907  		print_binder_work_ilocked(m, proc, "  ",
5908  					  "  pending transaction", w);
5909  	list_for_each_entry(w, &proc->delivered_death, entry) {
5910  		seq_puts(m, "  has delivered dead binder\n");
5911  		break;
5912  	}
5913  	binder_inner_proc_unlock(proc);
5914  	if (!print_all && m->count == header_pos)
5915  		m->count = start_pos;
5916  }
5917  
5918  static const char * const binder_return_strings[] = {
5919  	"BR_ERROR",
5920  	"BR_OK",
5921  	"BR_TRANSACTION",
5922  	"BR_REPLY",
5923  	"BR_ACQUIRE_RESULT",
5924  	"BR_DEAD_REPLY",
5925  	"BR_TRANSACTION_COMPLETE",
5926  	"BR_INCREFS",
5927  	"BR_ACQUIRE",
5928  	"BR_RELEASE",
5929  	"BR_DECREFS",
5930  	"BR_ATTEMPT_ACQUIRE",
5931  	"BR_NOOP",
5932  	"BR_SPAWN_LOOPER",
5933  	"BR_FINISHED",
5934  	"BR_DEAD_BINDER",
5935  	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5936  	"BR_FAILED_REPLY",
5937  	"BR_FROZEN_REPLY",
5938  	"BR_ONEWAY_SPAM_SUSPECT",
5939  };
5940  
5941  static const char * const binder_command_strings[] = {
5942  	"BC_TRANSACTION",
5943  	"BC_REPLY",
5944  	"BC_ACQUIRE_RESULT",
5945  	"BC_FREE_BUFFER",
5946  	"BC_INCREFS",
5947  	"BC_ACQUIRE",
5948  	"BC_RELEASE",
5949  	"BC_DECREFS",
5950  	"BC_INCREFS_DONE",
5951  	"BC_ACQUIRE_DONE",
5952  	"BC_ATTEMPT_ACQUIRE",
5953  	"BC_REGISTER_LOOPER",
5954  	"BC_ENTER_LOOPER",
5955  	"BC_EXIT_LOOPER",
5956  	"BC_REQUEST_DEATH_NOTIFICATION",
5957  	"BC_CLEAR_DEATH_NOTIFICATION",
5958  	"BC_DEAD_BINDER_DONE",
5959  	"BC_TRANSACTION_SG",
5960  	"BC_REPLY_SG",
5961  };
5962  
5963  static const char * const binder_objstat_strings[] = {
5964  	"proc",
5965  	"thread",
5966  	"node",
5967  	"ref",
5968  	"death",
5969  	"transaction",
5970  	"transaction_complete"
5971  };
5972  
5973  static void print_binder_stats(struct seq_file *m, const char *prefix,
5974  			       struct binder_stats *stats)
5975  {
5976  	int i;
5977  
5978  	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5979  		     ARRAY_SIZE(binder_command_strings));
5980  	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5981  		int temp = atomic_read(&stats->bc[i]);
5982  
5983  		if (temp)
5984  			seq_printf(m, "%s%s: %d\n", prefix,
5985  				   binder_command_strings[i], temp);
5986  	}
5987  
5988  	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5989  		     ARRAY_SIZE(binder_return_strings));
5990  	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5991  		int temp = atomic_read(&stats->br[i]);
5992  
5993  		if (temp)
5994  			seq_printf(m, "%s%s: %d\n", prefix,
5995  				   binder_return_strings[i], temp);
5996  	}
5997  
5998  	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5999  		     ARRAY_SIZE(binder_objstat_strings));
6000  	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6001  		     ARRAY_SIZE(stats->obj_deleted));
6002  	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6003  		int created = atomic_read(&stats->obj_created[i]);
6004  		int deleted = atomic_read(&stats->obj_deleted[i]);
6005  
6006  		if (created || deleted)
6007  			seq_printf(m, "%s%s: active %d total %d\n",
6008  				prefix,
6009  				binder_objstat_strings[i],
6010  				created - deleted,
6011  				created);
6012  	}
6013  }
6014  
6015  static void print_binder_proc_stats(struct seq_file *m,
6016  				    struct binder_proc *proc)
6017  {
6018  	struct binder_work *w;
6019  	struct binder_thread *thread;
6020  	struct rb_node *n;
6021  	int count, strong, weak, ready_threads;
6022  	size_t free_async_space =
6023  		binder_alloc_get_free_async_space(&proc->alloc);
6024  
6025  	seq_printf(m, "proc %d\n", proc->pid);
6026  	seq_printf(m, "context %s\n", proc->context->name);
6027  	count = 0;
6028  	ready_threads = 0;
6029  	binder_inner_proc_lock(proc);
6030  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6031  		count++;
6032  
6033  	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6034  		ready_threads++;
6035  
6036  	seq_printf(m, "  threads: %d\n", count);
6037  	seq_printf(m, "  requested threads: %d+%d/%d\n"
6038  			"  ready threads %d\n"
6039  			"  free async space %zd\n", proc->requested_threads,
6040  			proc->requested_threads_started, proc->max_threads,
6041  			ready_threads,
6042  			free_async_space);
6043  	count = 0;
6044  	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6045  		count++;
6046  	binder_inner_proc_unlock(proc);
6047  	seq_printf(m, "  nodes: %d\n", count);
6048  	count = 0;
6049  	strong = 0;
6050  	weak = 0;
6051  	binder_proc_lock(proc);
6052  	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6053  		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6054  						  rb_node_desc);
6055  		count++;
6056  		strong += ref->data.strong;
6057  		weak += ref->data.weak;
6058  	}
6059  	binder_proc_unlock(proc);
6060  	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6061  
6062  	count = binder_alloc_get_allocated_count(&proc->alloc);
6063  	seq_printf(m, "  buffers: %d\n", count);
6064  
6065  	binder_alloc_print_pages(m, &proc->alloc);
6066  
6067  	count = 0;
6068  	binder_inner_proc_lock(proc);
6069  	list_for_each_entry(w, &proc->todo, entry) {
6070  		if (w->type == BINDER_WORK_TRANSACTION)
6071  			count++;
6072  	}
6073  	binder_inner_proc_unlock(proc);
6074  	seq_printf(m, "  pending transactions: %d\n", count);
6075  
6076  	print_binder_stats(m, "  ", &proc->stats);
6077  }
6078  
6079  
6080  int binder_state_show(struct seq_file *m, void *unused)
6081  {
6082  	struct binder_proc *proc;
6083  	struct binder_node *node;
6084  	struct binder_node *last_node = NULL;
6085  
6086  	seq_puts(m, "binder state:\n");
6087  
6088  	spin_lock(&binder_dead_nodes_lock);
6089  	if (!hlist_empty(&binder_dead_nodes))
6090  		seq_puts(m, "dead nodes:\n");
6091  	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6092  		/*
6093  		 * take a temporary reference on the node so it
6094  		 * survives and isn't removed from the list
6095  		 * while we print it.
6096  		 */
6097  		node->tmp_refs++;
6098  		spin_unlock(&binder_dead_nodes_lock);
6099  		if (last_node)
6100  			binder_put_node(last_node);
6101  		binder_node_lock(node);
6102  		print_binder_node_nilocked(m, node);
6103  		binder_node_unlock(node);
6104  		last_node = node;
6105  		spin_lock(&binder_dead_nodes_lock);
6106  	}
6107  	spin_unlock(&binder_dead_nodes_lock);
6108  	if (last_node)
6109  		binder_put_node(last_node);
6110  
6111  	mutex_lock(&binder_procs_lock);
6112  	hlist_for_each_entry(proc, &binder_procs, proc_node)
6113  		print_binder_proc(m, proc, 1);
6114  	mutex_unlock(&binder_procs_lock);
6115  
6116  	return 0;
6117  }
6118  
6119  int binder_stats_show(struct seq_file *m, void *unused)
6120  {
6121  	struct binder_proc *proc;
6122  
6123  	seq_puts(m, "binder stats:\n");
6124  
6125  	print_binder_stats(m, "", &binder_stats);
6126  
6127  	mutex_lock(&binder_procs_lock);
6128  	hlist_for_each_entry(proc, &binder_procs, proc_node)
6129  		print_binder_proc_stats(m, proc);
6130  	mutex_unlock(&binder_procs_lock);
6131  
6132  	return 0;
6133  }
6134  
6135  int binder_transactions_show(struct seq_file *m, void *unused)
6136  {
6137  	struct binder_proc *proc;
6138  
6139  	seq_puts(m, "binder transactions:\n");
6140  	mutex_lock(&binder_procs_lock);
6141  	hlist_for_each_entry(proc, &binder_procs, proc_node)
6142  		print_binder_proc(m, proc, 0);
6143  	mutex_unlock(&binder_procs_lock);
6144  
6145  	return 0;
6146  }
6147  
6148  static int proc_show(struct seq_file *m, void *unused)
6149  {
6150  	struct binder_proc *itr;
6151  	int pid = (unsigned long)m->private;
6152  
6153  	mutex_lock(&binder_procs_lock);
6154  	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6155  		if (itr->pid == pid) {
6156  			seq_puts(m, "binder proc state:\n");
6157  			print_binder_proc(m, itr, 1);
6158  		}
6159  	}
6160  	mutex_unlock(&binder_procs_lock);
6161  
6162  	return 0;
6163  }
6164  
6165  static void print_binder_transaction_log_entry(struct seq_file *m,
6166  					struct binder_transaction_log_entry *e)
6167  {
6168  	int debug_id = READ_ONCE(e->debug_id_done);
6169  	/*
6170  	 * read barrier to guarantee debug_id_done read before
6171  	 * we print the log values
6172  	 */
6173  	smp_rmb();
6174  	seq_printf(m,
6175  		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6176  		   e->debug_id, (e->call_type == 2) ? "reply" :
6177  		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6178  		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6179  		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6180  		   e->return_error, e->return_error_param,
6181  		   e->return_error_line);
6182  	/*
6183  	 * read-barrier to guarantee read of debug_id_done after
6184  	 * done printing the fields of the entry
6185  	 */
6186  	smp_rmb();
6187  	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6188  			"\n" : " (incomplete)\n");
6189  }
6190  
6191  int binder_transaction_log_show(struct seq_file *m, void *unused)
6192  {
6193  	struct binder_transaction_log *log = m->private;
6194  	unsigned int log_cur = atomic_read(&log->cur);
6195  	unsigned int count;
6196  	unsigned int cur;
6197  	int i;
6198  
6199  	count = log_cur + 1;
6200  	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6201  		0 : count % ARRAY_SIZE(log->entry);
6202  	if (count > ARRAY_SIZE(log->entry) || log->full)
6203  		count = ARRAY_SIZE(log->entry);
6204  	for (i = 0; i < count; i++) {
6205  		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6206  
6207  		print_binder_transaction_log_entry(m, &log->entry[index]);
6208  	}
6209  	return 0;
6210  }
6211  
6212  const struct file_operations binder_fops = {
6213  	.owner = THIS_MODULE,
6214  	.poll = binder_poll,
6215  	.unlocked_ioctl = binder_ioctl,
6216  	.compat_ioctl = compat_ptr_ioctl,
6217  	.mmap = binder_mmap,
6218  	.open = binder_open,
6219  	.flush = binder_flush,
6220  	.release = binder_release,
6221  };
6222  
6223  static int __init init_binder_device(const char *name)
6224  {
6225  	int ret;
6226  	struct binder_device *binder_device;
6227  
6228  	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6229  	if (!binder_device)
6230  		return -ENOMEM;
6231  
6232  	binder_device->miscdev.fops = &binder_fops;
6233  	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6234  	binder_device->miscdev.name = name;
6235  
6236  	refcount_set(&binder_device->ref, 1);
6237  	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6238  	binder_device->context.name = name;
6239  	mutex_init(&binder_device->context.context_mgr_node_lock);
6240  
6241  	ret = misc_register(&binder_device->miscdev);
6242  	if (ret < 0) {
6243  		kfree(binder_device);
6244  		return ret;
6245  	}
6246  
6247  	hlist_add_head(&binder_device->hlist, &binder_devices);
6248  
6249  	return ret;
6250  }
6251  
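/*
 * Illustrative note, not part of the driver: when binderfs is not used, the
 * devices registered here come from a comma-separated list, e.g. the Kconfig
 * default
 *
 *	CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
 *
 * which binder_init() tokenizes with strsep() and turns into one miscdevice
 * per name. The exact list is board configuration, not fixed.
 */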
6252  static int __init binder_init(void)
6253  {
6254  	int ret;
6255  	char *device_name, *device_tmp;
6256  	struct binder_device *device;
6257  	struct hlist_node *tmp;
6258  	char *device_names = NULL;
6259  
6260  	ret = binder_alloc_shrinker_init();
6261  	if (ret)
6262  		return ret;
6263  
6264  	atomic_set(&binder_transaction_log.cur, ~0U);
6265  	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6266  
6267  	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6268  	if (binder_debugfs_dir_entry_root)
6269  		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6270  						 binder_debugfs_dir_entry_root);
6271  
6272  	if (binder_debugfs_dir_entry_root) {
6273  		debugfs_create_file("state",
6274  				    0444,
6275  				    binder_debugfs_dir_entry_root,
6276  				    NULL,
6277  				    &binder_state_fops);
6278  		debugfs_create_file("stats",
6279  				    0444,
6280  				    binder_debugfs_dir_entry_root,
6281  				    NULL,
6282  				    &binder_stats_fops);
6283  		debugfs_create_file("transactions",
6284  				    0444,
6285  				    binder_debugfs_dir_entry_root,
6286  				    NULL,
6287  				    &binder_transactions_fops);
6288  		debugfs_create_file("transaction_log",
6289  				    0444,
6290  				    binder_debugfs_dir_entry_root,
6291  				    &binder_transaction_log,
6292  				    &binder_transaction_log_fops);
6293  		debugfs_create_file("failed_transaction_log",
6294  				    0444,
6295  				    binder_debugfs_dir_entry_root,
6296  				    &binder_transaction_log_failed,
6297  				    &binder_transaction_log_fops);
6298  	}
6299  
6300  	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6301  	    strcmp(binder_devices_param, "") != 0) {
6302  		/*
6303  		 * Copy the module parameter string, because we don't want to
6304  		 * tokenize it in-place.
6305  		 */
6306  		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6307  		if (!device_names) {
6308  			ret = -ENOMEM;
6309  			goto err_alloc_device_names_failed;
6310  		}
6311  
6312  		device_tmp = device_names;
6313  		while ((device_name = strsep(&device_tmp, ","))) {
6314  			ret = init_binder_device(device_name);
6315  			if (ret)
6316  				goto err_init_binder_device_failed;
6317  		}
6318  	}
6319  
6320  	ret = init_binderfs();
6321  	if (ret)
6322  		goto err_init_binder_device_failed;
6323  
6324  	return ret;
6325  
6326  err_init_binder_device_failed:
6327  	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6328  		misc_deregister(&device->miscdev);
6329  		hlist_del(&device->hlist);
6330  		kfree(device);
6331  	}
6332  
6333  	kfree(device_names);
6334  
6335  err_alloc_device_names_failed:
6336  	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6337  
6338  	return ret;
6339  }
6340  
6341  device_initcall(binder_init);
6342  
6343  #define CREATE_TRACE_POINTS
6344  #include "binder_trace.h"
6345  
6346  MODULE_LICENSE("GPL v2");
6347