xref: /linux/drivers/android/binder.c (revision bdfa89c489296f092751fcee23b5d171c9fdc7f5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock to be held on entry indicate the
33  * required lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
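
/*
 * A minimal illustrative sketch (not part of the driver): a path that
 * needs all three locks for a given proc/node pair must take them in
 * the order documented above, e.g.:
 *
 *	binder_proc_lock(proc);		(1) proc->outer_lock
 *	binder_node_lock(node);		(2) node->lock
 *	binder_inner_proc_lock(proc);	(3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */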
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70 
71 #include <uapi/linux/android/binder.h>
72 
73 #include <linux/cacheflush.h>
74 
75 #include "binder_internal.h"
76 #include "binder_trace.h"
77 
78 static HLIST_HEAD(binder_deferred_list);
79 static DEFINE_MUTEX(binder_deferred_lock);
80 
81 static HLIST_HEAD(binder_devices);
82 static DEFINE_SPINLOCK(binder_devices_lock);
83 
84 static HLIST_HEAD(binder_procs);
85 static DEFINE_MUTEX(binder_procs_lock);
86 
87 static HLIST_HEAD(binder_dead_nodes);
88 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
89 
90 static struct dentry *binder_debugfs_dir_entry_root;
91 static struct dentry *binder_debugfs_dir_entry_proc;
92 static atomic_t binder_last_id;
93 
94 static int proc_show(struct seq_file *m, void *unused);
95 DEFINE_SHOW_ATTRIBUTE(proc);
96 
97 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
98 
99 enum {
100 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
101 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
102 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
103 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
104 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
105 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
106 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
107 	BINDER_DEBUG_USER_REFS              = 1U << 7,
108 	BINDER_DEBUG_THREADS                = 1U << 8,
109 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
110 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
111 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
112 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
113 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
114 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
115 };
116 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
117 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
118 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
119 
120 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
121 module_param_named(devices, binder_devices_param, charp, 0444);
122 
123 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
124 static int binder_stop_on_user_error;
125 
126 static int binder_set_stop_on_user_error(const char *val,
127 					 const struct kernel_param *kp)
128 {
129 	int ret;
130 
131 	ret = param_set_int(val, kp);
132 	if (binder_stop_on_user_error < 2)
133 		wake_up(&binder_user_error_wait);
134 	return ret;
135 }
136 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
137 	param_get_int, &binder_stop_on_user_error, 0644);
138 
139 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
140 {
141 	struct va_format vaf;
142 	va_list args;
143 
144 	if (binder_debug_mask & mask) {
145 		va_start(args, format);
146 		vaf.va = &args;
147 		vaf.fmt = format;
148 		pr_info_ratelimited("%pV", &vaf);
149 		va_end(args);
150 	}
151 }
152 
153 #define binder_txn_error(x...) \
154 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
155 
156 static __printf(1, 2) void binder_user_error(const char *format, ...)
157 {
158 	struct va_format vaf;
159 	va_list args;
160 
161 	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
162 		va_start(args, format);
163 		vaf.va = &args;
164 		vaf.fmt = format;
165 		pr_info_ratelimited("%pV", &vaf);
166 		va_end(args);
167 	}
168 
169 	if (binder_stop_on_user_error)
170 		binder_stop_on_user_error = 2;
171 }
172 
173 #define binder_set_extended_error(ee, _id, _command, _param) \
174 	do { \
175 		(ee)->id = _id; \
176 		(ee)->command = _command; \
177 		(ee)->param = _param; \
178 	} while (0)
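
/*
 * Hypothetical usage sketch: the driver records why a command failed so
 * that userspace can query it later (via the BINDER_GET_EXTENDED_ERROR
 * ioctl), along the lines of:
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */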
179 
180 #define to_flat_binder_object(hdr) \
181 	container_of(hdr, struct flat_binder_object, hdr)
182 
183 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
184 
185 #define to_binder_buffer_object(hdr) \
186 	container_of(hdr, struct binder_buffer_object, hdr)
187 
188 #define to_binder_fd_array_object(hdr) \
189 	container_of(hdr, struct binder_fd_array_object, hdr)
190 
191 static struct binder_stats binder_stats;
192 
193 static inline void binder_stats_deleted(enum binder_stat_types type)
194 {
195 	atomic_inc(&binder_stats.obj_deleted[type]);
196 }
197 
198 static inline void binder_stats_created(enum binder_stat_types type)
199 {
200 	atomic_inc(&binder_stats.obj_created[type]);
201 }
202 
203 struct binder_transaction_log_entry {
204 	int debug_id;
205 	int debug_id_done;
206 	int call_type;
207 	int from_proc;
208 	int from_thread;
209 	int target_handle;
210 	int to_proc;
211 	int to_thread;
212 	int to_node;
213 	int data_size;
214 	int offsets_size;
215 	int return_error_line;
216 	uint32_t return_error;
217 	uint32_t return_error_param;
218 	char context_name[BINDERFS_MAX_NAME + 1];
219 };
220 
221 struct binder_transaction_log {
222 	atomic_t cur;
223 	bool full;
224 	struct binder_transaction_log_entry entry[32];
225 };
226 
227 static struct binder_transaction_log binder_transaction_log;
228 static struct binder_transaction_log binder_transaction_log_failed;
229 
230 static struct binder_transaction_log_entry *binder_transaction_log_add(
231 	struct binder_transaction_log *log)
232 {
233 	struct binder_transaction_log_entry *e;
234 	unsigned int cur = atomic_inc_return(&log->cur);
235 
236 	if (cur >= ARRAY_SIZE(log->entry))
237 		log->full = true;
238 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
239 	WRITE_ONCE(e->debug_id_done, 0);
240 	/*
241 	 * write-barrier to synchronize access to e->debug_id_done.
242 	 * We make sure the initialized 0 value is seen before
243 	 * the other fields are zeroed by memset().
244 	 */
245 	smp_wmb();
246 	memset(e, 0, sizeof(*e));
247 	return e;
248 }
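
/*
 * A sketch of the matching reader side (assuming the debugfs dump code
 * later in this file): read debug_id_done first, issue smp_rmb() to pair
 * with the smp_wmb() above, then read the payload and re-check
 * debug_id_done to detect an entry that was still being written:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	... read/print the entry fields ...
 *	if (done != READ_ONCE(e->debug_id_done))
 *		... entry was overwritten concurrently; mark incomplete ...
 */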
249 
250 enum binder_deferred_state {
251 	BINDER_DEFERRED_FLUSH        = 0x01,
252 	BINDER_DEFERRED_RELEASE      = 0x02,
253 };
254 
255 enum {
256 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
257 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
258 	BINDER_LOOPER_STATE_EXITED      = 0x04,
259 	BINDER_LOOPER_STATE_INVALID     = 0x08,
260 	BINDER_LOOPER_STATE_WAITING     = 0x10,
261 	BINDER_LOOPER_STATE_POLL        = 0x20,
262 };
263 
264 /**
265  * binder_proc_lock() - Acquire outer lock for given binder_proc
266  * @proc:         struct binder_proc to acquire
267  *
268  * Acquires proc->outer_lock. Used to protect binder_ref
269  * structures associated with the given proc.
270  */
271 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
272 static void
273 _binder_proc_lock(struct binder_proc *proc, int line)
274 	__acquires(&proc->outer_lock)
275 {
276 	binder_debug(BINDER_DEBUG_SPINLOCKS,
277 		     "%s: line=%d\n", __func__, line);
278 	spin_lock(&proc->outer_lock);
279 }
280 
281 /**
282  * binder_proc_unlock() - Release outer lock for given binder_proc
283  * @proc:                struct binder_proc to release
284  *
285  * Release lock acquired via binder_proc_lock()
286  */
287 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
288 static void
289 _binder_proc_unlock(struct binder_proc *proc, int line)
290 	__releases(&proc->outer_lock)
291 {
292 	binder_debug(BINDER_DEBUG_SPINLOCKS,
293 		     "%s: line=%d\n", __func__, line);
294 	spin_unlock(&proc->outer_lock);
295 }
296 
297 /**
298  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
299  * @proc:         struct binder_proc to acquire
300  *
301  * Acquires proc->inner_lock. Used to protect todo lists
302  */
303 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
304 static void
305 _binder_inner_proc_lock(struct binder_proc *proc, int line)
306 	__acquires(&proc->inner_lock)
307 {
308 	binder_debug(BINDER_DEBUG_SPINLOCKS,
309 		     "%s: line=%d\n", __func__, line);
310 	spin_lock(&proc->inner_lock);
311 }
312 
313 /**
314  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
315  * @proc:         struct binder_proc to release
316  *
317  * Release lock acquired via binder_inner_proc_lock()
318  */
319 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
320 static void
321 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
322 	__releases(&proc->inner_lock)
323 {
324 	binder_debug(BINDER_DEBUG_SPINLOCKS,
325 		     "%s: line=%d\n", __func__, line);
326 	spin_unlock(&proc->inner_lock);
327 }
328 
329 /**
330  * binder_node_lock() - Acquire spinlock for given binder_node
331  * @node:         struct binder_node to acquire
332  *
333  * Acquires node->lock. Used to protect binder_node fields
334  */
335 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
336 static void
337 _binder_node_lock(struct binder_node *node, int line)
338 	__acquires(&node->lock)
339 {
340 	binder_debug(BINDER_DEBUG_SPINLOCKS,
341 		     "%s: line=%d\n", __func__, line);
342 	spin_lock(&node->lock);
343 }
344 
345 /**
346  * binder_node_unlock() - Release spinlock for given binder_node
347  * @node:         struct binder_node to release
348  *
349  * Release lock acquired via binder_node_lock()
350  */
351 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
352 static void
353 _binder_node_unlock(struct binder_node *node, int line)
354 	__releases(&node->lock)
355 {
356 	binder_debug(BINDER_DEBUG_SPINLOCKS,
357 		     "%s: line=%d\n", __func__, line);
358 	spin_unlock(&node->lock);
359 }
360 
361 /**
362  * binder_node_inner_lock() - Acquire node and inner locks
363  * @node:         struct binder_node to acquire
364  *
365  * Acquires node->lock. If node->proc is non-NULL, also acquires
366  * proc->inner_lock. Used to protect binder_node fields
367  */
368 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
369 static void
370 _binder_node_inner_lock(struct binder_node *node, int line)
371 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
372 {
373 	binder_debug(BINDER_DEBUG_SPINLOCKS,
374 		     "%s: line=%d\n", __func__, line);
375 	spin_lock(&node->lock);
376 	if (node->proc)
377 		binder_inner_proc_lock(node->proc);
378 	else
379 		/* annotation for sparse */
380 		__acquire(&node->proc->inner_lock);
381 }
382 
383 /**
384  * binder_node_inner_unlock() - Release node and inner locks
385  * @node:         struct binder_node to acquire
386  *
387  * Release lock acquired via binder_node_lock()
388  */
389 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
390 static void
391 _binder_node_inner_unlock(struct binder_node *node, int line)
392 	__releases(&node->lock) __releases(&node->proc->inner_lock)
393 {
394 	struct binder_proc *proc = node->proc;
395 
396 	binder_debug(BINDER_DEBUG_SPINLOCKS,
397 		     "%s: line=%d\n", __func__, line);
398 	if (proc)
399 		binder_inner_proc_unlock(proc);
400 	else
401 		/* annotation for sparse */
402 		__release(&node->proc->inner_lock);
403 	spin_unlock(&node->lock);
404 }
405 
406 static bool binder_worklist_empty_ilocked(struct list_head *list)
407 {
408 	return list_empty(list);
409 }
410 
411 /**
412  * binder_worklist_empty() - Check if no items on the work list
413  * @proc:       binder_proc associated with list
414  * @list:	list to check
415  *
416  * Return: true if there are no items on list, else false
417  */
418 static bool binder_worklist_empty(struct binder_proc *proc,
419 				  struct list_head *list)
420 {
421 	bool ret;
422 
423 	binder_inner_proc_lock(proc);
424 	ret = binder_worklist_empty_ilocked(list);
425 	binder_inner_proc_unlock(proc);
426 	return ret;
427 }
428 
429 /**
430  * binder_enqueue_work_ilocked() - Add an item to the work list
431  * @work:         struct binder_work to add to list
432  * @target_list:  list to add work to
433  *
434  * Adds the work to the specified list. Asserts that work
435  * is not already on a list.
436  *
437  * Requires the proc->inner_lock to be held.
438  */
439 static void
440 binder_enqueue_work_ilocked(struct binder_work *work,
441 			   struct list_head *target_list)
442 {
443 	BUG_ON(target_list == NULL);
444 	BUG_ON(work->entry.next && !list_empty(&work->entry));
445 	list_add_tail(&work->entry, target_list);
446 }
447 
448 /**
449  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
450  * @thread:       thread to queue work to
451  * @work:         struct binder_work to add to list
452  *
453  * Adds the work to the todo list of the thread. Doesn't set the process_todo
454  * flag, which means that (if it wasn't already set) the thread will go to
455  * sleep without handling this work when it calls read.
456  *
457  * Requires the proc->inner_lock to be held.
458  */
459 static void
460 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
461 					    struct binder_work *work)
462 {
463 	WARN_ON(!list_empty(&thread->waiting_thread_node));
464 	binder_enqueue_work_ilocked(work, &thread->todo);
465 }
466 
467 /**
468  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
469  * @thread:       thread to queue work to
470  * @work:         struct binder_work to add to list
471  *
472  * Adds the work to the todo list of the thread, and enables processing
473  * of the todo queue.
474  *
475  * Requires the proc->inner_lock to be held.
476  */
477 static void
478 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
479 				   struct binder_work *work)
480 {
481 	WARN_ON(!list_empty(&thread->waiting_thread_node));
482 	binder_enqueue_work_ilocked(work, &thread->todo);
483 
484 	/* (e)poll-based threads require an explicit wakeup signal when
485 	 * queuing their own work; they rely on these events to consume
486 	 * messages without blocking on I/O. Without it, threads risk waiting
487 	 * indefinitely without handling the work.
488 	 */
489 	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
490 	    thread->pid == current->pid && !thread->process_todo)
491 		wake_up_interruptible_sync(&thread->wait);
492 
493 	thread->process_todo = true;
494 }
495 
496 /**
497  * binder_enqueue_thread_work() - Add an item to the thread work list
498  * @thread:       thread to queue work to
499  * @work:         struct binder_work to add to list
500  *
501  * Adds the work to the todo list of the thread, and enables processing
502  * of the todo queue.
503  */
504 static void
505 binder_enqueue_thread_work(struct binder_thread *thread,
506 			   struct binder_work *work)
507 {
508 	binder_inner_proc_lock(thread->proc);
509 	binder_enqueue_thread_work_ilocked(thread, work);
510 	binder_inner_proc_unlock(thread->proc);
511 }
512 
513 static void
514 binder_dequeue_work_ilocked(struct binder_work *work)
515 {
516 	list_del_init(&work->entry);
517 }
518 
519 /**
520  * binder_dequeue_work() - Removes an item from the work list
521  * @proc:         binder_proc associated with list
522  * @work:         struct binder_work to remove from list
523  *
524  * Removes the specified work item from whatever list it is on.
525  * Can safely be called if work is not on any list.
526  */
527 static void
528 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
529 {
530 	binder_inner_proc_lock(proc);
531 	binder_dequeue_work_ilocked(work);
532 	binder_inner_proc_unlock(proc);
533 }
534 
535 static struct binder_work *binder_dequeue_work_head_ilocked(
536 					struct list_head *list)
537 {
538 	struct binder_work *w;
539 
540 	w = list_first_entry_or_null(list, struct binder_work, entry);
541 	if (w)
542 		list_del_init(&w->entry);
543 	return w;
544 }
545 
546 static void
547 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
548 static void binder_free_thread(struct binder_thread *thread);
549 static void binder_free_proc(struct binder_proc *proc);
550 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
551 
552 static bool binder_has_work_ilocked(struct binder_thread *thread,
553 				    bool do_proc_work)
554 {
555 	return thread->process_todo ||
556 		thread->looper_need_return ||
557 		(do_proc_work &&
558 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
559 }
560 
561 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
562 {
563 	bool has_work;
564 
565 	binder_inner_proc_lock(thread->proc);
566 	has_work = binder_has_work_ilocked(thread, do_proc_work);
567 	binder_inner_proc_unlock(thread->proc);
568 
569 	return has_work;
570 }
571 
572 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
573 {
574 	return !thread->transaction_stack &&
575 		binder_worklist_empty_ilocked(&thread->todo);
576 }
577 
578 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
579 					       bool sync)
580 {
581 	struct rb_node *n;
582 	struct binder_thread *thread;
583 
584 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
585 		thread = rb_entry(n, struct binder_thread, rb_node);
586 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
587 		    binder_available_for_proc_work_ilocked(thread)) {
588 			if (sync)
589 				wake_up_interruptible_sync(&thread->wait);
590 			else
591 				wake_up_interruptible(&thread->wait);
592 		}
593 	}
594 }
595 
596 /**
597  * binder_select_thread_ilocked() - selects a thread for doing proc work.
598  * @proc:	process to select a thread from
599  *
600  * Note that calling this function moves the thread off the waiting_threads
601  * list, so it can only be woken up by the caller of this function, or a
602  * signal. Therefore, callers *should* always wake up the thread this function
603  * returns.
604  *
605  * Return:	If there's a thread currently waiting for process work,
606  *		returns that thread. Otherwise returns NULL.
607  */
608 static struct binder_thread *
609 binder_select_thread_ilocked(struct binder_proc *proc)
610 {
611 	struct binder_thread *thread;
612 
613 	assert_spin_locked(&proc->inner_lock);
614 	thread = list_first_entry_or_null(&proc->waiting_threads,
615 					  struct binder_thread,
616 					  waiting_thread_node);
617 
618 	if (thread)
619 		list_del_init(&thread->waiting_thread_node);
620 
621 	return thread;
622 }
623 
624 /**
625  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
626  * @proc:	process to wake up a thread in
627  * @thread:	specific thread to wake-up (may be NULL)
628  * @sync:	whether to do a synchronous wake-up
629  *
630  * This function wakes up a thread in the @proc process.
631  * The caller may provide a specific thread to wake-up in
632  * the @thread parameter. If @thread is NULL, this function
633  * will wake up threads that have called poll().
634  *
635  * Note that for this function to work as expected, callers
636  * should first call binder_select_thread() to find a thread
637  * to handle the work (if they don't have a thread already),
638  * and pass the result into the @thread parameter.
639  */
640 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
641 					 struct binder_thread *thread,
642 					 bool sync)
643 {
644 	assert_spin_locked(&proc->inner_lock);
645 
646 	if (thread) {
647 		if (sync)
648 			wake_up_interruptible_sync(&thread->wait);
649 		else
650 			wake_up_interruptible(&thread->wait);
651 		return;
652 	}
653 
654 	/* Didn't find a thread waiting for proc work; this can happen
655 	 * in two scenarios:
656 	 * 1. All threads are busy handling transactions
657 	 *    In that case, one of those threads should call back into
658 	 *    the kernel driver soon and pick up this work.
659 	 * 2. Threads are using the (e)poll interface, in which case
660 	 *    they may be blocked on the waitqueue without having been
661 	 *    added to waiting_threads. For this case, we just iterate
662 	 *    over all threads not handling transaction work, and
663 	 *    wake them all up. We wake all because we don't know whether
664 	 *    a thread that called into (e)poll is handling non-binder
665 	 *    work currently.
666 	 */
667 	binder_wakeup_poll_threads_ilocked(proc, sync);
668 }
669 
670 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
671 {
672 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
673 
674 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
675 }
676 
677 static void binder_set_nice(long nice)
678 {
679 	long min_nice;
680 
681 	if (can_nice(current, nice)) {
682 		set_user_nice(current, nice);
683 		return;
684 	}
685 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
686 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
687 		     "%d: nice value %ld not allowed, use %ld instead\n",
688 		      current->pid, nice, min_nice);
689 	set_user_nice(current, min_nice);
690 	if (min_nice <= MAX_NICE)
691 		return;
692 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
693 }
694 
695 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
696 						   binder_uintptr_t ptr)
697 {
698 	struct rb_node *n = proc->nodes.rb_node;
699 	struct binder_node *node;
700 
701 	assert_spin_locked(&proc->inner_lock);
702 
703 	while (n) {
704 		node = rb_entry(n, struct binder_node, rb_node);
705 
706 		if (ptr < node->ptr)
707 			n = n->rb_left;
708 		else if (ptr > node->ptr)
709 			n = n->rb_right;
710 		else {
711 			/*
712 			 * take an implicit weak reference
713 			 * to ensure node stays alive until
714 			 * call to binder_put_node()
715 			 */
716 			binder_inc_node_tmpref_ilocked(node);
717 			return node;
718 		}
719 	}
720 	return NULL;
721 }
722 
723 static struct binder_node *binder_get_node(struct binder_proc *proc,
724 					   binder_uintptr_t ptr)
725 {
726 	struct binder_node *node;
727 
728 	binder_inner_proc_lock(proc);
729 	node = binder_get_node_ilocked(proc, ptr);
730 	binder_inner_proc_unlock(proc);
731 	return node;
732 }
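
/*
 * A minimal usage sketch (hypothetical caller): the tmp_ref taken by
 * binder_get_node() keeps the node alive only until the matching
 * binder_put_node() (defined later in this file):
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... node cannot be freed while we use it ...
 *		binder_put_node(node);
 *	}
 */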
733 
734 static struct binder_node *binder_init_node_ilocked(
735 						struct binder_proc *proc,
736 						struct binder_node *new_node,
737 						struct flat_binder_object *fp)
738 {
739 	struct rb_node **p = &proc->nodes.rb_node;
740 	struct rb_node *parent = NULL;
741 	struct binder_node *node;
742 	binder_uintptr_t ptr = fp ? fp->binder : 0;
743 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
744 	__u32 flags = fp ? fp->flags : 0;
745 
746 	assert_spin_locked(&proc->inner_lock);
747 
748 	while (*p) {
749 
750 		parent = *p;
751 		node = rb_entry(parent, struct binder_node, rb_node);
752 
753 		if (ptr < node->ptr)
754 			p = &(*p)->rb_left;
755 		else if (ptr > node->ptr)
756 			p = &(*p)->rb_right;
757 		else {
758 			/*
759 			 * A matching node is already in
760 			 * the rb tree. Abandon the init
761 			 * and return it.
762 			 */
763 			binder_inc_node_tmpref_ilocked(node);
764 			return node;
765 		}
766 	}
767 	node = new_node;
768 	binder_stats_created(BINDER_STAT_NODE);
769 	node->tmp_refs++;
770 	rb_link_node(&node->rb_node, parent, p);
771 	rb_insert_color(&node->rb_node, &proc->nodes);
772 	node->debug_id = atomic_inc_return(&binder_last_id);
773 	node->proc = proc;
774 	node->ptr = ptr;
775 	node->cookie = cookie;
776 	node->work.type = BINDER_WORK_NODE;
777 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
778 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
779 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
780 	spin_lock_init(&node->lock);
781 	INIT_LIST_HEAD(&node->work.entry);
782 	INIT_LIST_HEAD(&node->async_todo);
783 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
784 		     "%d:%d node %d u%016llx c%016llx created\n",
785 		     proc->pid, current->pid, node->debug_id,
786 		     (u64)node->ptr, (u64)node->cookie);
787 
788 	return node;
789 }
790 
791 static struct binder_node *binder_new_node(struct binder_proc *proc,
792 					   struct flat_binder_object *fp)
793 {
794 	struct binder_node *node;
795 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
796 
797 	if (!new_node)
798 		return NULL;
799 	binder_inner_proc_lock(proc);
800 	node = binder_init_node_ilocked(proc, new_node, fp);
801 	binder_inner_proc_unlock(proc);
802 	if (node != new_node)
803 		/*
804 		 * The node was already added by another thread
805 		 */
806 		kfree(new_node);
807 
808 	return node;
809 }
810 
811 static void binder_free_node(struct binder_node *node)
812 {
813 	kfree(node);
814 	binder_stats_deleted(BINDER_STAT_NODE);
815 }
816 
817 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
818 				    int internal,
819 				    struct list_head *target_list)
820 {
821 	struct binder_proc *proc = node->proc;
822 
823 	assert_spin_locked(&node->lock);
824 	if (proc)
825 		assert_spin_locked(&proc->inner_lock);
826 	if (strong) {
827 		if (internal) {
828 			if (target_list == NULL &&
829 			    node->internal_strong_refs == 0 &&
830 			    !(node->proc &&
831 			      node == node->proc->context->binder_context_mgr_node &&
832 			      node->has_strong_ref)) {
833 				pr_err("invalid inc strong node for %d\n",
834 					node->debug_id);
835 				return -EINVAL;
836 			}
837 			node->internal_strong_refs++;
838 		} else
839 			node->local_strong_refs++;
840 		if (!node->has_strong_ref && target_list) {
841 			struct binder_thread *thread = container_of(target_list,
842 						    struct binder_thread, todo);
843 			binder_dequeue_work_ilocked(&node->work);
844 			BUG_ON(&thread->todo != target_list);
845 			binder_enqueue_deferred_thread_work_ilocked(thread,
846 								   &node->work);
847 		}
848 	} else {
849 		if (!internal)
850 			node->local_weak_refs++;
851 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
852 			if (target_list == NULL) {
853 				pr_err("invalid inc weak node for %d\n",
854 					node->debug_id);
855 				return -EINVAL;
856 			}
857 			/*
858 			 * See comment above
859 			 */
860 			binder_enqueue_work_ilocked(&node->work, target_list);
861 		}
862 	}
863 	return 0;
864 }
865 
866 static int binder_inc_node(struct binder_node *node, int strong, int internal,
867 			   struct list_head *target_list)
868 {
869 	int ret;
870 
871 	binder_node_inner_lock(node);
872 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
873 	binder_node_inner_unlock(node);
874 
875 	return ret;
876 }
877 
878 static bool binder_dec_node_nilocked(struct binder_node *node,
879 				     int strong, int internal)
880 {
881 	struct binder_proc *proc = node->proc;
882 
883 	assert_spin_locked(&node->lock);
884 	if (proc)
885 		assert_spin_locked(&proc->inner_lock);
886 	if (strong) {
887 		if (internal)
888 			node->internal_strong_refs--;
889 		else
890 			node->local_strong_refs--;
891 		if (node->local_strong_refs || node->internal_strong_refs)
892 			return false;
893 	} else {
894 		if (!internal)
895 			node->local_weak_refs--;
896 		if (node->local_weak_refs || node->tmp_refs ||
897 				!hlist_empty(&node->refs))
898 			return false;
899 	}
900 
901 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
902 		if (list_empty(&node->work.entry)) {
903 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
904 			binder_wakeup_proc_ilocked(proc);
905 		}
906 	} else {
907 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
908 		    !node->local_weak_refs && !node->tmp_refs) {
909 			if (proc) {
910 				binder_dequeue_work_ilocked(&node->work);
911 				rb_erase(&node->rb_node, &proc->nodes);
912 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
913 					     "refless node %d deleted\n",
914 					     node->debug_id);
915 			} else {
916 				BUG_ON(!list_empty(&node->work.entry));
917 				spin_lock(&binder_dead_nodes_lock);
918 				/*
919 				 * tmp_refs could have changed so
920 				 * check it again
921 				 */
922 				if (node->tmp_refs) {
923 					spin_unlock(&binder_dead_nodes_lock);
924 					return false;
925 				}
926 				hlist_del(&node->dead_node);
927 				spin_unlock(&binder_dead_nodes_lock);
928 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
929 					     "dead node %d deleted\n",
930 					     node->debug_id);
931 			}
932 			return true;
933 		}
934 	}
935 	return false;
936 }
937 
938 static void binder_dec_node(struct binder_node *node, int strong, int internal)
939 {
940 	bool free_node;
941 
942 	binder_node_inner_lock(node);
943 	free_node = binder_dec_node_nilocked(node, strong, internal);
944 	binder_node_inner_unlock(node);
945 	if (free_node)
946 		binder_free_node(node);
947 }
948 
949 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
950 {
951 	/*
952 	 * No call to binder_inc_node() is needed since we
953 	 * don't need to inform userspace of any changes to
954 	 * tmp_refs
955 	 */
956 	node->tmp_refs++;
957 }
958 
959 /**
960  * binder_inc_node_tmpref() - take a temporary reference on node
961  * @node:	node to reference
962  *
963  * Take reference on node to prevent the node from being freed
964  * while referenced only by a local variable. The inner lock is
965  * needed to serialize with the node work on the queue (which
966  * isn't needed after the node is dead). If the node is dead
967  * (node->proc is NULL), use binder_dead_nodes_lock to protect
968  * node->tmp_refs against dead-node-only cases where the node
969  * lock cannot be acquired (eg traversing the dead node list to
970  * print nodes)
971  */
972 static void binder_inc_node_tmpref(struct binder_node *node)
973 {
974 	binder_node_lock(node);
975 	if (node->proc)
976 		binder_inner_proc_lock(node->proc);
977 	else
978 		spin_lock(&binder_dead_nodes_lock);
979 	binder_inc_node_tmpref_ilocked(node);
980 	if (node->proc)
981 		binder_inner_proc_unlock(node->proc);
982 	else
983 		spin_unlock(&binder_dead_nodes_lock);
984 	binder_node_unlock(node);
985 }
986 
987 /**
988  * binder_dec_node_tmpref() - remove a temporary reference on node
989  * @node:	node to reference
990  *
991  * Release temporary reference on node taken via binder_inc_node_tmpref()
992  */
993 static void binder_dec_node_tmpref(struct binder_node *node)
994 {
995 	bool free_node;
996 
997 	binder_node_inner_lock(node);
998 	if (!node->proc)
999 		spin_lock(&binder_dead_nodes_lock);
1000 	else
1001 		__acquire(&binder_dead_nodes_lock);
1002 	node->tmp_refs--;
1003 	BUG_ON(node->tmp_refs < 0);
1004 	if (!node->proc)
1005 		spin_unlock(&binder_dead_nodes_lock);
1006 	else
1007 		__release(&binder_dead_nodes_lock);
1008 	/*
1009 	 * Call binder_dec_node_nilocked() to check if all refcounts
1010 	 * are 0 and cleanup is needed. Calling with strong=0 and
1011 	 * internal=1 means no actual reference is released there.
1012 	 * If that changes, a change is needed here too.
1013 	 */
1014 	free_node = binder_dec_node_nilocked(node, 0, 1);
1015 	binder_node_inner_unlock(node);
1016 	if (free_node)
1017 		binder_free_node(node);
1018 }
1019 
1020 static void binder_put_node(struct binder_node *node)
1021 {
1022 	binder_dec_node_tmpref(node);
1023 }
1024 
1025 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1026 						 u32 desc, bool need_strong_ref)
1027 {
1028 	struct rb_node *n = proc->refs_by_desc.rb_node;
1029 	struct binder_ref *ref;
1030 
1031 	while (n) {
1032 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1033 
1034 		if (desc < ref->data.desc) {
1035 			n = n->rb_left;
1036 		} else if (desc > ref->data.desc) {
1037 			n = n->rb_right;
1038 		} else if (need_strong_ref && !ref->data.strong) {
1039 			binder_user_error("tried to use weak ref as strong ref\n");
1040 			return NULL;
1041 		} else {
1042 			return ref;
1043 		}
1044 	}
1045 	return NULL;
1046 }
1047 
1048 /* Find the smallest unused descriptor the "slow way" */
1049 static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
1050 {
1051 	struct binder_ref *ref;
1052 	struct rb_node *n;
1053 	u32 desc;
1054 
1055 	desc = offset;
1056 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1057 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1058 		if (ref->data.desc > desc)
1059 			break;
1060 		desc = ref->data.desc + 1;
1061 	}
1062 
1063 	return desc;
1064 }
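
/*
 * Worked example: with descriptors {0, 1, 3, 4} in use and offset == 1,
 * desc starts at 1, stays at 1 after ref 0, advances to 2 after ref 1,
 * and the loop breaks at ref 3 (3 > 2), returning 2 as the smallest
 * unused descriptor >= offset.
 */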
1065 
1066 /*
1067  * Find an available reference descriptor ID. The proc->outer_lock might
1068  * be released in the process, in which case -EAGAIN is returned and the
1069  * @desc should be considered invalid.
1070  */
1071 static int get_ref_desc_olocked(struct binder_proc *proc,
1072 				struct binder_node *node,
1073 				u32 *desc)
1074 {
1075 	struct dbitmap *dmap = &proc->dmap;
1076 	unsigned int nbits, offset;
1077 	unsigned long *new, bit;
1078 
1079 	/* 0 is reserved for the context manager */
1080 	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
1081 
1082 	if (!dbitmap_enabled(dmap)) {
1083 		*desc = slow_desc_lookup_olocked(proc, offset);
1084 		return 0;
1085 	}
1086 
1087 	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
1088 		*desc = bit;
1089 		return 0;
1090 	}
1091 
1092 	/*
1093 	 * The dbitmap is full and needs to grow. The proc->outer_lock
1094 	 * is briefly released to allocate the new bitmap safely.
1095 	 */
1096 	nbits = dbitmap_grow_nbits(dmap);
1097 	binder_proc_unlock(proc);
1098 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1099 	binder_proc_lock(proc);
1100 	dbitmap_grow(dmap, new, nbits);
1101 
1102 	return -EAGAIN;
1103 }
1104 
1105 /**
1106  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1107  * @proc:	binder_proc that owns the ref
1108  * @node:	binder_node of target
1109  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1110  *
1111  * Look up the ref for the given node and return it if it exists
1112  *
1113  * If it doesn't exist and the caller provides a newly allocated
1114  * ref, initialize the fields of the newly allocated ref and insert
1115  * into the given proc rb_trees and node refs list.
1116  *
1117  * Return:	the ref for node. It is possible that another thread
1118  *		allocated/initialized the ref first in which case the
1119  *		returned ref would be different than the passed-in
1120  *		new_ref. new_ref must be kfree'd by the caller in
1121  *		this case.
1122  */
1123 static struct binder_ref *binder_get_ref_for_node_olocked(
1124 					struct binder_proc *proc,
1125 					struct binder_node *node,
1126 					struct binder_ref *new_ref)
1127 {
1128 	struct binder_ref *ref;
1129 	struct rb_node *parent;
1130 	struct rb_node **p;
1131 	u32 desc;
1132 
1133 retry:
1134 	p = &proc->refs_by_node.rb_node;
1135 	parent = NULL;
1136 	while (*p) {
1137 		parent = *p;
1138 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1139 
1140 		if (node < ref->node)
1141 			p = &(*p)->rb_left;
1142 		else if (node > ref->node)
1143 			p = &(*p)->rb_right;
1144 		else
1145 			return ref;
1146 	}
1147 	if (!new_ref)
1148 		return NULL;
1149 
1150 	/* might release the proc->outer_lock */
1151 	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1152 		goto retry;
1153 
1154 	binder_stats_created(BINDER_STAT_REF);
1155 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1156 	new_ref->proc = proc;
1157 	new_ref->node = node;
1158 	rb_link_node(&new_ref->rb_node_node, parent, p);
1159 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1160 
1161 	new_ref->data.desc = desc;
1162 	p = &proc->refs_by_desc.rb_node;
1163 	while (*p) {
1164 		parent = *p;
1165 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1166 
1167 		if (new_ref->data.desc < ref->data.desc)
1168 			p = &(*p)->rb_left;
1169 		else if (new_ref->data.desc > ref->data.desc)
1170 			p = &(*p)->rb_right;
1171 		else
1172 			BUG();
1173 	}
1174 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1175 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1176 
1177 	binder_node_lock(node);
1178 	hlist_add_head(&new_ref->node_entry, &node->refs);
1179 
1180 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1181 		     "%d new ref %d desc %d for node %d\n",
1182 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1183 		      node->debug_id);
1184 	binder_node_unlock(node);
1185 	return new_ref;
1186 }
1187 
1188 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1189 {
1190 	struct dbitmap *dmap = &ref->proc->dmap;
1191 	bool delete_node = false;
1192 
1193 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1194 		     "%d delete ref %d desc %d for node %d\n",
1195 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1196 		      ref->node->debug_id);
1197 
1198 	if (dbitmap_enabled(dmap))
1199 		dbitmap_clear_bit(dmap, ref->data.desc);
1200 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1201 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1202 
1203 	binder_node_inner_lock(ref->node);
1204 	if (ref->data.strong)
1205 		binder_dec_node_nilocked(ref->node, 1, 1);
1206 
1207 	hlist_del(&ref->node_entry);
1208 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1209 	binder_node_inner_unlock(ref->node);
1210 	/*
1211 	 * Clear ref->node unless we want the caller to free the node
1212 	 */
1213 	if (!delete_node) {
1214 		/*
1215 		 * The caller uses ref->node to determine
1216 		 * whether the node needs to be freed. Clear
1217 		 * it since the node is still alive.
1218 		 */
1219 		ref->node = NULL;
1220 	}
1221 
1222 	if (ref->death) {
1223 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1224 			     "%d delete ref %d desc %d has death notification\n",
1225 			      ref->proc->pid, ref->data.debug_id,
1226 			      ref->data.desc);
1227 		binder_dequeue_work(ref->proc, &ref->death->work);
1228 		binder_stats_deleted(BINDER_STAT_DEATH);
1229 	}
1230 
1231 	if (ref->freeze) {
1232 		binder_dequeue_work(ref->proc, &ref->freeze->work);
1233 		binder_stats_deleted(BINDER_STAT_FREEZE);
1234 	}
1235 
1236 	binder_stats_deleted(BINDER_STAT_REF);
1237 }
1238 
1239 /**
1240  * binder_inc_ref_olocked() - increment the given ref
1241  * @ref:         ref to be incremented
1242  * @strong:      if true, strong increment, else weak
1243  * @target_list: list to queue node work on
1244  *
1245  * Increment the ref. @ref->proc->outer_lock must be held on entry
1246  *
1247  * Return: 0, if successful, else errno
1248  */
1249 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1250 				  struct list_head *target_list)
1251 {
1252 	int ret;
1253 
1254 	if (strong) {
1255 		if (ref->data.strong == 0) {
1256 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1257 			if (ret)
1258 				return ret;
1259 		}
1260 		ref->data.strong++;
1261 	} else {
1262 		if (ref->data.weak == 0) {
1263 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1264 			if (ret)
1265 				return ret;
1266 		}
1267 		ref->data.weak++;
1268 	}
1269 	return 0;
1270 }
1271 
1272 /**
1273  * binder_dec_ref_olocked() - decrement the given ref
1274  * @ref:	ref to be decremented
1275  * @strong:	if true, strong decrement, else weak
1276  *
1277  * Decrement the ref.
1278  *
1279  * Return: %true if ref is cleaned up and ready to be freed.
1280  */
1281 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1282 {
1283 	if (strong) {
1284 		if (ref->data.strong == 0) {
1285 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1286 					  ref->proc->pid, ref->data.debug_id,
1287 					  ref->data.desc, ref->data.strong,
1288 					  ref->data.weak);
1289 			return false;
1290 		}
1291 		ref->data.strong--;
1292 		if (ref->data.strong == 0)
1293 			binder_dec_node(ref->node, strong, 1);
1294 	} else {
1295 		if (ref->data.weak == 0) {
1296 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1297 					  ref->proc->pid, ref->data.debug_id,
1298 					  ref->data.desc, ref->data.strong,
1299 					  ref->data.weak);
1300 			return false;
1301 		}
1302 		ref->data.weak--;
1303 	}
1304 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1305 		binder_cleanup_ref_olocked(ref);
1306 		return true;
1307 	}
1308 	return false;
1309 }
1310 
1311 /**
1312  * binder_get_node_from_ref() - get the node from the given proc/desc
1313  * @proc:	proc containing the ref
1314  * @desc:	the handle associated with the ref
1315  * @need_strong_ref: if true, only return node if ref is strong
1316  * @rdata:	the id/refcount data for the ref
1317  *
1318  * Given a proc and ref handle, return the associated binder_node
1319  *
1320  * Return: a binder_node, or NULL if not found or if only a weak ref exists when a strong one is required
1321  */
1322 static struct binder_node *binder_get_node_from_ref(
1323 		struct binder_proc *proc,
1324 		u32 desc, bool need_strong_ref,
1325 		struct binder_ref_data *rdata)
1326 {
1327 	struct binder_node *node;
1328 	struct binder_ref *ref;
1329 
1330 	binder_proc_lock(proc);
1331 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1332 	if (!ref)
1333 		goto err_no_ref;
1334 	node = ref->node;
1335 	/*
1336 	 * Take an implicit reference on the node to ensure
1337 	 * it stays alive until the call to binder_put_node()
1338 	 */
1339 	binder_inc_node_tmpref(node);
1340 	if (rdata)
1341 		*rdata = ref->data;
1342 	binder_proc_unlock(proc);
1343 
1344 	return node;
1345 
1346 err_no_ref:
1347 	binder_proc_unlock(proc);
1348 	return NULL;
1349 }
1350 
1351 /**
1352  * binder_free_ref() - free the binder_ref
1353  * @ref:	ref to free
1354  *
1355  * Free the binder_ref. Free the binder_node indicated by ref->node
1356  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1357  */
1358 static void binder_free_ref(struct binder_ref *ref)
1359 {
1360 	if (ref->node)
1361 		binder_free_node(ref->node);
1362 	kfree(ref->death);
1363 	kfree(ref->freeze);
1364 	kfree(ref);
1365 }
1366 
1367 /* shrink descriptor bitmap if needed */
1368 static void try_shrink_dmap(struct binder_proc *proc)
1369 {
1370 	unsigned long *new;
1371 	int nbits;
1372 
1373 	binder_proc_lock(proc);
1374 	nbits = dbitmap_shrink_nbits(&proc->dmap);
1375 	binder_proc_unlock(proc);
1376 
1377 	if (!nbits)
1378 		return;
1379 
1380 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1381 	binder_proc_lock(proc);
1382 	dbitmap_shrink(&proc->dmap, new, nbits);
1383 	binder_proc_unlock(proc);
1384 }
1385 
1386 /**
1387  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1388  * @proc:	proc containing the ref
1389  * @desc:	the handle associated with the ref
1390  * @increment:	true=inc reference, false=dec reference
1391  * @strong:	true=strong reference, false=weak reference
1392  * @rdata:	the id/refcount data for the ref
1393  *
1394  * Given a proc and ref handle, increment or decrement the ref
1395  * according to "increment" arg.
1396  *
1397  * Return: 0 if successful, else errno
1398  */
1399 static int binder_update_ref_for_handle(struct binder_proc *proc,
1400 		uint32_t desc, bool increment, bool strong,
1401 		struct binder_ref_data *rdata)
1402 {
1403 	int ret = 0;
1404 	struct binder_ref *ref;
1405 	bool delete_ref = false;
1406 
1407 	binder_proc_lock(proc);
1408 	ref = binder_get_ref_olocked(proc, desc, strong);
1409 	if (!ref) {
1410 		ret = -EINVAL;
1411 		goto err_no_ref;
1412 	}
1413 	if (increment)
1414 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1415 	else
1416 		delete_ref = binder_dec_ref_olocked(ref, strong);
1417 
1418 	if (rdata)
1419 		*rdata = ref->data;
1420 	binder_proc_unlock(proc);
1421 
1422 	if (delete_ref) {
1423 		binder_free_ref(ref);
1424 		try_shrink_dmap(proc);
1425 	}
1426 	return ret;
1427 
1428 err_no_ref:
1429 	binder_proc_unlock(proc);
1430 	return ret;
1431 }
1432 
1433 /**
1434  * binder_dec_ref_for_handle() - dec the ref for given handle
1435  * @proc:	proc containing the ref
1436  * @desc:	the handle associated with the ref
1437  * @strong:	true=strong reference, false=weak reference
1438  * @rdata:	the id/refcount data for the ref
1439  *
1440  * Just calls binder_update_ref_for_handle() to decrement the ref.
1441  *
1442  * Return: 0 if successful, else errno
1443  */
1444 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1445 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1446 {
1447 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1448 }
1449 
1450 
1451 /**
1452  * binder_inc_ref_for_node() - increment the ref for given proc/node
1453  * @proc:	 proc containing the ref
1454  * @node:	 target node
1455  * @strong:	 true=strong reference, false=weak reference
1456  * @target_list: worklist to use if node is incremented
1457  * @rdata:	 the id/refcount data for the ref
1458  *
1459  * Given a proc and node, increment the ref. Create the ref if it
1460  * doesn't already exist
1461  *
1462  * Return: 0 if successful, else errno
1463  */
1464 static int binder_inc_ref_for_node(struct binder_proc *proc,
1465 			struct binder_node *node,
1466 			bool strong,
1467 			struct list_head *target_list,
1468 			struct binder_ref_data *rdata)
1469 {
1470 	struct binder_ref *ref;
1471 	struct binder_ref *new_ref = NULL;
1472 	int ret = 0;
1473 
1474 	binder_proc_lock(proc);
1475 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1476 	if (!ref) {
1477 		binder_proc_unlock(proc);
1478 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1479 		if (!new_ref)
1480 			return -ENOMEM;
1481 		binder_proc_lock(proc);
1482 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1483 	}
1484 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1485 	*rdata = ref->data;
1486 	if (ret && ref == new_ref) {
1487 		/*
1488 		 * Cleanup the failed reference here as the target
1489 		 * could now be dead and have already released its
1490 		 * references by now. Cleaning up the new reference here
1491 		 * (its data.strong is 0 and a tmp_ref is still held on
1492 		 * the node) will not free the node. The new_ref gets kfree'd below.
1493 		 */
1494 		binder_cleanup_ref_olocked(new_ref);
1495 		ref = NULL;
1496 	}
1497 
1498 	binder_proc_unlock(proc);
1499 	if (new_ref && ref != new_ref)
1500 		/*
1501 		 * Another thread created the ref first so
1502 		 * free the one we allocated
1503 		 */
1504 		kfree(new_ref);
1505 	return ret;
1506 }
1507 
1508 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1509 					   struct binder_transaction *t)
1510 {
1511 	BUG_ON(!target_thread);
1512 	assert_spin_locked(&target_thread->proc->inner_lock);
1513 	BUG_ON(target_thread->transaction_stack != t);
1514 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1515 	target_thread->transaction_stack =
1516 		target_thread->transaction_stack->from_parent;
1517 	t->from = NULL;
1518 }
1519 
1520 /**
1521  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1522  * @thread:	thread to decrement
1523  *
1524  * A thread needs to be kept alive while being used to create or
1525  * handle a transaction. binder_get_txn_from() is used to safely
1526  * extract t->from from a binder_transaction and keep the thread
1527  * indicated by t->from from being freed. When done with that
1528  * binder_thread, this function is called to decrement the
1529  * tmp_ref and free if appropriate (thread has been released
1530  * and no transaction being processed by the driver)
1531  */
1532 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1533 {
1534 	/*
1535 	 * atomic is used so the counter can be updated locklessly; the
1536 	 * thread is freed only once it is zero and thread->is_dead is true
1537 	 */
1538 	binder_inner_proc_lock(thread->proc);
1539 	atomic_dec(&thread->tmp_ref);
1540 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1541 		binder_inner_proc_unlock(thread->proc);
1542 		binder_free_thread(thread);
1543 		return;
1544 	}
1545 	binder_inner_proc_unlock(thread->proc);
1546 }
1547 
1548 /**
1549  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1550  * @proc:	proc to decrement
1551  *
1552  * A binder_proc needs to be kept alive while being used to create or
1553  * handle a transaction. proc->tmp_ref is incremented when
1554  * creating a new transaction or the binder_proc is currently in-use
1555  * by threads that are being released. When done with the binder_proc,
1556  * this function is called to decrement the counter and free the
1557  * proc if appropriate (proc has been released, all threads have
1558  * been released and not currently in-use to process a transaction).
1559  */
1560 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1561 {
1562 	binder_inner_proc_lock(proc);
1563 	proc->tmp_ref--;
1564 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1565 			!proc->tmp_ref) {
1566 		binder_inner_proc_unlock(proc);
1567 		binder_free_proc(proc);
1568 		return;
1569 	}
1570 	binder_inner_proc_unlock(proc);
1571 }
1572 
1573 /**
1574  * binder_get_txn_from() - safely extract the "from" thread in transaction
1575  * @t:	binder transaction for t->from
1576  *
1577  * Atomically return the "from" thread and increment the tmp_ref
1578  * count for the thread to ensure it stays alive until
1579  * binder_thread_dec_tmpref() is called.
1580  *
1581  * Return: the value of t->from
1582  */
1583 static struct binder_thread *binder_get_txn_from(
1584 		struct binder_transaction *t)
1585 {
1586 	struct binder_thread *from;
1587 
1588 	guard(spinlock)(&t->lock);
1589 	from = t->from;
1590 	if (from)
1591 		atomic_inc(&from->tmp_ref);
1592 	return from;
1593 }
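
/*
 * A minimal usage sketch (hypothetical caller): the tmp_ref taken here
 * must be dropped with binder_thread_dec_tmpref() once the caller is
 * done with the thread:
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		... from cannot be freed while we use it ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */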
1594 
1595 /**
1596  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1597  * @t:	binder transaction for t->from
1598  *
1599  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1600  * to guarantee that the thread cannot be released while operating on it.
1601  * The caller must call binder_inner_proc_unlock() to release the inner lock
1602  * as well as call binder_dec_thread_txn() to release the reference.
1603  *
1604  * Return: the value of t->from
1605  */
1606 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1607 		struct binder_transaction *t)
1608 	__acquires(&t->from->proc->inner_lock)
1609 {
1610 	struct binder_thread *from;
1611 
1612 	from = binder_get_txn_from(t);
1613 	if (!from) {
1614 		__acquire(&from->proc->inner_lock);
1615 		return NULL;
1616 	}
1617 	binder_inner_proc_lock(from->proc);
1618 	if (t->from) {
1619 		BUG_ON(from != t->from);
1620 		return from;
1621 	}
1622 	binder_inner_proc_unlock(from->proc);
1623 	__acquire(&from->proc->inner_lock);
1624 	binder_thread_dec_tmpref(from);
1625 	return NULL;
1626 }
1627 
1628 /**
1629  * binder_free_txn_fixups() - free unprocessed fd fixups
1630  * @t:	binder transaction whose fd fixups should be freed
1631  *
1632  * If the transaction is being torn down prior to being
1633  * processed by the target process, free all of the
1634  * fd fixups and fput the file structs. It is safe to
1635  * call this function after the fixups have been
1636  * processed -- in that case, the list will be empty.
1637  */
1638 static void binder_free_txn_fixups(struct binder_transaction *t)
1639 {
1640 	struct binder_txn_fd_fixup *fixup, *tmp;
1641 
1642 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1643 		fput(fixup->file);
1644 		if (fixup->target_fd >= 0)
1645 			put_unused_fd(fixup->target_fd);
1646 		list_del(&fixup->fixup_entry);
1647 		kfree(fixup);
1648 	}
1649 }
1650 
1651 static void binder_txn_latency_free(struct binder_transaction *t)
1652 {
1653 	int from_proc, from_thread, to_proc, to_thread;
1654 
1655 	spin_lock(&t->lock);
1656 	from_proc = t->from ? t->from->proc->pid : 0;
1657 	from_thread = t->from ? t->from->pid : 0;
1658 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1659 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1660 	spin_unlock(&t->lock);
1661 
1662 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1663 }
1664 
1665 static void binder_free_transaction(struct binder_transaction *t)
1666 {
1667 	struct binder_proc *target_proc = t->to_proc;
1668 
1669 	if (target_proc) {
1670 		binder_inner_proc_lock(target_proc);
1671 		target_proc->outstanding_txns--;
1672 		if (target_proc->outstanding_txns < 0)
1673 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1674 				__func__, target_proc->outstanding_txns);
1675 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1676 			wake_up_interruptible_all(&target_proc->freeze_wait);
1677 		if (t->buffer)
1678 			t->buffer->transaction = NULL;
1679 		binder_inner_proc_unlock(target_proc);
1680 	}
1681 	if (trace_binder_txn_latency_free_enabled())
1682 		binder_txn_latency_free(t);
1683 	/*
1684 	 * If the transaction has no target_proc, then
1685 	 * t->buffer->transaction has already been cleared.
1686 	 */
1687 	binder_free_txn_fixups(t);
1688 	kfree(t);
1689 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1690 }
1691 
1692 static void binder_send_failed_reply(struct binder_transaction *t,
1693 				     uint32_t error_code)
1694 {
1695 	struct binder_thread *target_thread;
1696 	struct binder_transaction *next;
1697 
1698 	BUG_ON(t->flags & TF_ONE_WAY);
1699 	while (1) {
1700 		target_thread = binder_get_txn_from_and_acq_inner(t);
1701 		if (target_thread) {
1702 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1703 				     "send failed reply for transaction %d to %d:%d\n",
1704 				      t->debug_id,
1705 				      target_thread->proc->pid,
1706 				      target_thread->pid);
1707 
1708 			binder_pop_transaction_ilocked(target_thread, t);
1709 			if (target_thread->reply_error.cmd == BR_OK) {
1710 				target_thread->reply_error.cmd = error_code;
1711 				binder_enqueue_thread_work_ilocked(
1712 					target_thread,
1713 					&target_thread->reply_error.work);
1714 				wake_up_interruptible(&target_thread->wait);
1715 			} else {
1716 				/*
1717 				 * Cannot get here for normal operation, but
1718 				 * we can if multiple synchronous transactions
1719 				 * are sent without blocking for responses.
1720 				 * Just ignore the 2nd error in this case.
1721 				 */
1722 				pr_warn("Unexpected reply error: %u\n",
1723 					target_thread->reply_error.cmd);
1724 			}
1725 			binder_inner_proc_unlock(target_thread->proc);
1726 			binder_thread_dec_tmpref(target_thread);
1727 			binder_free_transaction(t);
1728 			return;
1729 		}
1730 		__release(&target_thread->proc->inner_lock);
1731 		next = t->from_parent;
1732 
1733 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1734 			     "send failed reply for transaction %d, target dead\n",
1735 			     t->debug_id);
1736 
1737 		binder_free_transaction(t);
1738 		if (next == NULL) {
1739 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1740 				     "reply failed, no target thread at root\n");
1741 			return;
1742 		}
1743 		t = next;
1744 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1745 			     "reply failed, no target thread -- retry %d\n",
1746 			      t->debug_id);
1747 	}
1748 }
1749 
1750 /**
1751  * binder_cleanup_transaction() - cleans up undelivered transaction
1752  * @t:		transaction that needs to be cleaned up
1753  * @reason:	reason the transaction wasn't delivered
1754  * @error_code:	error to return to caller (if synchronous call)
1755  */
1756 static void binder_cleanup_transaction(struct binder_transaction *t,
1757 				       const char *reason,
1758 				       uint32_t error_code)
1759 {
1760 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1761 		binder_send_failed_reply(t, error_code);
1762 	} else {
1763 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1764 			"undelivered transaction %d, %s\n",
1765 			t->debug_id, reason);
1766 		binder_free_transaction(t);
1767 	}
1768 }
1769 
1770 /**
1771  * binder_get_object() - gets object and checks for valid metadata
1772  * @proc:	binder_proc owning the buffer
1773  * @u:		sender's user pointer to base of buffer
1774  * @buffer:	binder_buffer that we're parsing.
1775  * @offset:	offset in the @buffer at which to validate an object.
1776  * @object:	struct binder_object to read into
1777  *
1778  * Copy the binder object at the given offset into @object. If @u is
1779  * provided then the copy is from the sender's buffer. If not, then
1780  * it is copied from the target's @buffer.
1781  *
1782  * Return:	If there's a valid metadata object at @offset, the
1783  *		size of that object. Otherwise, it returns zero. The object
1784  *		is read into the struct binder_object pointed to by @object.
1785  */
1786 static size_t binder_get_object(struct binder_proc *proc,
1787 				const void __user *u,
1788 				struct binder_buffer *buffer,
1789 				unsigned long offset,
1790 				struct binder_object *object)
1791 {
1792 	size_t read_size;
1793 	struct binder_object_header *hdr;
1794 	size_t object_size = 0;
1795 
1796 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1797 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1798 	    !IS_ALIGNED(offset, sizeof(u32)))
1799 		return 0;
1800 
1801 	if (u) {
1802 		if (copy_from_user(object, u + offset, read_size))
1803 			return 0;
1804 	} else {
1805 		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1806 						  offset, read_size))
1807 			return 0;
1808 	}
1809 
1810 	/* Ok, now see if we read a complete object. */
1811 	hdr = &object->hdr;
1812 	switch (hdr->type) {
1813 	case BINDER_TYPE_BINDER:
1814 	case BINDER_TYPE_WEAK_BINDER:
1815 	case BINDER_TYPE_HANDLE:
1816 	case BINDER_TYPE_WEAK_HANDLE:
1817 		object_size = sizeof(struct flat_binder_object);
1818 		break;
1819 	case BINDER_TYPE_FD:
1820 		object_size = sizeof(struct binder_fd_object);
1821 		break;
1822 	case BINDER_TYPE_PTR:
1823 		object_size = sizeof(struct binder_buffer_object);
1824 		break;
1825 	case BINDER_TYPE_FDA:
1826 		object_size = sizeof(struct binder_fd_array_object);
1827 		break;
1828 	default:
1829 		return 0;
1830 	}
1831 	if (offset <= buffer->data_size - object_size &&
1832 	    buffer->data_size >= object_size)
1833 		return object_size;
1834 	else
1835 		return 0;
1836 }
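
/*
 * Illustrative sketch, not driver code: the sender-side layout that
 * binder_get_object() parses; local_ptr and local_cookie below are
 * placeholder names. A sender embedding one object records its
 * u32-aligned buffer offset in the offsets array:
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder = (binder_uintptr_t)local_ptr,
 *		.cookie = (binder_uintptr_t)local_cookie,
 *	};
 *	binder_size_t offsets[] = { 0 };
 *
 * binder_get_object() reads up to sizeof(struct binder_object) bytes
 * at offsets[0] and, based on hdr.type, reports
 * sizeof(struct flat_binder_object) as the object size.
 */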
1837 
1838 /**
1839  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1840  * @proc:	binder_proc owning the buffer
1841  * @b:		binder_buffer containing the object
1842  * @object:	struct binder_object to read into
1843  * @index:	index in offset array at which the binder_buffer_object is
1844  *		located
1845  * @start_offset: points to the start of the offset array
1846  * @object_offsetp: offset of @object read from @b
1847  * @num_valid:	the number of valid offsets in the offset array
1848  *
1849  * Return:	If @index is within the valid range of the offset array
1850  *		described by @start_offset and @num_valid, and if there's a valid
1851  *		binder_buffer_object at the offset found in index @index
1852  *		of the offset array, that object is returned. Otherwise,
1853  *		%NULL is returned.
1854  *		Note that the offset found in index @index itself is not
1855  *		verified; this function assumes that @num_valid elements
1856  *		from @start_offset were previously verified to have valid offsets.
1857  *		If @object_offsetp is non-NULL, then the offset within
1858  *		@b is written to it.
1859  */
1860 static struct binder_buffer_object *binder_validate_ptr(
1861 						struct binder_proc *proc,
1862 						struct binder_buffer *b,
1863 						struct binder_object *object,
1864 						binder_size_t index,
1865 						binder_size_t start_offset,
1866 						binder_size_t *object_offsetp,
1867 						binder_size_t num_valid)
1868 {
1869 	size_t object_size;
1870 	binder_size_t object_offset;
1871 	unsigned long buffer_offset;
1872 
1873 	if (index >= num_valid)
1874 		return NULL;
1875 
1876 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1877 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1878 					  b, buffer_offset,
1879 					  sizeof(object_offset)))
1880 		return NULL;
1881 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1882 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1883 		return NULL;
1884 	if (object_offsetp)
1885 		*object_offsetp = object_offset;
1886 
1887 	return &object->bbo;
1888 }
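
/*
 * Worked example for the lookup above (values are illustrative): with
 * start_offset = 40, index = 2 and sizeof(binder_size_t) == 8, the
 * offset entry is read from buffer offset 40 + 8 * 2 = 56; the object
 * found at that entry's offset must then have type BINDER_TYPE_PTR for
 * binder_validate_ptr() to return it.
 */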
1889 
1890 /**
1891  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1892  * @proc:		binder_proc owning the buffer
1893  * @b:			transaction buffer
1894  * @objects_start_offset: offset to start of objects buffer
1895  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1896  * @fixup_offset:	start offset in @b to fix up
1897  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1898  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1899  *
1900  * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
1901  *			allowed.
1902  *
1903  * For safety reasons, we only allow fixups inside a buffer to happen
1904  * at increasing offsets; additionally, we only allow fixup on the last
1905  * buffer object that was verified, or one of its parents.
1906  *
1907  * Example of what is allowed:
1908  *
1909  * A
1910  *   B (parent = A, offset = 0)
1911  *   C (parent = A, offset = 16)
1912  *     D (parent = C, offset = 0)
1913  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1914  *
1915  * Examples of what is not allowed:
1916  *
1917  * Decreasing offsets within the same parent:
1918  * A
1919  *   C (parent = A, offset = 16)
1920  *   B (parent = A, offset = 0) // decreasing offset within A
1921  *
1922  * Referring to a parent that wasn't the last object or any of its parents:
1923  * A
1924  *   B (parent = A, offset = 0)
1925  *   C (parent = A, offset = 16)
1927  *     D (parent = B, offset = 0) // B is not C or any of C's parents
1928  */
1929 static bool binder_validate_fixup(struct binder_proc *proc,
1930 				  struct binder_buffer *b,
1931 				  binder_size_t objects_start_offset,
1932 				  binder_size_t buffer_obj_offset,
1933 				  binder_size_t fixup_offset,
1934 				  binder_size_t last_obj_offset,
1935 				  binder_size_t last_min_offset)
1936 {
1937 	if (!last_obj_offset) {
1938 		/* Nothing to fix up */
1939 		return false;
1940 	}
1941 
1942 	while (last_obj_offset != buffer_obj_offset) {
1943 		unsigned long buffer_offset;
1944 		struct binder_object last_object;
1945 		struct binder_buffer_object *last_bbo;
1946 		size_t object_size = binder_get_object(proc, NULL, b,
1947 						       last_obj_offset,
1948 						       &last_object);
1949 		if (object_size != sizeof(*last_bbo))
1950 			return false;
1951 
1952 		last_bbo = &last_object.bbo;
1953 		/*
1954 		 * Safe to retrieve the parent of last_obj, since it
1955 		 * was already previously verified by the driver.
1956 		 */
1957 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1958 			return false;
1959 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1960 		buffer_offset = objects_start_offset +
1961 			sizeof(binder_size_t) * last_bbo->parent;
1962 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1963 						  &last_obj_offset,
1964 						  b, buffer_offset,
1965 						  sizeof(last_obj_offset)))
1966 			return false;
1967 	}
1968 	return (fixup_offset >= last_min_offset);
1969 }
1970 
1971 /**
1972  * struct binder_task_work_cb - for deferred close
1973  *
1974  * @twork:                callback_head for task work
1975  * @file:                 file to close
1976  *
1977  * Structure to pass task work to be handled after
1978  * returning from binder_ioctl() via task_work_add().
1979  */
1980 struct binder_task_work_cb {
1981 	struct callback_head twork;
1982 	struct file *file;
1983 };
1984 
1985 /**
1986  * binder_do_fd_close() - close list of file descriptors
1987  * @twork:	callback head for task work
1988  *
1989  * It is not safe to close a file descriptor during the
1990  * binder_ioctl() function if there is a chance that binder's own
1991  * file descriptor might be closed. This is to meet the
1992  * requirements for using fdget() (see comments for
1993  * __fget_light()). Therefore use task_work_add() to schedule
1994  * the close operation once we have returned from binder_ioctl().
1995  * This function is the callback for that mechanism and does the
1996  * final fput() on the file that backed the descriptor being
1997  * closed.
1998 static void binder_do_fd_close(struct callback_head *twork)
1999 {
2000 	struct binder_task_work_cb *twcb = container_of(twork,
2001 			struct binder_task_work_cb, twork);
2002 
2003 	fput(twcb->file);
2004 	kfree(twcb);
2005 }
2006 
2007 /**
2008  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2009  * @fd:		file-descriptor to close
2010  *
2011  * See comments in binder_do_fd_close(). This function is used to schedule
2012  * a file-descriptor to be closed after returning from binder_ioctl().
2013  */
2014 static void binder_deferred_fd_close(int fd)
2015 {
2016 	struct binder_task_work_cb *twcb;
2017 
2018 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2019 	if (!twcb)
2020 		return;
2021 	init_task_work(&twcb->twork, binder_do_fd_close);
2022 	twcb->file = file_close_fd(fd);
2023 	if (twcb->file) {
2024 		// pin it until binder_do_fd_close(); see comments there
2025 		get_file(twcb->file);
2026 		filp_close(twcb->file, current->files);
2027 		task_work_add(current, &twcb->twork, TWA_RESUME);
2028 	} else {
2029 		kfree(twcb);
2030 	}
2031 }
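
/*
 * Sketch of the deferred-close sequence above, in order (this is a
 * restatement of the code, not new logic):
 *
 *	file = file_close_fd(fd);	detach fd from the table now
 *	get_file(file);			keep one reference for later
 *	filp_close(file, files);	flush and drop the table's ref
 *	task_work_add(...);		final fput() after the ioctl
 *
 * so the last reference on the file is never dropped while
 * binder_ioctl() may still hold an fdget() reference of its own.
 */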
2032 
2033 static void binder_transaction_buffer_release(struct binder_proc *proc,
2034 					      struct binder_thread *thread,
2035 					      struct binder_buffer *buffer,
2036 					      binder_size_t off_end_offset,
2037 					      bool is_failure)
2038 {
2039 	int debug_id = buffer->debug_id;
2040 	binder_size_t off_start_offset, buffer_offset;
2041 
2042 	binder_debug(BINDER_DEBUG_TRANSACTION,
2043 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2044 		     proc->pid, buffer->debug_id,
2045 		     buffer->data_size, buffer->offsets_size,
2046 		     (unsigned long long)off_end_offset);
2047 
2048 	if (buffer->target_node)
2049 		binder_dec_node(buffer->target_node, 1, 0);
2050 
2051 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2052 
2053 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2054 	     buffer_offset += sizeof(binder_size_t)) {
2055 		struct binder_object_header *hdr;
2056 		size_t object_size = 0;
2057 		struct binder_object object;
2058 		binder_size_t object_offset;
2059 
2060 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2061 						   buffer, buffer_offset,
2062 						   sizeof(object_offset)))
2063 			object_size = binder_get_object(proc, NULL, buffer,
2064 							object_offset, &object);
2065 		if (object_size == 0) {
2066 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2067 			       debug_id, (u64)object_offset, buffer->data_size);
2068 			continue;
2069 		}
2070 		hdr = &object.hdr;
2071 		switch (hdr->type) {
2072 		case BINDER_TYPE_BINDER:
2073 		case BINDER_TYPE_WEAK_BINDER: {
2074 			struct flat_binder_object *fp;
2075 			struct binder_node *node;
2076 
2077 			fp = to_flat_binder_object(hdr);
2078 			node = binder_get_node(proc, fp->binder);
2079 			if (node == NULL) {
2080 				pr_err("transaction release %d bad node %016llx\n",
2081 				       debug_id, (u64)fp->binder);
2082 				break;
2083 			}
2084 			binder_debug(BINDER_DEBUG_TRANSACTION,
2085 				     "        node %d u%016llx\n",
2086 				     node->debug_id, (u64)node->ptr);
2087 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2088 					0);
2089 			binder_put_node(node);
2090 		} break;
2091 		case BINDER_TYPE_HANDLE:
2092 		case BINDER_TYPE_WEAK_HANDLE: {
2093 			struct flat_binder_object *fp;
2094 			struct binder_ref_data rdata;
2095 			int ret;
2096 
2097 			fp = to_flat_binder_object(hdr);
2098 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2099 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2100 
2101 			if (ret) {
2102 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2103 				       debug_id, fp->handle, ret);
2104 				break;
2105 			}
2106 			binder_debug(BINDER_DEBUG_TRANSACTION,
2107 				     "        ref %d desc %d\n",
2108 				     rdata.debug_id, rdata.desc);
2109 		} break;
2110 
2111 		case BINDER_TYPE_FD: {
2112 			/*
2113 			 * No need to close the file here since user-space
2114 			 * closes it for successfully delivered
2115 			 * transactions. For transactions that weren't
2116 			 * delivered, the new fd was never allocated so
2117 			 * there is no need to close and the fput on the
2118 			 * file is done when the transaction is torn
2119 			 * down.
2120 			 */
2121 		} break;
2122 		case BINDER_TYPE_PTR:
2123 			/*
2124 			 * Nothing to do here, this will get cleaned up when the
2125 			 * transaction buffer gets freed
2126 			 */
2127 			break;
2128 		case BINDER_TYPE_FDA: {
2129 			struct binder_fd_array_object *fda;
2130 			struct binder_buffer_object *parent;
2131 			struct binder_object ptr_object;
2132 			binder_size_t fda_offset;
2133 			size_t fd_index;
2134 			binder_size_t fd_buf_size;
2135 			binder_size_t num_valid;
2136 
2137 			if (is_failure) {
2138 				/*
2139 				 * The fd fixups have not been applied so no
2140 				 * fds need to be closed.
2141 				 */
2142 				continue;
2143 			}
2144 
2145 			num_valid = (buffer_offset - off_start_offset) /
2146 						sizeof(binder_size_t);
2147 			fda = to_binder_fd_array_object(hdr);
2148 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2149 						     fda->parent,
2150 						     off_start_offset,
2151 						     NULL,
2152 						     num_valid);
2153 			if (!parent) {
2154 				pr_err("transaction release %d bad parent offset\n",
2155 				       debug_id);
2156 				continue;
2157 			}
2158 			fd_buf_size = sizeof(u32) * fda->num_fds;
2159 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2160 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2161 				       debug_id, (u64)fda->num_fds);
2162 				continue;
2163 			}
2164 			if (fd_buf_size > parent->length ||
2165 			    fda->parent_offset > parent->length - fd_buf_size) {
2166 				/* No space for all file descriptors here. */
2167 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2168 				       debug_id, (u64)fda->num_fds);
2169 				continue;
2170 			}
2171 			/*
2172 			 * the source data for binder_buffer_object is visible
2173 			 * to user-space and the @buffer element is the user
2174 			 * pointer to the buffer_object containing the fd_array.
2175 			 * Convert the address to an offset relative to
2176 			 * the base of the transaction buffer.
2177 			 */
2178 			fda_offset = parent->buffer - buffer->user_data +
2179 				fda->parent_offset;
2180 			for (fd_index = 0; fd_index < fda->num_fds;
2181 			     fd_index++) {
2182 				u32 fd;
2183 				int err;
2184 				binder_size_t offset = fda_offset +
2185 					fd_index * sizeof(fd);
2186 
2187 				err = binder_alloc_copy_from_buffer(
2188 						&proc->alloc, &fd, buffer,
2189 						offset, sizeof(fd));
2190 				WARN_ON(err);
2191 				if (!err) {
2192 					binder_deferred_fd_close(fd);
2193 					/*
2194 					 * Need to make sure the thread goes
2195 					 * back to userspace to complete the
2196 					 * deferred close
2197 					 */
2198 					if (thread)
2199 						thread->looper_need_return = true;
2200 				}
2201 			}
2202 		} break;
2203 		default:
2204 			pr_err("transaction release %d bad object type %x\n",
2205 				debug_id, hdr->type);
2206 			break;
2207 		}
2208 	}
2209 }
2210 
2211 /* Clean up all the objects in the buffer */
2212 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2213 						struct binder_thread *thread,
2214 						struct binder_buffer *buffer,
2215 						bool is_failure)
2216 {
2217 	binder_size_t off_end_offset;
2218 
2219 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2220 	off_end_offset += buffer->offsets_size;
2221 
2222 	binder_transaction_buffer_release(proc, thread, buffer,
2223 					  off_end_offset, is_failure);
2224 }
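
/*
 * For example (on a 64-bit kernel), a buffer with data_size = 20 and
 * offsets_size = 16 has its offset array at ALIGN(20, 8) = 24, so
 * off_end_offset = 24 + 16 = 40 and binder_transaction_buffer_release()
 * walks the two binder_size_t entries at buffer offsets 24 and 32.
 */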
2225 
2226 static int binder_translate_binder(struct flat_binder_object *fp,
2227 				   struct binder_transaction *t,
2228 				   struct binder_thread *thread)
2229 {
2230 	struct binder_node *node;
2231 	struct binder_proc *proc = thread->proc;
2232 	struct binder_proc *target_proc = t->to_proc;
2233 	struct binder_ref_data rdata;
2234 	int ret = 0;
2235 
2236 	node = binder_get_node(proc, fp->binder);
2237 	if (!node) {
2238 		node = binder_new_node(proc, fp);
2239 		if (!node)
2240 			return -ENOMEM;
2241 	}
2242 	if (fp->cookie != node->cookie) {
2243 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2244 				  proc->pid, thread->pid, (u64)fp->binder,
2245 				  node->debug_id, (u64)fp->cookie,
2246 				  (u64)node->cookie);
2247 		ret = -EINVAL;
2248 		goto done;
2249 	}
2250 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2251 		ret = -EPERM;
2252 		goto done;
2253 	}
2254 
2255 	ret = binder_inc_ref_for_node(target_proc, node,
2256 			fp->hdr.type == BINDER_TYPE_BINDER,
2257 			&thread->todo, &rdata);
2258 	if (ret)
2259 		goto done;
2260 
2261 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2262 		fp->hdr.type = BINDER_TYPE_HANDLE;
2263 	else
2264 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2265 	fp->binder = 0;
2266 	fp->handle = rdata.desc;
2267 	fp->cookie = 0;
2268 
2269 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2270 	binder_debug(BINDER_DEBUG_TRANSACTION,
2271 		     "        node %d u%016llx -> ref %d desc %d\n",
2272 		     node->debug_id, (u64)node->ptr,
2273 		     rdata.debug_id, rdata.desc);
2274 done:
2275 	binder_put_node(node);
2276 	return ret;
2277 }
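
/*
 * Sketch of the translation above as user-space sees it; the numeric
 * values are illustrative. The sender passes a local object:
 *
 *	{ .hdr.type = BINDER_TYPE_BINDER, .binder = 0xb0, .cookie = 0xc0 }
 *
 * and the target receives the same object rewritten as a handle:
 *
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = 7, .binder = 0, .cookie = 0 }
 *
 * where 7 stands for whatever descriptor binder_inc_ref_for_node()
 * assigned in the target process.
 */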
2278 
2279 static int binder_translate_handle(struct flat_binder_object *fp,
2280 				   struct binder_transaction *t,
2281 				   struct binder_thread *thread)
2282 {
2283 	struct binder_proc *proc = thread->proc;
2284 	struct binder_proc *target_proc = t->to_proc;
2285 	struct binder_node *node;
2286 	struct binder_ref_data src_rdata;
2287 	int ret = 0;
2288 
2289 	node = binder_get_node_from_ref(proc, fp->handle,
2290 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2291 	if (!node) {
2292 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2293 				  proc->pid, thread->pid, fp->handle);
2294 		return -EINVAL;
2295 	}
2296 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2297 		ret = -EPERM;
2298 		goto done;
2299 	}
2300 
2301 	binder_node_lock(node);
2302 	if (node->proc == target_proc) {
2303 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2304 			fp->hdr.type = BINDER_TYPE_BINDER;
2305 		else
2306 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2307 		fp->binder = node->ptr;
2308 		fp->cookie = node->cookie;
2309 		if (node->proc)
2310 			binder_inner_proc_lock(node->proc);
2311 		else
2312 			__acquire(&node->proc->inner_lock);
2313 		binder_inc_node_nilocked(node,
2314 					 fp->hdr.type == BINDER_TYPE_BINDER,
2315 					 0, NULL);
2316 		if (node->proc)
2317 			binder_inner_proc_unlock(node->proc);
2318 		else
2319 			__release(&node->proc->inner_lock);
2320 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2321 		binder_debug(BINDER_DEBUG_TRANSACTION,
2322 			     "        ref %d desc %d -> node %d u%016llx\n",
2323 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2324 			     (u64)node->ptr);
2325 		binder_node_unlock(node);
2326 	} else {
2327 		struct binder_ref_data dest_rdata;
2328 
2329 		binder_node_unlock(node);
2330 		ret = binder_inc_ref_for_node(target_proc, node,
2331 				fp->hdr.type == BINDER_TYPE_HANDLE,
2332 				NULL, &dest_rdata);
2333 		if (ret)
2334 			goto done;
2335 
2336 		fp->binder = 0;
2337 		fp->handle = dest_rdata.desc;
2338 		fp->cookie = 0;
2339 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2340 						    &dest_rdata);
2341 		binder_debug(BINDER_DEBUG_TRANSACTION,
2342 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2343 			     src_rdata.debug_id, src_rdata.desc,
2344 			     dest_rdata.debug_id, dest_rdata.desc,
2345 			     node->debug_id);
2346 	}
2347 done:
2348 	binder_put_node(node);
2349 	return ret;
2350 }
2351 
2352 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2353 			       struct binder_transaction *t,
2354 			       struct binder_thread *thread,
2355 			       struct binder_transaction *in_reply_to)
2356 {
2357 	struct binder_proc *proc = thread->proc;
2358 	struct binder_proc *target_proc = t->to_proc;
2359 	struct binder_txn_fd_fixup *fixup;
2360 	struct file *file;
2361 	int ret = 0;
2362 	bool target_allows_fd;
2363 
2364 	if (in_reply_to)
2365 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2366 	else
2367 		target_allows_fd = t->buffer->target_node->accept_fds;
2368 	if (!target_allows_fd) {
2369 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2370 				  proc->pid, thread->pid,
2371 				  in_reply_to ? "reply" : "transaction",
2372 				  fd);
2373 		ret = -EPERM;
2374 		goto err_fd_not_accepted;
2375 	}
2376 
2377 	file = fget(fd);
2378 	if (!file) {
2379 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2380 				  proc->pid, thread->pid, fd);
2381 		ret = -EBADF;
2382 		goto err_fget;
2383 	}
2384 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2385 	if (ret < 0) {
2386 		ret = -EPERM;
2387 		goto err_security;
2388 	}
2389 
2390 	/*
2391 	 * Add fixup record for this transaction. The allocation
2392 	 * of the fd in the target needs to be done from a
2393 	 * target thread.
2394 	 */
2395 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2396 	if (!fixup) {
2397 		ret = -ENOMEM;
2398 		goto err_alloc;
2399 	}
2400 	fixup->file = file;
2401 	fixup->offset = fd_offset;
2402 	fixup->target_fd = -1;
2403 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2404 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2405 
2406 	return ret;
2407 
2408 err_alloc:
2409 err_security:
2410 	fput(file);
2411 err_fget:
2412 err_fd_not_accepted:
2413 	return ret;
2414 }
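
/*
 * Note that the fd value in the buffer is not rewritten here; a fixup
 * record is queued instead. A sketch of the record's state on success:
 *
 *	fixup->file = file;		reference taken by fget() above
 *	fixup->offset = fd_offset;	where the target fd will land
 *	fixup->target_fd = -1;		not yet allocated
 *
 * The real descriptor is allocated later, from a thread of the target
 * process, when the transaction's fd fixups are applied.
 */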
2415 
2416 /**
2417  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2418  * @offset:	offset in target buffer to fixup
2419  * @skip_size:	bytes to skip in copy (fixup will be written later)
2420  * @fixup_data:	data to write at fixup offset
2421  * @node:	list node
2422  *
2423  * This is used for the pointer fixup list (pf) which is created and consumed
2424  * during binder_transaction() and is only accessed locally. No
2425  * locking is necessary.
2426  *
2427  * The list is ordered by @offset.
2428  */
2429 struct binder_ptr_fixup {
2430 	binder_size_t offset;
2431 	size_t skip_size;
2432 	binder_uintptr_t fixup_data;
2433 	struct list_head node;
2434 };
2435 
2436 /**
2437  * struct binder_sg_copy - scatter-gather data to be copied
2438  * @offset:		offset in target buffer
2439  * @sender_uaddr:	user address in source buffer
2440  * @length:		bytes to copy
2441  * @node:		list node
2442  *
2443  * This is used for the sg copy list (sgc) which is created and consumed
2444  * during binder_transaction() and is only accessed locally. No
2445  * locking is necessary.
2446  *
2447  * The list is ordered by @offset.
2448  */
2449 struct binder_sg_copy {
2450 	binder_size_t offset;
2451 	const void __user *sender_uaddr;
2452 	size_t length;
2453 	struct list_head node;
2454 };
2455 
2456 /**
2457  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2458  * @alloc:	binder_alloc associated with @buffer
2459  * @buffer:	binder buffer in target process
2460  * @sgc_head:	list_head of scatter-gather copy list
2461  * @pf_head:	list_head of pointer fixup list
2462  *
2463  * Processes all elements of @sgc_head, applying fixups from @pf_head
2464  * and copying the scatter-gather data from the source process' user
2465  * buffer to the target's buffer. It is expected that the list creation
2466  * and processing all occurs during binder_transaction() so these lists
2467  * are only accessed in local context.
2468  *
2469  * Return: 0=success, else -errno
2470  */
2471 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2472 					 struct binder_buffer *buffer,
2473 					 struct list_head *sgc_head,
2474 					 struct list_head *pf_head)
2475 {
2476 	int ret = 0;
2477 	struct binder_sg_copy *sgc, *tmpsgc;
2478 	struct binder_ptr_fixup *tmppf;
2479 	struct binder_ptr_fixup *pf =
2480 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2481 					 node);
2482 
2483 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2484 		size_t bytes_copied = 0;
2485 
2486 		while (bytes_copied < sgc->length) {
2487 			size_t copy_size;
2488 			size_t bytes_left = sgc->length - bytes_copied;
2489 			size_t offset = sgc->offset + bytes_copied;
2490 
2491 			/*
2492 			 * We copy up to the fixup (pointed to by pf)
2493 			 */
2494 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2495 				       : bytes_left;
2496 			if (!ret && copy_size)
2497 				ret = binder_alloc_copy_user_to_buffer(
2498 						alloc, buffer,
2499 						offset,
2500 						sgc->sender_uaddr + bytes_copied,
2501 						copy_size);
2502 			bytes_copied += copy_size;
2503 			if (copy_size != bytes_left) {
2504 				BUG_ON(!pf);
2505 				/* we stopped at a fixup offset */
2506 				if (pf->skip_size) {
2507 					/*
2508 					 * we are just skipping. This is for
2509 					 * BINDER_TYPE_FDA where the translated
2510 					 * fds will be fixed up when we get
2511 					 * to target context.
2512 					 */
2513 					bytes_copied += pf->skip_size;
2514 				} else {
2515 					/* apply the fixup indicated by pf */
2516 					if (!ret)
2517 						ret = binder_alloc_copy_to_buffer(
2518 							alloc, buffer,
2519 							pf->offset,
2520 							&pf->fixup_data,
2521 							sizeof(pf->fixup_data));
2522 					bytes_copied += sizeof(pf->fixup_data);
2523 				}
2524 				list_del(&pf->node);
2525 				kfree(pf);
2526 				pf = list_first_entry_or_null(pf_head,
2527 						struct binder_ptr_fixup, node);
2528 			}
2529 		}
2530 		list_del(&sgc->node);
2531 		kfree(sgc);
2532 	}
2533 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2534 		BUG_ON(pf->skip_size == 0);
2535 		list_del(&pf->node);
2536 		kfree(pf);
2537 	}
2538 	BUG_ON(!list_empty(sgc_head));
2539 
2540 	return ret > 0 ? -EINVAL : ret;
2541 }
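
/*
 * Worked example of the loop above (values are illustrative): one sg
 * block of length 32 at offset 0, with a single 8-byte pointer fixup
 * at pf->offset = 16 and pf->skip_size == 0, is processed as
 *
 *	copy  bytes [0, 16)  from the sender      (copy_size = 16)
 *	write bytes [16, 24) from pf->fixup_data  (the fixup)
 *	copy  bytes [24, 32) from the sender      (the remainder)
 *
 * so the sender's untranslated pointer at offset 16 never becomes
 * visible in the target buffer.
 */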
2542 
2543 /**
2544  * binder_cleanup_deferred_txn_lists() - free specified lists
2545  * @sgc_head:	list_head of scatter-gather copy list
2546  * @pf_head:	list_head of pointer fixup list
2547  *
2548  * Called to clean up @sgc_head and @pf_head if there is an
2549  * error.
2550  */
2551 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2552 					      struct list_head *pf_head)
2553 {
2554 	struct binder_sg_copy *sgc, *tmpsgc;
2555 	struct binder_ptr_fixup *pf, *tmppf;
2556 
2557 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2558 		list_del(&sgc->node);
2559 		kfree(sgc);
2560 	}
2561 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2562 		list_del(&pf->node);
2563 		kfree(pf);
2564 	}
2565 }
2566 
2567 /**
2568  * binder_defer_copy() - queue a scatter-gather buffer for copy
2569  * @sgc_head:		list_head of scatter-gather copy list
2570  * @offset:		binder buffer offset in target process
2571  * @sender_uaddr:	user address in source process
2572  * @length:		bytes to copy
2573  *
2574  * Specify a scatter-gather block to be copied. The actual copy must
2575  * be deferred until all the needed fixups are identified and queued.
2576  * Then the copy and fixups are done together so un-translated values
2577  * from the source are never visible in the target buffer.
2578  *
2579  * We are guaranteed that repeated calls to this function will have
2580  * monotonically increasing @offset values so the list will naturally
2581  * be ordered.
2582  *
2583  * Return: 0=success, else -errno
2584  */
2585 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2586 			     const void __user *sender_uaddr, size_t length)
2587 {
2588 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2589 
2590 	if (!bc)
2591 		return -ENOMEM;
2592 
2593 	bc->offset = offset;
2594 	bc->sender_uaddr = sender_uaddr;
2595 	bc->length = length;
2596 	INIT_LIST_HEAD(&bc->node);
2597 
2598 	/*
2599 	 * We are guaranteed that the deferred copies are in-order
2600 	 * so just add to the tail.
2601 	 */
2602 	list_add_tail(&bc->node, sgc_head);
2603 
2604 	return 0;
2605 }
2606 
2607 /**
2608  * binder_add_fixup() - queue a fixup to be applied to sg copy
2609  * @pf_head:	list_head of binder ptr fixup list
2610  * @offset:	binder buffer offset in target process
2611  * @fixup:	bytes to be copied for fixup
2612  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2613  *
2614  * Add the specified fixup to a list ordered by @offset. When copying
2615  * the scatter-gather buffers, the fixup will be copied instead of
2616  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2617  * will be applied later (in target process context), so we just skip
2618  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2619  * value in @fixup.
2620  *
2621  * This function is called *mostly* in @offset order, but there are
2622  * exceptions. Since out-of-order inserts are relatively uncommon,
2623  * we insert the new element by searching backward from the tail of
2624  * the list.
2625  *
2626  * Return: 0=success, else -errno
2627  */
2628 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2629 			    binder_uintptr_t fixup, size_t skip_size)
2630 {
2631 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2632 	struct binder_ptr_fixup *tmppf;
2633 
2634 	if (!pf)
2635 		return -ENOMEM;
2636 
2637 	pf->offset = offset;
2638 	pf->fixup_data = fixup;
2639 	pf->skip_size = skip_size;
2640 	INIT_LIST_HEAD(&pf->node);
2641 
2642 	/* Fixups are *mostly* added in-order, but there are some
2643 	 * exceptions. Look backwards through list for insertion point.
2644 	 */
2645 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2646 		if (tmppf->offset < pf->offset) {
2647 			list_add(&pf->node, &tmppf->node);
2648 			return 0;
2649 		}
2650 	}
2651 	/*
2652 	 * if we get here, then the new offset is the lowest so
2653 	 * insert at the head
2654 	 */
2655 	list_add(&pf->node, pf_head);
2656 	return 0;
2657 }
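
/*
 * For example, adding fixups at offsets 8, 24 and then 16 leaves the
 * list ordered 8 -> 16 -> 24: the out-of-order 16 is placed by walking
 * backward from the tail until the entry with offset 8 is found. This
 * keeps binder_do_deferred_txn_copies() consuming fixups in strictly
 * increasing @offset order.
 */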
2658 
2659 static int binder_translate_fd_array(struct list_head *pf_head,
2660 				     struct binder_fd_array_object *fda,
2661 				     const void __user *sender_ubuffer,
2662 				     struct binder_buffer_object *parent,
2663 				     struct binder_buffer_object *sender_uparent,
2664 				     struct binder_transaction *t,
2665 				     struct binder_thread *thread,
2666 				     struct binder_transaction *in_reply_to)
2667 {
2668 	binder_size_t fdi, fd_buf_size;
2669 	binder_size_t fda_offset;
2670 	const void __user *sender_ufda_base;
2671 	struct binder_proc *proc = thread->proc;
2672 	int ret;
2673 
2674 	if (fda->num_fds == 0)
2675 		return 0;
2676 
2677 	fd_buf_size = sizeof(u32) * fda->num_fds;
2678 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2679 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2680 				  proc->pid, thread->pid, (u64)fda->num_fds);
2681 		return -EINVAL;
2682 	}
2683 	if (fd_buf_size > parent->length ||
2684 	    fda->parent_offset > parent->length - fd_buf_size) {
2685 		/* No space for all file descriptors here. */
2686 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2687 				  proc->pid, thread->pid, (u64)fda->num_fds);
2688 		return -EINVAL;
2689 	}
2690 	/*
2691 	 * the source data for binder_buffer_object is visible
2692 	 * to user-space and the @buffer element is the user
2693 	 * pointer to the buffer_object containing the fd_array.
2694 	 * Convert the address to an offset relative to
2695 	 * the base of the transaction buffer.
2696 	 */
2697 	fda_offset = parent->buffer - t->buffer->user_data +
2698 		fda->parent_offset;
2699 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2700 				fda->parent_offset;
2701 
2702 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2703 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2704 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2705 				  proc->pid, thread->pid);
2706 		return -EINVAL;
2707 	}
2708 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2709 	if (ret)
2710 		return ret;
2711 
2712 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2713 		u32 fd;
2714 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2715 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2716 
2717 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2718 		if (!ret)
2719 			ret = binder_translate_fd(fd, offset, t, thread,
2720 						  in_reply_to);
2721 		if (ret)
2722 			return ret > 0 ? -EINVAL : ret;
2723 	}
2724 	return 0;
2725 }
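
/*
 * Example of the fda_offset conversion above (addresses are
 * illustrative): with parent->buffer = 0x7000, the transaction buffer
 * starting at t->buffer->user_data = 0x6000 and the fd array at
 * fda->parent_offset = 0x40 within its parent, the array lives at
 * fda_offset = 0x7000 - 0x6000 + 0x40 = 0x1040 bytes into the kernel
 * copy of the transaction buffer.
 */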
2726 
2727 static int binder_fixup_parent(struct list_head *pf_head,
2728 			       struct binder_transaction *t,
2729 			       struct binder_thread *thread,
2730 			       struct binder_buffer_object *bp,
2731 			       binder_size_t off_start_offset,
2732 			       binder_size_t num_valid,
2733 			       binder_size_t last_fixup_obj_off,
2734 			       binder_size_t last_fixup_min_off)
2735 {
2736 	struct binder_buffer_object *parent;
2737 	struct binder_buffer *b = t->buffer;
2738 	struct binder_proc *proc = thread->proc;
2739 	struct binder_proc *target_proc = t->to_proc;
2740 	struct binder_object object;
2741 	binder_size_t buffer_offset;
2742 	binder_size_t parent_offset;
2743 
2744 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2745 		return 0;
2746 
2747 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2748 				     off_start_offset, &parent_offset,
2749 				     num_valid);
2750 	if (!parent) {
2751 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2752 				  proc->pid, thread->pid);
2753 		return -EINVAL;
2754 	}
2755 
2756 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2757 				   parent_offset, bp->parent_offset,
2758 				   last_fixup_obj_off,
2759 				   last_fixup_min_off)) {
2760 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2761 				  proc->pid, thread->pid);
2762 		return -EINVAL;
2763 	}
2764 
2765 	if (parent->length < sizeof(binder_uintptr_t) ||
2766 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2767 		/* No space for a pointer here! */
2768 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2769 				  proc->pid, thread->pid);
2770 		return -EINVAL;
2771 	}
2772 
2773 	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2774 
2775 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2776 }
2777 
2778 /**
2779  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2780  * @t1: the pending async txn in the frozen process
2781  * @t2: the new async txn to supersede the outdated pending one
2782  *
2783  * Return:  true if t2 can supersede t1
2784  *          false if t2 cannot supersede t1
2785  */
2786 static bool binder_can_update_transaction(struct binder_transaction *t1,
2787 					  struct binder_transaction *t2)
2788 {
2789 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2790 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2791 		return false;
2792 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2793 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2794 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2795 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2796 		return true;
2797 	return false;
2798 }
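
/*
 * A sketch of the intended use from user-space; STATE_UPDATE is an
 * illustrative command code, not part of the uapi. A sender that
 * repeatedly pushes the same kind of oneway update to a frozen
 * process marks it so a newer transaction can replace a stale,
 * still-queued one:
 *
 *	struct binder_transaction_data tr = {
 *		.flags = TF_ONE_WAY | TF_UPDATE_TXN,
 *		.code = STATE_UPDATE,
 *		...
 *	};
 *
 * Only transactions that also agree on the target node, code, flags
 * and sending pid are treated as interchangeable by the checks above.
 */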
2799 
2800 /**
2801  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2802  * @t:		 new async transaction
2803  * @target_list: list to search for an outdated transaction
2804  *
2805  * Return: the outdated transaction if found
2806  *         NULL if no outdated transaction can be found
2807  *
2808  * Requires the proc->inner_lock to be held.
2809  */
2810 static struct binder_transaction *
2811 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2812 					 struct list_head *target_list)
2813 {
2814 	struct binder_work *w;
2815 
2816 	list_for_each_entry(w, target_list, entry) {
2817 		struct binder_transaction *t_queued;
2818 
2819 		if (w->type != BINDER_WORK_TRANSACTION)
2820 			continue;
2821 		t_queued = container_of(w, struct binder_transaction, work);
2822 		if (binder_can_update_transaction(t_queued, t))
2823 			return t_queued;
2824 	}
2825 	return NULL;
2826 }
2827 
2828 /**
2829  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2830  * @t:		transaction to send
2831  * @proc:	process to send the transaction to
2832  * @thread:	thread in @proc to send the transaction to (may be NULL)
2833  *
2834  * This function queues a transaction to the specified process. It will try
2835  * to find a thread in the target process to handle the transaction and
2836  * wake it up. If no thread is found, the work is queued to the
2837  * proc->todo list and a waiting thread, if available, is woken.
2838  *
2839  * If the @thread parameter is not NULL, the transaction is always queued
2840  * to the todo list of that specific thread.
2841  *
2842  * Return:	0 if the transaction was successfully queued
2843  *		BR_DEAD_REPLY if the target process or thread is dead
2844  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2845  *			the sync transaction was rejected
2846  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2847  *		and the async transaction was successfully queued
2848  */
2849 static int binder_proc_transaction(struct binder_transaction *t,
2850 				    struct binder_proc *proc,
2851 				    struct binder_thread *thread)
2852 {
2853 	struct binder_node *node = t->buffer->target_node;
2854 	bool oneway = !!(t->flags & TF_ONE_WAY);
2855 	bool pending_async = false;
2856 	struct binder_transaction *t_outdated = NULL;
2857 	bool frozen = false;
2858 
2859 	BUG_ON(!node);
2860 	binder_node_lock(node);
2861 	if (oneway) {
2862 		BUG_ON(thread);
2863 		if (node->has_async_transaction)
2864 			pending_async = true;
2865 		else
2866 			node->has_async_transaction = true;
2867 	}
2868 
2869 	binder_inner_proc_lock(proc);
2870 	if (proc->is_frozen) {
2871 		frozen = true;
2872 		proc->sync_recv |= !oneway;
2873 		proc->async_recv |= oneway;
2874 	}
2875 
2876 	if ((frozen && !oneway) || proc->is_dead ||
2877 			(thread && thread->is_dead)) {
2878 		binder_inner_proc_unlock(proc);
2879 		binder_node_unlock(node);
2880 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2881 	}
2882 
2883 	if (!thread && !pending_async)
2884 		thread = binder_select_thread_ilocked(proc);
2885 
2886 	if (thread) {
2887 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2888 	} else if (!pending_async) {
2889 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2890 	} else {
2891 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2892 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2893 									      &node->async_todo);
2894 			if (t_outdated) {
2895 				binder_debug(BINDER_DEBUG_TRANSACTION,
2896 					     "txn %d supersedes %d\n",
2897 					     t->debug_id, t_outdated->debug_id);
2898 				list_del_init(&t_outdated->work.entry);
2899 				proc->outstanding_txns--;
2900 			}
2901 		}
2902 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2903 	}
2904 
2905 	if (!pending_async)
2906 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2907 
2908 	proc->outstanding_txns++;
2909 	binder_inner_proc_unlock(proc);
2910 	binder_node_unlock(node);
2911 
2912 	/*
2913 	 * To reduce potential contention, free the outdated transaction and
2914 	 * buffer after releasing the locks.
2915 	 */
2916 	if (t_outdated) {
2917 		struct binder_buffer *buffer = t_outdated->buffer;
2918 
2919 		t_outdated->buffer = NULL;
2920 		buffer->transaction = NULL;
2921 		trace_binder_transaction_update_buffer_release(buffer);
2922 		binder_release_entire_buffer(proc, NULL, buffer, false);
2923 		binder_alloc_free_buf(&proc->alloc, buffer);
2924 		kfree(t_outdated);
2925 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2926 	}
2927 
2928 	if (oneway && frozen)
2929 		return BR_TRANSACTION_PENDING_FROZEN;
2930 
2931 	return 0;
2932 }
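
/*
 * The two frozen outcomes above differ deliberately: a sync
 * transaction to a frozen process fails immediately with
 * BR_FROZEN_REPLY, while a oneway transaction is still queued and
 * reported as BR_TRANSACTION_PENDING_FROZEN so the sender knows
 * delivery is deferred until the target process is unfrozen.
 */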
2933 
2934 /**
2935  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2936  * @node:         struct binder_node for which to get refs
2937  * @procp:        returns @node->proc if valid
2938  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2939  *
2940  * User-space normally keeps the node alive when creating a transaction
2941  * since it has a reference to the target. The local strong ref keeps it
2942  * alive if the sending process dies before the target process processes
2943  * the transaction. If the source process is malicious or has a reference
2944  * counting bug, relying on the local strong ref can fail.
2945  *
2946  * Since user-space can cause the local strong ref to go away, we also take
2947  * a tmpref on the node to ensure it survives while we are constructing
2948  * the transaction. We also need a tmpref on the proc while we are
2949  * constructing the transaction, so we take that here as well.
2950  *
2951  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2952  * Also sets @procp if valid. A NULL @node->proc indicates that the
2953  * target proc has died, in which case @error is set to BR_DEAD_REPLY.
2954  */
2955 static struct binder_node *binder_get_node_refs_for_txn(
2956 		struct binder_node *node,
2957 		struct binder_proc **procp,
2958 		uint32_t *error)
2959 {
2960 	struct binder_node *target_node = NULL;
2961 
2962 	binder_node_inner_lock(node);
2963 	if (node->proc) {
2964 		target_node = node;
2965 		binder_inc_node_nilocked(node, 1, 0, NULL);
2966 		binder_inc_node_tmpref_ilocked(node);
2967 		node->proc->tmp_ref++;
2968 		*procp = node->proc;
2969 	} else
2970 		*error = BR_DEAD_REPLY;
2971 	binder_node_inner_unlock(node);
2972 
2973 	return target_node;
2974 }
2975 
2976 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2977 				      uint32_t command, int32_t param)
2978 {
2979 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2980 
2981 	if (!from) {
2982 		/* annotation for sparse */
2983 		__release(&from->proc->inner_lock);
2984 		return;
2985 	}
2986 
2987 	/* don't override existing errors */
2988 	if (from->ee.command == BR_OK)
2989 		binder_set_extended_error(&from->ee, id, command, param);
2990 	binder_inner_proc_unlock(from->proc);
2991 	binder_thread_dec_tmpref(from);
2992 }
2993 
2994 static void binder_transaction(struct binder_proc *proc,
2995 			       struct binder_thread *thread,
2996 			       struct binder_transaction_data *tr, int reply,
2997 			       binder_size_t extra_buffers_size)
2998 {
2999 	int ret;
3000 	struct binder_transaction *t;
3001 	struct binder_work *w;
3002 	struct binder_work *tcomplete;
3003 	binder_size_t buffer_offset = 0;
3004 	binder_size_t off_start_offset, off_end_offset;
3005 	binder_size_t off_min;
3006 	binder_size_t sg_buf_offset, sg_buf_end_offset;
3007 	binder_size_t user_offset = 0;
3008 	struct binder_proc *target_proc = NULL;
3009 	struct binder_thread *target_thread = NULL;
3010 	struct binder_node *target_node = NULL;
3011 	struct binder_transaction *in_reply_to = NULL;
3012 	struct binder_transaction_log_entry *e;
3013 	uint32_t return_error = 0;
3014 	uint32_t return_error_param = 0;
3015 	uint32_t return_error_line = 0;
3016 	binder_size_t last_fixup_obj_off = 0;
3017 	binder_size_t last_fixup_min_off = 0;
3018 	struct binder_context *context = proc->context;
3019 	int t_debug_id = atomic_inc_return(&binder_last_id);
3020 	ktime_t t_start_time = ktime_get();
3021 	struct lsm_context lsmctx = { };
3022 	struct list_head sgc_head;
3023 	struct list_head pf_head;
3024 	const void __user *user_buffer = (const void __user *)
3025 				(uintptr_t)tr->data.ptr.buffer;
3026 	INIT_LIST_HEAD(&sgc_head);
3027 	INIT_LIST_HEAD(&pf_head);
3028 
3029 	e = binder_transaction_log_add(&binder_transaction_log);
3030 	e->debug_id = t_debug_id;
3031 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3032 	e->from_proc = proc->pid;
3033 	e->from_thread = thread->pid;
3034 	e->target_handle = tr->target.handle;
3035 	e->data_size = tr->data_size;
3036 	e->offsets_size = tr->offsets_size;
3037 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3038 
3039 	binder_inner_proc_lock(proc);
3040 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3041 	binder_inner_proc_unlock(proc);
3042 
3043 	if (reply) {
3044 		binder_inner_proc_lock(proc);
3045 		in_reply_to = thread->transaction_stack;
3046 		if (in_reply_to == NULL) {
3047 			binder_inner_proc_unlock(proc);
3048 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3049 					  proc->pid, thread->pid);
3050 			return_error = BR_FAILED_REPLY;
3051 			return_error_param = -EPROTO;
3052 			return_error_line = __LINE__;
3053 			goto err_empty_call_stack;
3054 		}
3055 		if (in_reply_to->to_thread != thread) {
3056 			spin_lock(&in_reply_to->lock);
3057 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3058 				proc->pid, thread->pid, in_reply_to->debug_id,
3059 				in_reply_to->to_proc ?
3060 				in_reply_to->to_proc->pid : 0,
3061 				in_reply_to->to_thread ?
3062 				in_reply_to->to_thread->pid : 0);
3063 			spin_unlock(&in_reply_to->lock);
3064 			binder_inner_proc_unlock(proc);
3065 			return_error = BR_FAILED_REPLY;
3066 			return_error_param = -EPROTO;
3067 			return_error_line = __LINE__;
3068 			in_reply_to = NULL;
3069 			goto err_bad_call_stack;
3070 		}
3071 		thread->transaction_stack = in_reply_to->to_parent;
3072 		binder_inner_proc_unlock(proc);
3073 		binder_set_nice(in_reply_to->saved_priority);
3074 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3075 		if (target_thread == NULL) {
3076 			/* annotation for sparse */
3077 			__release(&target_thread->proc->inner_lock);
3078 			binder_txn_error("%d:%d reply target not found\n",
3079 				thread->pid, proc->pid);
3080 			return_error = BR_DEAD_REPLY;
3081 			return_error_line = __LINE__;
3082 			goto err_dead_binder;
3083 		}
3084 		if (target_thread->transaction_stack != in_reply_to) {
3085 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3086 				proc->pid, thread->pid,
3087 				target_thread->transaction_stack ?
3088 				target_thread->transaction_stack->debug_id : 0,
3089 				in_reply_to->debug_id);
3090 			binder_inner_proc_unlock(target_thread->proc);
3091 			return_error = BR_FAILED_REPLY;
3092 			return_error_param = -EPROTO;
3093 			return_error_line = __LINE__;
3094 			in_reply_to = NULL;
3095 			target_thread = NULL;
3096 			goto err_dead_binder;
3097 		}
3098 		target_proc = target_thread->proc;
3099 		target_proc->tmp_ref++;
3100 		binder_inner_proc_unlock(target_thread->proc);
3101 	} else {
3102 		if (tr->target.handle) {
3103 			struct binder_ref *ref;
3104 
3105 			/*
3106 			 * There must already be a strong ref
3107 			 * on this node. If so, do a strong
3108 			 * increment on the node to ensure it
3109 			 * stays alive until the transaction is
3110 			 * done.
3111 			 */
3112 			binder_proc_lock(proc);
3113 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3114 						     true);
3115 			if (ref) {
3116 				target_node = binder_get_node_refs_for_txn(
3117 						ref->node, &target_proc,
3118 						&return_error);
3119 			} else {
3120 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3121 						  proc->pid, thread->pid, tr->target.handle);
3122 				return_error = BR_FAILED_REPLY;
3123 			}
3124 			binder_proc_unlock(proc);
3125 		} else {
3126 			mutex_lock(&context->context_mgr_node_lock);
3127 			target_node = context->binder_context_mgr_node;
3128 			if (target_node)
3129 				target_node = binder_get_node_refs_for_txn(
3130 						target_node, &target_proc,
3131 						&return_error);
3132 			else
3133 				return_error = BR_DEAD_REPLY;
3134 			mutex_unlock(&context->context_mgr_node_lock);
3135 			if (target_node && target_proc->pid == proc->pid) {
3136 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3137 						  proc->pid, thread->pid);
3138 				return_error = BR_FAILED_REPLY;
3139 				return_error_param = -EINVAL;
3140 				return_error_line = __LINE__;
3141 				goto err_invalid_target_handle;
3142 			}
3143 		}
3144 		if (!target_node) {
3145 			binder_txn_error("%d:%d cannot find target node\n",
3146 					 proc->pid, thread->pid);
3147 			/* return_error is set above */
3148 			return_error_param = -EINVAL;
3149 			return_error_line = __LINE__;
3150 			goto err_dead_binder;
3151 		}
3152 		e->to_node = target_node->debug_id;
3153 		if (WARN_ON(proc == target_proc)) {
3154 			binder_txn_error("%d:%d self transactions not allowed\n",
3155 				thread->pid, proc->pid);
3156 			return_error = BR_FAILED_REPLY;
3157 			return_error_param = -EINVAL;
3158 			return_error_line = __LINE__;
3159 			goto err_invalid_target_handle;
3160 		}
3161 		if (security_binder_transaction(proc->cred,
3162 						target_proc->cred) < 0) {
3163 			binder_txn_error("%d:%d transaction credentials failed\n",
3164 				thread->pid, proc->pid);
3165 			return_error = BR_FAILED_REPLY;
3166 			return_error_param = -EPERM;
3167 			return_error_line = __LINE__;
3168 			goto err_invalid_target_handle;
3169 		}
3170 		binder_inner_proc_lock(proc);
3171 
3172 		w = list_first_entry_or_null(&thread->todo,
3173 					     struct binder_work, entry);
3174 		if (!(tr->flags & TF_ONE_WAY) && w &&
3175 		    w->type == BINDER_WORK_TRANSACTION) {
3176 			/*
3177 			 * Do not allow new outgoing transaction from a
3178 			 * thread that has a transaction at the head of
3179 			 * its todo list. Only need to check the head
3180 			 * because binder_select_thread_ilocked picks a
3181 			 * thread from proc->waiting_threads to enqueue
3182 			 * the transaction, and nothing is queued to the
3183 			 * todo list while the thread is on waiting_threads.
3184 			 */
3185 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3186 					  proc->pid, thread->pid);
3187 			binder_inner_proc_unlock(proc);
3188 			return_error = BR_FAILED_REPLY;
3189 			return_error_param = -EPROTO;
3190 			return_error_line = __LINE__;
3191 			goto err_bad_todo_list;
3192 		}
3193 
3194 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3195 			struct binder_transaction *tmp;
3196 
3197 			tmp = thread->transaction_stack;
3198 			if (tmp->to_thread != thread) {
3199 				spin_lock(&tmp->lock);
3200 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3201 					proc->pid, thread->pid, tmp->debug_id,
3202 					tmp->to_proc ? tmp->to_proc->pid : 0,
3203 					tmp->to_thread ?
3204 					tmp->to_thread->pid : 0);
3205 				spin_unlock(&tmp->lock);
3206 				binder_inner_proc_unlock(proc);
3207 				return_error = BR_FAILED_REPLY;
3208 				return_error_param = -EPROTO;
3209 				return_error_line = __LINE__;
3210 				goto err_bad_call_stack;
3211 			}
3212 			while (tmp) {
3213 				struct binder_thread *from;
3214 
3215 				spin_lock(&tmp->lock);
3216 				from = tmp->from;
3217 				if (from && from->proc == target_proc) {
3218 					atomic_inc(&from->tmp_ref);
3219 					target_thread = from;
3220 					spin_unlock(&tmp->lock);
3221 					break;
3222 				}
3223 				spin_unlock(&tmp->lock);
3224 				tmp = tmp->from_parent;
3225 			}
3226 		}
3227 		binder_inner_proc_unlock(proc);
3228 	}
3229 	if (target_thread)
3230 		e->to_thread = target_thread->pid;
3231 	e->to_proc = target_proc->pid;
3232 
3233 	/* TODO: reuse incoming transaction for reply */
3234 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3235 	if (t == NULL) {
3236 		binder_txn_error("%d:%d cannot allocate transaction\n",
3237 			thread->pid, proc->pid);
3238 		return_error = BR_FAILED_REPLY;
3239 		return_error_param = -ENOMEM;
3240 		return_error_line = __LINE__;
3241 		goto err_alloc_t_failed;
3242 	}
3243 	INIT_LIST_HEAD(&t->fd_fixups);
3244 	binder_stats_created(BINDER_STAT_TRANSACTION);
3245 	spin_lock_init(&t->lock);
3246 
3247 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3248 	if (tcomplete == NULL) {
3249 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3250 			thread->pid, proc->pid);
3251 		return_error = BR_FAILED_REPLY;
3252 		return_error_param = -ENOMEM;
3253 		return_error_line = __LINE__;
3254 		goto err_alloc_tcomplete_failed;
3255 	}
3256 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3257 
3258 	t->debug_id = t_debug_id;
3259 	t->start_time = t_start_time;
3260 
3261 	if (reply)
3262 		binder_debug(BINDER_DEBUG_TRANSACTION,
3263 			     "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3264 			     proc->pid, thread->pid, t->debug_id,
3265 			     target_proc->pid, target_thread->pid,
3266 			     (u64)tr->data_size, (u64)tr->offsets_size,
3267 			     (u64)extra_buffers_size);
3268 	else
3269 		binder_debug(BINDER_DEBUG_TRANSACTION,
3270 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3271 			     proc->pid, thread->pid, t->debug_id,
3272 			     target_proc->pid, target_node->debug_id,
3273 			     (u64)tr->data_size, (u64)tr->offsets_size,
3274 			     (u64)extra_buffers_size);
3275 
3276 	if (!reply && !(tr->flags & TF_ONE_WAY))
3277 		t->from = thread;
3278 	else
3279 		t->from = NULL;
3280 	t->from_pid = proc->pid;
3281 	t->from_tid = thread->pid;
3282 	t->sender_euid = task_euid(proc->tsk);
3283 	t->to_proc = target_proc;
3284 	t->to_thread = target_thread;
3285 	t->code = tr->code;
3286 	t->flags = tr->flags;
3287 	t->priority = task_nice(current);
3288 
3289 	if (target_node && target_node->txn_security_ctx) {
3290 		u32 secid;
3291 		size_t added_size;
3292 
3293 		security_cred_getsecid(proc->cred, &secid);
3294 		ret = security_secid_to_secctx(secid, &lsmctx);
3295 		if (ret < 0) {
3296 			binder_txn_error("%d:%d failed to get security context\n",
3297 				thread->pid, proc->pid);
3298 			return_error = BR_FAILED_REPLY;
3299 			return_error_param = ret;
3300 			return_error_line = __LINE__;
3301 			goto err_get_secctx_failed;
3302 		}
3303 		added_size = ALIGN(lsmctx.len, sizeof(u64));
3304 		extra_buffers_size += added_size;
3305 		if (extra_buffers_size < added_size) {
3306 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3307 				thread->pid, proc->pid);
3308 			return_error = BR_FAILED_REPLY;
3309 			return_error_param = -EINVAL;
3310 			return_error_line = __LINE__;
3311 			goto err_bad_extra_size;
3312 		}
3313 	}
3314 
3315 	trace_binder_transaction(reply, t, target_node);
3316 
3317 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3318 		tr->offsets_size, extra_buffers_size,
3319 		!reply && (t->flags & TF_ONE_WAY));
3320 	if (IS_ERR(t->buffer)) {
3321 		char *s;
3322 
3323 		ret = PTR_ERR(t->buffer);
3324 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3325 			: (ret == -ENOSPC) ? ": no space left"
3326 			: (ret == -ENOMEM) ? ": memory allocation failed"
3327 			: "";
3328 		binder_txn_error("cannot allocate buffer%s", s);
3329 
3330 		return_error_param = PTR_ERR(t->buffer);
3331 		return_error = return_error_param == -ESRCH ?
3332 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3333 		return_error_line = __LINE__;
3334 		t->buffer = NULL;
3335 		goto err_binder_alloc_buf_failed;
3336 	}
3337 	if (lsmctx.context) {
3338 		int err;
3339 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3340 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3341 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3342 				    ALIGN(lsmctx.len, sizeof(u64));
3343 
3344 		t->security_ctx = t->buffer->user_data + buf_offset;
3345 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3346 						  t->buffer, buf_offset,
3347 						  lsmctx.context, lsmctx.len);
3348 		if (err) {
3349 			t->security_ctx = 0;
3350 			WARN_ON(1);
3351 		}
3352 		security_release_secctx(&lsmctx);
3353 		lsmctx.context = NULL;
3354 	}
3355 	t->buffer->debug_id = t->debug_id;
3356 	t->buffer->transaction = t;
3357 	t->buffer->target_node = target_node;
3358 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3359 	trace_binder_transaction_alloc_buf(t->buffer);
3360 
3361 	if (binder_alloc_copy_user_to_buffer(
3362 				&target_proc->alloc,
3363 				t->buffer,
3364 				ALIGN(tr->data_size, sizeof(void *)),
3365 				(const void __user *)
3366 					(uintptr_t)tr->data.ptr.offsets,
3367 				tr->offsets_size)) {
3368 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3369 				proc->pid, thread->pid);
3370 		return_error = BR_FAILED_REPLY;
3371 		return_error_param = -EFAULT;
3372 		return_error_line = __LINE__;
3373 		goto err_copy_data_failed;
3374 	}
3375 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3376 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3377 				proc->pid, thread->pid, (u64)tr->offsets_size);
3378 		return_error = BR_FAILED_REPLY;
3379 		return_error_param = -EINVAL;
3380 		return_error_line = __LINE__;
3381 		goto err_bad_offset;
3382 	}
3383 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3384 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3385 				  proc->pid, thread->pid,
3386 				  (u64)extra_buffers_size);
3387 		return_error = BR_FAILED_REPLY;
3388 		return_error_param = -EINVAL;
3389 		return_error_line = __LINE__;
3390 		goto err_bad_offset;
3391 	}
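	/*
	 * Rough layout of t->buffer from here on (offsets relative to
	 * the start of the buffer, each region pointer-aligned):
	 *
	 *	0                 payload (tr->data_size bytes)
	 *	off_start_offset  offsets array (tr->offsets_size bytes)
	 *	sg_buf_offset     scatter-gather space for
	 *	                  BINDER_TYPE_PTR buffers, followed by
	 *	                  the security context (u64-aligned) at
	 *	                  the very end
	 */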
3392 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3393 	buffer_offset = off_start_offset;
3394 	off_end_offset = off_start_offset + tr->offsets_size;
3395 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3396 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3397 		ALIGN(lsmctx.len, sizeof(u64));
3398 	off_min = 0;
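	/*
	 * Walk the offsets array: each object is copied in from the
	 * user buffer, translated in place and written to t->buffer,
	 * while the plain data between objects is copied verbatim as
	 * user_offset advances.
	 */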
3399 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3400 	     buffer_offset += sizeof(binder_size_t)) {
3401 		struct binder_object_header *hdr;
3402 		size_t object_size;
3403 		struct binder_object object;
3404 		binder_size_t object_offset;
3405 		binder_size_t copy_size;
3406 
3407 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3408 						  &object_offset,
3409 						  t->buffer,
3410 						  buffer_offset,
3411 						  sizeof(object_offset))) {
3412 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3413 				thread->pid, proc->pid);
3414 			return_error = BR_FAILED_REPLY;
3415 			return_error_param = -EINVAL;
3416 			return_error_line = __LINE__;
3417 			goto err_bad_offset;
3418 		}
3419 
3420 		/*
3421 		 * Copy the source user buffer up to the next object
3422 		 * that will be processed.
3423 		 */
3424 		copy_size = object_offset - user_offset;
3425 		if (copy_size && (user_offset > object_offset ||
3426 				object_offset > tr->data_size ||
3427 				binder_alloc_copy_user_to_buffer(
3428 					&target_proc->alloc,
3429 					t->buffer, user_offset,
3430 					user_buffer + user_offset,
3431 					copy_size))) {
3432 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3433 					proc->pid, thread->pid);
3434 			return_error = BR_FAILED_REPLY;
3435 			return_error_param = -EFAULT;
3436 			return_error_line = __LINE__;
3437 			goto err_copy_data_failed;
3438 		}
3439 		object_size = binder_get_object(target_proc, user_buffer,
3440 				t->buffer, object_offset, &object);
3441 		if (object_size == 0 || object_offset < off_min) {
3442 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3443 					  proc->pid, thread->pid,
3444 					  (u64)object_offset,
3445 					  (u64)off_min,
3446 					  (u64)t->buffer->data_size);
3447 			return_error = BR_FAILED_REPLY;
3448 			return_error_param = -EINVAL;
3449 			return_error_line = __LINE__;
3450 			goto err_bad_offset;
3451 		}
3452 		/*
3453 		 * Set offset to the next buffer fragment to be
3454 		 * copied
3455 		 */
3456 		user_offset = object_offset + object_size;
3457 
3458 		hdr = &object.hdr;
3459 		off_min = object_offset + object_size;
3460 		switch (hdr->type) {
3461 		case BINDER_TYPE_BINDER:
3462 		case BINDER_TYPE_WEAK_BINDER: {
3463 			struct flat_binder_object *fp;
3464 
3465 			fp = to_flat_binder_object(hdr);
3466 			ret = binder_translate_binder(fp, t, thread);
3467 
3468 			if (ret < 0 ||
3469 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3470 							t->buffer,
3471 							object_offset,
3472 							fp, sizeof(*fp))) {
3473 				binder_txn_error("%d:%d translate binder failed\n",
3474 					thread->pid, proc->pid);
3475 				return_error = BR_FAILED_REPLY;
3476 				return_error_param = ret;
3477 				return_error_line = __LINE__;
3478 				goto err_translate_failed;
3479 			}
3480 		} break;
3481 		case BINDER_TYPE_HANDLE:
3482 		case BINDER_TYPE_WEAK_HANDLE: {
3483 			struct flat_binder_object *fp;
3484 
3485 			fp = to_flat_binder_object(hdr);
3486 			ret = binder_translate_handle(fp, t, thread);
3487 			if (ret < 0 ||
3488 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3489 							t->buffer,
3490 							object_offset,
3491 							fp, sizeof(*fp))) {
3492 				binder_txn_error("%d:%d translate handle failed\n",
3493 					thread->pid, proc->pid);
3494 				return_error = BR_FAILED_REPLY;
3495 				return_error_param = ret;
3496 				return_error_line = __LINE__;
3497 				goto err_translate_failed;
3498 			}
3499 		} break;
3500 
3501 		case BINDER_TYPE_FD: {
3502 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3503 			binder_size_t fd_offset = object_offset +
3504 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3505 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3506 						      thread, in_reply_to);
3507 
3508 			fp->pad_binder = 0;
3509 			if (ret < 0 ||
3510 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3511 							t->buffer,
3512 							object_offset,
3513 							fp, sizeof(*fp))) {
3514 				binder_txn_error("%d:%d translate fd failed\n",
3515 					thread->pid, proc->pid);
3516 				return_error = BR_FAILED_REPLY;
3517 				return_error_param = ret;
3518 				return_error_line = __LINE__;
3519 				goto err_translate_failed;
3520 			}
3521 		} break;
3522 		case BINDER_TYPE_FDA: {
3523 			struct binder_object ptr_object;
3524 			binder_size_t parent_offset;
3525 			struct binder_object user_object;
3526 			size_t user_parent_size;
3527 			struct binder_fd_array_object *fda =
3528 				to_binder_fd_array_object(hdr);
3529 			size_t num_valid = (buffer_offset - off_start_offset) /
3530 						sizeof(binder_size_t);
3531 			struct binder_buffer_object *parent =
3532 				binder_validate_ptr(target_proc, t->buffer,
3533 						    &ptr_object, fda->parent,
3534 						    off_start_offset,
3535 						    &parent_offset,
3536 						    num_valid);
3537 			if (!parent) {
3538 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3539 						  proc->pid, thread->pid);
3540 				return_error = BR_FAILED_REPLY;
3541 				return_error_param = -EINVAL;
3542 				return_error_line = __LINE__;
3543 				goto err_bad_parent;
3544 			}
3545 			if (!binder_validate_fixup(target_proc, t->buffer,
3546 						   off_start_offset,
3547 						   parent_offset,
3548 						   fda->parent_offset,
3549 						   last_fixup_obj_off,
3550 						   last_fixup_min_off)) {
3551 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3552 						  proc->pid, thread->pid);
3553 				return_error = BR_FAILED_REPLY;
3554 				return_error_param = -EINVAL;
3555 				return_error_line = __LINE__;
3556 				goto err_bad_parent;
3557 			}
3558 			/*
3559 			 * We need to read the user version of the parent
3560 			 * object to get the original user offset
3561 			 */
3562 			user_parent_size =
3563 				binder_get_object(proc, user_buffer, t->buffer,
3564 						  parent_offset, &user_object);
3565 			if (user_parent_size != sizeof(user_object.bbo)) {
3566 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3567 						  proc->pid, thread->pid,
3568 						  user_parent_size,
3569 						  sizeof(user_object.bbo));
3570 				return_error = BR_FAILED_REPLY;
3571 				return_error_param = -EINVAL;
3572 				return_error_line = __LINE__;
3573 				goto err_bad_parent;
3574 			}
3575 			ret = binder_translate_fd_array(&pf_head, fda,
3576 							user_buffer, parent,
3577 							&user_object.bbo, t,
3578 							thread, in_reply_to);
3579 			if (!ret)
3580 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3581 								  t->buffer,
3582 								  object_offset,
3583 								  fda, sizeof(*fda));
3584 			if (ret) {
3585 				binder_txn_error("%d:%d translate fd array failed\n",
3586 					thread->pid, proc->pid);
3587 				return_error = BR_FAILED_REPLY;
3588 				return_error_param = ret > 0 ? -EINVAL : ret;
3589 				return_error_line = __LINE__;
3590 				goto err_translate_failed;
3591 			}
3592 			last_fixup_obj_off = parent_offset;
3593 			last_fixup_min_off =
3594 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3595 		} break;
3596 		case BINDER_TYPE_PTR: {
3597 			struct binder_buffer_object *bp =
3598 				to_binder_buffer_object(hdr);
3599 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3600 			size_t num_valid;
3601 
3602 			if (bp->length > buf_left) {
3603 				binder_user_error("%d:%d got transaction with too large buffer\n",
3604 						  proc->pid, thread->pid);
3605 				return_error = BR_FAILED_REPLY;
3606 				return_error_param = -EINVAL;
3607 				return_error_line = __LINE__;
3608 				goto err_bad_offset;
3609 			}
3610 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3611 				(const void __user *)(uintptr_t)bp->buffer,
3612 				bp->length);
3613 			if (ret) {
3614 				binder_txn_error("%d:%d deferred copy failed\n",
3615 					thread->pid, proc->pid);
3616 				return_error = BR_FAILED_REPLY;
3617 				return_error_param = ret;
3618 				return_error_line = __LINE__;
3619 				goto err_translate_failed;
3620 			}
3621 			/* Fixup buffer pointer to target proc address space */
3622 			bp->buffer = t->buffer->user_data + sg_buf_offset;
3623 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3624 
3625 			num_valid = (buffer_offset - off_start_offset) /
3626 					sizeof(binder_size_t);
3627 			ret = binder_fixup_parent(&pf_head, t,
3628 						  thread, bp,
3629 						  off_start_offset,
3630 						  num_valid,
3631 						  last_fixup_obj_off,
3632 						  last_fixup_min_off);
3633 			if (ret < 0 ||
3634 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3635 							t->buffer,
3636 							object_offset,
3637 							bp, sizeof(*bp))) {
3638 				binder_txn_error("%d:%d failed to fixup parent\n",
3639 					thread->pid, proc->pid);
3640 				return_error = BR_FAILED_REPLY;
3641 				return_error_param = ret;
3642 				return_error_line = __LINE__;
3643 				goto err_translate_failed;
3644 			}
3645 			last_fixup_obj_off = object_offset;
3646 			last_fixup_min_off = 0;
3647 		} break;
3648 		default:
3649 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3650 				proc->pid, thread->pid, hdr->type);
3651 			return_error = BR_FAILED_REPLY;
3652 			return_error_param = -EINVAL;
3653 			return_error_line = __LINE__;
3654 			goto err_bad_object_type;
3655 		}
3656 	}
3657 	/* Done processing objects, copy the rest of the buffer */
3658 	if (binder_alloc_copy_user_to_buffer(
3659 				&target_proc->alloc,
3660 				t->buffer, user_offset,
3661 				user_buffer + user_offset,
3662 				tr->data_size - user_offset)) {
3663 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3664 				proc->pid, thread->pid);
3665 		return_error = BR_FAILED_REPLY;
3666 		return_error_param = -EFAULT;
3667 		return_error_line = __LINE__;
3668 		goto err_copy_data_failed;
3669 	}
3670 
3671 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3672 					    &sgc_head, &pf_head);
3673 	if (ret) {
3674 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3675 				  proc->pid, thread->pid);
3676 		return_error = BR_FAILED_REPLY;
3677 		return_error_param = ret;
3678 		return_error_line = __LINE__;
3679 		goto err_copy_data_failed;
3680 	}
3681 	if (t->buffer->oneway_spam_suspect)
3682 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3683 	else
3684 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3685 	t->work.type = BINDER_WORK_TRANSACTION;
3686 
3687 	if (reply) {
3688 		binder_enqueue_thread_work(thread, tcomplete);
3689 		binder_inner_proc_lock(target_proc);
3690 		if (target_thread->is_dead) {
3691 			return_error = BR_DEAD_REPLY;
3692 			binder_inner_proc_unlock(target_proc);
3693 			goto err_dead_proc_or_thread;
3694 		}
3695 		BUG_ON(t->buffer->async_transaction != 0);
3696 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3697 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3698 		target_proc->outstanding_txns++;
3699 		binder_inner_proc_unlock(target_proc);
3700 		wake_up_interruptible_sync(&target_thread->wait);
3701 		binder_free_transaction(in_reply_to);
3702 	} else if (!(t->flags & TF_ONE_WAY)) {
3703 		BUG_ON(t->buffer->async_transaction != 0);
3704 		binder_inner_proc_lock(proc);
3705 		/*
3706 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3707 		 * userspace immediately; this allows the target process to
3708 		 * immediately start processing this transaction, reducing
3709 		 * latency. We will then return the TRANSACTION_COMPLETE when
3710 		 * the target replies (or there is an error).
3711 		 */
3712 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3713 		t->need_reply = 1;
3714 		t->from_parent = thread->transaction_stack;
3715 		thread->transaction_stack = t;
3716 		binder_inner_proc_unlock(proc);
3717 		return_error = binder_proc_transaction(t,
3718 				target_proc, target_thread);
3719 		if (return_error) {
3720 			binder_inner_proc_lock(proc);
3721 			binder_pop_transaction_ilocked(thread, t);
3722 			binder_inner_proc_unlock(proc);
3723 			goto err_dead_proc_or_thread;
3724 		}
3725 	} else {
3726 		BUG_ON(target_node == NULL);
3727 		BUG_ON(t->buffer->async_transaction != 1);
3728 		return_error = binder_proc_transaction(t, target_proc, NULL);
3729 		/*
3730 		 * Let the caller know when an async transaction reaches a frozen
3731 		 * process and is put in a pending queue, waiting for the target
3732 		 * process to be unfrozen.
3733 		 */
3734 		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3735 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3736 		binder_enqueue_thread_work(thread, tcomplete);
3737 		if (return_error &&
3738 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3739 			goto err_dead_proc_or_thread;
3740 	}
3741 	if (target_thread)
3742 		binder_thread_dec_tmpref(target_thread);
3743 	binder_proc_dec_tmpref(target_proc);
3744 	if (target_node)
3745 		binder_dec_node_tmpref(target_node);
3746 	/*
3747 	 * write barrier to synchronize with initialization
3748 	 * of log entry
3749 	 */
3750 	smp_wmb();
3751 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3752 	return;
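
/*
 * Error paths. The labels below unwind the setup above in reverse
 * order: a failure jumps to the label matching what had been set up
 * so far and falls through the remaining cleanup steps.
 */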
3753 
3754 err_dead_proc_or_thread:
3755 	binder_txn_error("%d:%d dead process or thread\n",
3756 		thread->pid, proc->pid);
3757 	return_error_line = __LINE__;
3758 	binder_dequeue_work(proc, tcomplete);
3759 err_translate_failed:
3760 err_bad_object_type:
3761 err_bad_offset:
3762 err_bad_parent:
3763 err_copy_data_failed:
3764 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3765 	binder_free_txn_fixups(t);
3766 	trace_binder_transaction_failed_buffer_release(t->buffer);
3767 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3768 					  buffer_offset, true);
3769 	if (target_node)
3770 		binder_dec_node_tmpref(target_node);
3771 	target_node = NULL;
3772 	t->buffer->transaction = NULL;
3773 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3774 err_binder_alloc_buf_failed:
3775 err_bad_extra_size:
3776 	if (lsmctx.context)
3777 		security_release_secctx(&lsmctx);
3778 err_get_secctx_failed:
3779 	kfree(tcomplete);
3780 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3781 err_alloc_tcomplete_failed:
3782 	if (trace_binder_txn_latency_free_enabled())
3783 		binder_txn_latency_free(t);
3784 	kfree(t);
3785 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3786 err_alloc_t_failed:
3787 err_bad_todo_list:
3788 err_bad_call_stack:
3789 err_empty_call_stack:
3790 err_dead_binder:
3791 err_invalid_target_handle:
3792 	if (target_node) {
3793 		binder_dec_node(target_node, 1, 0);
3794 		binder_dec_node_tmpref(target_node);
3795 	}
3796 
3797 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3798 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3799 		     proc->pid, thread->pid, reply ? "reply" :
3800 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3801 		     target_proc ? target_proc->pid : 0,
3802 		     target_thread ? target_thread->pid : 0,
3803 		     t_debug_id, return_error, return_error_param,
3804 		     tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3805 		     return_error_line);
3806 
3807 	if (target_thread)
3808 		binder_thread_dec_tmpref(target_thread);
3809 	if (target_proc)
3810 		binder_proc_dec_tmpref(target_proc);
3811 
3812 	{
3813 		struct binder_transaction_log_entry *fe;
3814 
3815 		e->return_error = return_error;
3816 		e->return_error_param = return_error_param;
3817 		e->return_error_line = return_error_line;
3818 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3819 		*fe = *e;
3820 		/*
3821 		 * write barrier to synchronize with initialization
3822 		 * of log entry
3823 		 */
3824 		smp_wmb();
3825 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3826 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3827 	}
3828 
3829 	BUG_ON(thread->return_error.cmd != BR_OK);
3830 	if (in_reply_to) {
3831 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3832 				return_error, return_error_param);
3833 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3834 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3835 		binder_send_failed_reply(in_reply_to, return_error);
3836 	} else {
3837 		binder_inner_proc_lock(proc);
3838 		binder_set_extended_error(&thread->ee, t_debug_id,
3839 				return_error, return_error_param);
3840 		binder_inner_proc_unlock(proc);
3841 		thread->return_error.cmd = return_error;
3842 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3843 	}
3844 }
3845 
3846 static int
3847 binder_request_freeze_notification(struct binder_proc *proc,
3848 				   struct binder_thread *thread,
3849 				   struct binder_handle_cookie *handle_cookie)
3850 {
3851 	struct binder_ref_freeze *freeze;
3852 	struct binder_ref *ref;
3853 
3854 	freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3855 	if (!freeze)
3856 		return -ENOMEM;
3857 	binder_proc_lock(proc);
3858 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3859 	if (!ref) {
3860 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3861 				  proc->pid, thread->pid, handle_cookie->handle);
3862 		binder_proc_unlock(proc);
3863 		kfree(freeze);
3864 		return -EINVAL;
3865 	}
3866 
3867 	binder_node_lock(ref->node);
3868 	if (ref->freeze) {
3869 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3870 				  proc->pid, thread->pid);
3871 		binder_node_unlock(ref->node);
3872 		binder_proc_unlock(proc);
3873 		kfree(freeze);
3874 		return -EINVAL;
3875 	}
3876 
3877 	binder_stats_created(BINDER_STAT_FREEZE);
3878 	INIT_LIST_HEAD(&freeze->work.entry);
3879 	freeze->cookie = handle_cookie->cookie;
3880 	freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3881 	ref->freeze = freeze;
3882 
3883 	if (ref->node->proc) {
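		/*
		 * The target is still alive: queue a notification
		 * carrying its current frozen state so the requester
		 * gets an initial BR_FROZEN_BINDER right away.
		 */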
3884 		binder_inner_proc_lock(ref->node->proc);
3885 		freeze->is_frozen = ref->node->proc->is_frozen;
3886 		binder_inner_proc_unlock(ref->node->proc);
3887 
3888 		binder_inner_proc_lock(proc);
3889 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3890 		binder_wakeup_proc_ilocked(proc);
3891 		binder_inner_proc_unlock(proc);
3892 	}
3893 
3894 	binder_node_unlock(ref->node);
3895 	binder_proc_unlock(proc);
3896 	return 0;
3897 }
3898 
3899 static int
3900 binder_clear_freeze_notification(struct binder_proc *proc,
3901 				 struct binder_thread *thread,
3902 				 struct binder_handle_cookie *handle_cookie)
3903 {
3904 	struct binder_ref_freeze *freeze;
3905 	struct binder_ref *ref;
3906 
3907 	binder_proc_lock(proc);
3908 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3909 	if (!ref) {
3910 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3911 				  proc->pid, thread->pid, handle_cookie->handle);
3912 		binder_proc_unlock(proc);
3913 		return -EINVAL;
3914 	}
3915 
3916 	binder_node_lock(ref->node);
3917 
3918 	if (!ref->freeze) {
3919 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3920 				  proc->pid, thread->pid);
3921 		binder_node_unlock(ref->node);
3922 		binder_proc_unlock(proc);
3923 		return -EINVAL;
3924 	}
3925 	freeze = ref->freeze;
3926 	binder_inner_proc_lock(proc);
3927 	if (freeze->cookie != handle_cookie->cookie) {
3928 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3929 				  proc->pid, thread->pid, (u64)freeze->cookie,
3930 				  (u64)handle_cookie->cookie);
3931 		binder_inner_proc_unlock(proc);
3932 		binder_node_unlock(ref->node);
3933 		binder_proc_unlock(proc);
3934 		return -EINVAL;
3935 	}
3936 	ref->freeze = NULL;
3937 	/*
3938 	 * Take the existing freeze object and overwrite its work type. There are three cases here:
3939 	 * 1. No pending notification. In this case just add the work to the queue.
3940 	 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
3941 	 *    should resend with the new work type.
3942 	 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
3943 	 *    needs to be done here.
3944 	 */
3945 	freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
3946 	if (list_empty(&freeze->work.entry)) {
3947 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3948 		binder_wakeup_proc_ilocked(proc);
3949 	} else if (freeze->sent) {
3950 		freeze->resend = true;
3951 	}
3952 	binder_inner_proc_unlock(proc);
3953 	binder_node_unlock(ref->node);
3954 	binder_proc_unlock(proc);
3955 	return 0;
3956 }
3957 
3958 static int
3959 binder_freeze_notification_done(struct binder_proc *proc,
3960 				struct binder_thread *thread,
3961 				binder_uintptr_t cookie)
3962 {
3963 	struct binder_ref_freeze *freeze = NULL;
3964 	struct binder_work *w;
3965 
3966 	binder_inner_proc_lock(proc);
3967 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
3968 		struct binder_ref_freeze *tmp_freeze =
3969 			container_of(w, struct binder_ref_freeze, work);
3970 
3971 		if (tmp_freeze->cookie == cookie) {
3972 			freeze = tmp_freeze;
3973 			break;
3974 		}
3975 	}
3976 	if (!freeze) {
3977 		binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
3978 				  proc->pid, thread->pid, (u64)cookie);
3979 		binder_inner_proc_unlock(proc);
3980 		return -EINVAL;
3981 	}
3982 	binder_dequeue_work_ilocked(&freeze->work);
3983 	freeze->sent = false;
3984 	if (freeze->resend) {
3985 		freeze->resend = false;
3986 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3987 		binder_wakeup_proc_ilocked(proc);
3988 	}
3989 	binder_inner_proc_unlock(proc);
3990 	return 0;
3991 }
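
/*
 * Rough sketch of the freeze-notification flow as driven from
 * userspace (non-normative; BINDER_WRITE_READ plumbing and error
 * handling omitted, types from uapi binder.h):
 *
 *	struct binder_handle_cookie hc = {
 *		.handle = handle,	// ref to the remote node
 *		.cookie = cookie,	// caller-chosen tag
 *	};
 *	write BC_REQUEST_FREEZE_NOTIFICATION + hc
 *	// the read side later delivers:
 *	//	BR_FROZEN_BINDER + struct binder_frozen_state_info
 *	write BC_FREEZE_NOTIFICATION_DONE + info.cookie  // ack each one
 *	write BC_CLEAR_FREEZE_NOTIFICATION + hc          // unsubscribe
 *	// the read side confirms with:
 *	//	BR_CLEAR_FREEZE_NOTIFICATION_DONE + cookie
 */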
3992 
3993 /**
3994  * binder_free_buf() - free the specified buffer
3995  * @proc:	binder proc that owns buffer
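 * @thread:	binder thread performing the buffer release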
3996  * @buffer:	buffer to be freed
3997  * @is_failure:	failed to send transaction
3998  *
3999  * If the buffer is for an async transaction, enqueue the next async
4000  * transaction from the node.
4001  *
4002  * Clean up the buffer and free it.
4003  */
4004 static void
4005 binder_free_buf(struct binder_proc *proc,
4006 		struct binder_thread *thread,
4007 		struct binder_buffer *buffer, bool is_failure)
4008 {
4009 	binder_inner_proc_lock(proc);
4010 	if (buffer->transaction) {
4011 		buffer->transaction->buffer = NULL;
4012 		buffer->transaction = NULL;
4013 	}
4014 	binder_inner_proc_unlock(proc);
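	/*
	 * Async transactions are throttled per node: only one is in
	 * flight at a time, so freeing its buffer is what releases the
	 * next one queued on the node's async_todo list.
	 */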
4015 	if (buffer->async_transaction && buffer->target_node) {
4016 		struct binder_node *buf_node;
4017 		struct binder_work *w;
4018 
4019 		buf_node = buffer->target_node;
4020 		binder_node_inner_lock(buf_node);
4021 		BUG_ON(!buf_node->has_async_transaction);
4022 		BUG_ON(buf_node->proc != proc);
4023 		w = binder_dequeue_work_head_ilocked(
4024 				&buf_node->async_todo);
4025 		if (!w) {
4026 			buf_node->has_async_transaction = false;
4027 		} else {
4028 			binder_enqueue_work_ilocked(
4029 					w, &proc->todo);
4030 			binder_wakeup_proc_ilocked(proc);
4031 		}
4032 		binder_node_inner_unlock(buf_node);
4033 	}
4034 	trace_binder_transaction_buffer_release(buffer);
4035 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
4036 	binder_alloc_free_buf(&proc->alloc, buffer);
4037 }
4038 
4039 static int binder_thread_write(struct binder_proc *proc,
4040 			struct binder_thread *thread,
4041 			binder_uintptr_t binder_buffer, size_t size,
4042 			binder_size_t *consumed)
4043 {
4044 	uint32_t cmd;
4045 	struct binder_context *context = proc->context;
4046 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4047 	void __user *ptr = buffer + *consumed;
4048 	void __user *end = buffer + size;
4049 
4050 	while (ptr < end && thread->return_error.cmd == BR_OK) {
4051 		int ret;
4052 
4053 		if (get_user(cmd, (uint32_t __user *)ptr))
4054 			return -EFAULT;
4055 		ptr += sizeof(uint32_t);
4056 		trace_binder_command(cmd);
4057 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4058 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4059 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4060 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4061 		}
4062 		switch (cmd) {
4063 		case BC_INCREFS:
4064 		case BC_ACQUIRE:
4065 		case BC_RELEASE:
4066 		case BC_DECREFS: {
4067 			uint32_t target;
4068 			const char *debug_string;
4069 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4070 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4071 			struct binder_ref_data rdata;
4072 
4073 			if (get_user(target, (uint32_t __user *)ptr))
4074 				return -EFAULT;
4075 
4076 			ptr += sizeof(uint32_t);
4077 			ret = -1;
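			/*
			 * Handle 0 names the context manager. ret is
			 * primed nonzero so that the normal handle
			 * lookup below runs whenever this special case
			 * does not apply.
			 */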
4078 			if (increment && !target) {
4079 				struct binder_node *ctx_mgr_node;
4080 
4081 				mutex_lock(&context->context_mgr_node_lock);
4082 				ctx_mgr_node = context->binder_context_mgr_node;
4083 				if (ctx_mgr_node) {
4084 					if (ctx_mgr_node->proc == proc) {
4085 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4086 								  proc->pid, thread->pid);
4087 						mutex_unlock(&context->context_mgr_node_lock);
4088 						return -EINVAL;
4089 					}
4090 					ret = binder_inc_ref_for_node(
4091 							proc, ctx_mgr_node,
4092 							strong, NULL, &rdata);
4093 				}
4094 				mutex_unlock(&context->context_mgr_node_lock);
4095 			}
4096 			if (ret)
4097 				ret = binder_update_ref_for_handle(
4098 						proc, target, increment, strong,
4099 						&rdata);
4100 			if (!ret && rdata.desc != target) {
4101 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4102 					proc->pid, thread->pid,
4103 					target, rdata.desc);
4104 			}
4105 			switch (cmd) {
4106 			case BC_INCREFS:
4107 				debug_string = "IncRefs";
4108 				break;
4109 			case BC_ACQUIRE:
4110 				debug_string = "Acquire";
4111 				break;
4112 			case BC_RELEASE:
4113 				debug_string = "Release";
4114 				break;
4115 			case BC_DECREFS:
4116 			default:
4117 				debug_string = "DecRefs";
4118 				break;
4119 			}
4120 			if (ret) {
4121 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4122 					proc->pid, thread->pid, debug_string,
4123 					strong, target, ret);
4124 				break;
4125 			}
4126 			binder_debug(BINDER_DEBUG_USER_REFS,
4127 				     "%d:%d %s ref %d desc %d s %d w %d\n",
4128 				     proc->pid, thread->pid, debug_string,
4129 				     rdata.debug_id, rdata.desc, rdata.strong,
4130 				     rdata.weak);
4131 			break;
4132 		}
4133 		case BC_INCREFS_DONE:
4134 		case BC_ACQUIRE_DONE: {
4135 			binder_uintptr_t node_ptr;
4136 			binder_uintptr_t cookie;
4137 			struct binder_node *node;
4138 			bool free_node;
4139 
4140 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4141 				return -EFAULT;
4142 			ptr += sizeof(binder_uintptr_t);
4143 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4144 				return -EFAULT;
4145 			ptr += sizeof(binder_uintptr_t);
4146 			node = binder_get_node(proc, node_ptr);
4147 			if (node == NULL) {
4148 				binder_user_error("%d:%d %s u%016llx no match\n",
4149 					proc->pid, thread->pid,
4150 					cmd == BC_INCREFS_DONE ?
4151 					"BC_INCREFS_DONE" :
4152 					"BC_ACQUIRE_DONE",
4153 					(u64)node_ptr);
4154 				break;
4155 			}
4156 			if (cookie != node->cookie) {
4157 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4158 					proc->pid, thread->pid,
4159 					cmd == BC_INCREFS_DONE ?
4160 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4161 					(u64)node_ptr, node->debug_id,
4162 					(u64)cookie, (u64)node->cookie);
4163 				binder_put_node(node);
4164 				break;
4165 			}
4166 			binder_node_inner_lock(node);
4167 			if (cmd == BC_ACQUIRE_DONE) {
4168 				if (node->pending_strong_ref == 0) {
4169 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4170 						proc->pid, thread->pid,
4171 						node->debug_id);
4172 					binder_node_inner_unlock(node);
4173 					binder_put_node(node);
4174 					break;
4175 				}
4176 				node->pending_strong_ref = 0;
4177 			} else {
4178 				if (node->pending_weak_ref == 0) {
4179 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4180 						proc->pid, thread->pid,
4181 						node->debug_id);
4182 					binder_node_inner_unlock(node);
4183 					binder_put_node(node);
4184 					break;
4185 				}
4186 				node->pending_weak_ref = 0;
4187 			}
4188 			free_node = binder_dec_node_nilocked(node,
4189 					cmd == BC_ACQUIRE_DONE, 0);
4190 			WARN_ON(free_node);
4191 			binder_debug(BINDER_DEBUG_USER_REFS,
4192 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
4193 				     proc->pid, thread->pid,
4194 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4195 				     node->debug_id, node->local_strong_refs,
4196 				     node->local_weak_refs, node->tmp_refs);
4197 			binder_node_inner_unlock(node);
4198 			binder_put_node(node);
4199 			break;
4200 		}
4201 		case BC_ATTEMPT_ACQUIRE:
4202 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4203 			return -EINVAL;
4204 		case BC_ACQUIRE_RESULT:
4205 			pr_err("BC_ACQUIRE_RESULT not supported\n");
4206 			return -EINVAL;
4207 
4208 		case BC_FREE_BUFFER: {
4209 			binder_uintptr_t data_ptr;
4210 			struct binder_buffer *buffer;
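
			/*
			 * data_ptr must be the exact address previously
			 * reported to userspace in
			 * binder_transaction_data.data.ptr.buffer, so
			 * the usual sequence is roughly:
			 *
			 *	write BC_FREE_BUFFER + tr.data.ptr.buffer
			 *
			 * once the payload has been consumed.
			 */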
4211 
4212 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4213 				return -EFAULT;
4214 			ptr += sizeof(binder_uintptr_t);
4215 
4216 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
4217 							      data_ptr);
4218 			if (IS_ERR_OR_NULL(buffer)) {
4219 				if (PTR_ERR(buffer) == -EPERM) {
4220 					binder_user_error(
4221 						"%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4222 						proc->pid, thread->pid,
4223 						(unsigned long)data_ptr - proc->alloc.vm_start);
4224 				} else {
4225 					binder_user_error(
4226 						"%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4227 						proc->pid, thread->pid,
4228 						(unsigned long)data_ptr - proc->alloc.vm_start);
4229 				}
4230 				break;
4231 			}
4232 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4233 				     "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4234 				     proc->pid, thread->pid,
4235 				     (unsigned long)data_ptr - proc->alloc.vm_start,
4236 				     buffer->debug_id,
4237 				     buffer->transaction ? "active" : "finished");
4238 			binder_free_buf(proc, thread, buffer, false);
4239 			break;
4240 		}
4241 
4242 		case BC_TRANSACTION_SG:
4243 		case BC_REPLY_SG: {
4244 			struct binder_transaction_data_sg tr;
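			/*
			 * The _SG variants add tr.buffers_size: the
			 * total u64-aligned size of the scatter-gather
			 * buffers referenced by BINDER_TYPE_PTR objects
			 * in the payload.
			 */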
4245 
4246 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4247 				return -EFAULT;
4248 			ptr += sizeof(tr);
4249 			binder_transaction(proc, thread, &tr.transaction_data,
4250 					   cmd == BC_REPLY_SG, tr.buffers_size);
4251 			break;
4252 		}
4253 		case BC_TRANSACTION:
4254 		case BC_REPLY: {
4255 			struct binder_transaction_data tr;
4256 
4257 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4258 				return -EFAULT;
4259 			ptr += sizeof(tr);
4260 			binder_transaction(proc, thread, &tr,
4261 					   cmd == BC_REPLY, 0);
4262 			break;
4263 		}
4264 
4265 		case BC_REGISTER_LOOPER:
4266 			binder_debug(BINDER_DEBUG_THREADS,
4267 				     "%d:%d BC_REGISTER_LOOPER\n",
4268 				     proc->pid, thread->pid);
4269 			binder_inner_proc_lock(proc);
4270 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4271 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4272 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4273 					proc->pid, thread->pid);
4274 			} else if (proc->requested_threads == 0) {
4275 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4276 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4277 					proc->pid, thread->pid);
4278 			} else {
4279 				proc->requested_threads--;
4280 				proc->requested_threads_started++;
4281 			}
4282 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4283 			binder_inner_proc_unlock(proc);
4284 			break;
4285 		case BC_ENTER_LOOPER:
4286 			binder_debug(BINDER_DEBUG_THREADS,
4287 				     "%d:%d BC_ENTER_LOOPER\n",
4288 				     proc->pid, thread->pid);
4289 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4290 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4291 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4292 					proc->pid, thread->pid);
4293 			}
4294 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4295 			break;
4296 		case BC_EXIT_LOOPER:
4297 			binder_debug(BINDER_DEBUG_THREADS,
4298 				     "%d:%d BC_EXIT_LOOPER\n",
4299 				     proc->pid, thread->pid);
4300 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4301 			break;
4302 
4303 		case BC_REQUEST_DEATH_NOTIFICATION:
4304 		case BC_CLEAR_DEATH_NOTIFICATION: {
4305 			uint32_t target;
4306 			binder_uintptr_t cookie;
4307 			struct binder_ref *ref;
4308 			struct binder_ref_death *death = NULL;
4309 
4310 			if (get_user(target, (uint32_t __user *)ptr))
4311 				return -EFAULT;
4312 			ptr += sizeof(uint32_t);
4313 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4314 				return -EFAULT;
4315 			ptr += sizeof(binder_uintptr_t);
4316 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4317 				/*
4318 				 * Allocate memory for death notification
4319 				 * before taking the lock
4320 				 */
4321 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4322 				if (death == NULL) {
4323 					WARN_ON(thread->return_error.cmd !=
4324 						BR_OK);
4325 					thread->return_error.cmd = BR_ERROR;
4326 					binder_enqueue_thread_work(
4327 						thread,
4328 						&thread->return_error.work);
4329 					binder_debug(
4330 						BINDER_DEBUG_FAILED_TRANSACTION,
4331 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4332 						proc->pid, thread->pid);
4333 					break;
4334 				}
4335 			}
4336 			binder_proc_lock(proc);
4337 			ref = binder_get_ref_olocked(proc, target, false);
4338 			if (ref == NULL) {
4339 				binder_user_error("%d:%d %s invalid ref %d\n",
4340 					proc->pid, thread->pid,
4341 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4342 					"BC_REQUEST_DEATH_NOTIFICATION" :
4343 					"BC_CLEAR_DEATH_NOTIFICATION",
4344 					target);
4345 				binder_proc_unlock(proc);
4346 				kfree(death);
4347 				break;
4348 			}
4349 
4350 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4351 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4352 				     proc->pid, thread->pid,
4353 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4354 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4355 				     "BC_CLEAR_DEATH_NOTIFICATION",
4356 				     (u64)cookie, ref->data.debug_id,
4357 				     ref->data.desc, ref->data.strong,
4358 				     ref->data.weak, ref->node->debug_id);
4359 
4360 			binder_node_lock(ref->node);
4361 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4362 				if (ref->death) {
4363 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4364 						proc->pid, thread->pid);
4365 					binder_node_unlock(ref->node);
4366 					binder_proc_unlock(proc);
4367 					kfree(death);
4368 					break;
4369 				}
4370 				binder_stats_created(BINDER_STAT_DEATH);
4371 				INIT_LIST_HEAD(&death->work.entry);
4372 				death->cookie = cookie;
4373 				ref->death = death;
4374 				if (ref->node->proc == NULL) {
4375 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4376 
4377 					binder_inner_proc_lock(proc);
4378 					binder_enqueue_work_ilocked(
4379 						&ref->death->work, &proc->todo);
4380 					binder_wakeup_proc_ilocked(proc);
4381 					binder_inner_proc_unlock(proc);
4382 				}
4383 			} else {
4384 				if (ref->death == NULL) {
4385 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4386 						proc->pid, thread->pid);
4387 					binder_node_unlock(ref->node);
4388 					binder_proc_unlock(proc);
4389 					break;
4390 				}
4391 				death = ref->death;
4392 				if (death->cookie != cookie) {
4393 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4394 						proc->pid, thread->pid,
4395 						(u64)death->cookie,
4396 						(u64)cookie);
4397 					binder_node_unlock(ref->node);
4398 					binder_proc_unlock(proc);
4399 					break;
4400 				}
4401 				ref->death = NULL;
4402 				binder_inner_proc_lock(proc);
4403 				if (list_empty(&death->work.entry)) {
4404 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4405 					if (thread->looper &
4406 					    (BINDER_LOOPER_STATE_REGISTERED |
4407 					     BINDER_LOOPER_STATE_ENTERED))
4408 						binder_enqueue_thread_work_ilocked(
4409 								thread,
4410 								&death->work);
4411 					else {
4412 						binder_enqueue_work_ilocked(
4413 								&death->work,
4414 								&proc->todo);
4415 						binder_wakeup_proc_ilocked(
4416 								proc);
4417 					}
4418 				} else {
4419 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4420 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4421 				}
4422 				binder_inner_proc_unlock(proc);
4423 			}
4424 			binder_node_unlock(ref->node);
4425 			binder_proc_unlock(proc);
4426 		} break;
4427 		case BC_DEAD_BINDER_DONE: {
4428 			struct binder_work *w;
4429 			binder_uintptr_t cookie;
4430 			struct binder_ref_death *death = NULL;
4431 
4432 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4433 				return -EFAULT;
4434 
4435 			ptr += sizeof(cookie);
4436 			binder_inner_proc_lock(proc);
4437 			list_for_each_entry(w, &proc->delivered_death,
4438 					    entry) {
4439 				struct binder_ref_death *tmp_death =
4440 					container_of(w,
4441 						     struct binder_ref_death,
4442 						     work);
4443 
4444 				if (tmp_death->cookie == cookie) {
4445 					death = tmp_death;
4446 					break;
4447 				}
4448 			}
4449 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4450 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4451 				     proc->pid, thread->pid, (u64)cookie,
4452 				     death);
4453 			if (death == NULL) {
4454 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4455 					proc->pid, thread->pid, (u64)cookie);
4456 				binder_inner_proc_unlock(proc);
4457 				break;
4458 			}
4459 			binder_dequeue_work_ilocked(&death->work);
4460 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4461 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4462 				if (thread->looper &
4463 					(BINDER_LOOPER_STATE_REGISTERED |
4464 					 BINDER_LOOPER_STATE_ENTERED))
4465 					binder_enqueue_thread_work_ilocked(
4466 						thread, &death->work);
4467 				else {
4468 					binder_enqueue_work_ilocked(
4469 							&death->work,
4470 							&proc->todo);
4471 					binder_wakeup_proc_ilocked(proc);
4472 				}
4473 			}
4474 			binder_inner_proc_unlock(proc);
4475 		} break;
4476 
4477 		case BC_REQUEST_FREEZE_NOTIFICATION: {
4478 			struct binder_handle_cookie handle_cookie;
4479 			int error;
4480 
4481 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4482 				return -EFAULT;
4483 			ptr += sizeof(handle_cookie);
4484 			error = binder_request_freeze_notification(proc, thread,
4485 								   &handle_cookie);
4486 			if (error)
4487 				return error;
4488 		} break;
4489 
4490 		case BC_CLEAR_FREEZE_NOTIFICATION: {
4491 			struct binder_handle_cookie handle_cookie;
4492 			int error;
4493 
4494 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4495 				return -EFAULT;
4496 			ptr += sizeof(handle_cookie);
4497 			error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4498 			if (error)
4499 				return error;
4500 		} break;
4501 
4502 		case BC_FREEZE_NOTIFICATION_DONE: {
4503 			binder_uintptr_t cookie;
4504 			int error;
4505 
4506 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4507 				return -EFAULT;
4508 
4509 			ptr += sizeof(cookie);
4510 			error = binder_freeze_notification_done(proc, thread, cookie);
4511 			if (error)
4512 				return error;
4513 		} break;
4514 
4515 		default:
4516 			pr_err("%d:%d unknown command %u\n",
4517 			       proc->pid, thread->pid, cmd);
4518 			return -EINVAL;
4519 		}
4520 		*consumed = ptr - buffer;
4521 	}
4522 	return 0;
4523 }
4524 
4525 static void binder_stat_br(struct binder_proc *proc,
4526 			   struct binder_thread *thread, uint32_t cmd)
4527 {
4528 	trace_binder_return(cmd);
4529 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4530 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4531 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4532 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4533 	}
4534 }
4535 
4536 static int binder_put_node_cmd(struct binder_proc *proc,
4537 			       struct binder_thread *thread,
4538 			       void __user **ptrp,
4539 			       binder_uintptr_t node_ptr,
4540 			       binder_uintptr_t node_cookie,
4541 			       int node_debug_id,
4542 			       uint32_t cmd, const char *cmd_name)
4543 {
4544 	void __user *ptr = *ptrp;
4545 
4546 	if (put_user(cmd, (uint32_t __user *)ptr))
4547 		return -EFAULT;
4548 	ptr += sizeof(uint32_t);
4549 
4550 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4551 		return -EFAULT;
4552 	ptr += sizeof(binder_uintptr_t);
4553 
4554 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4555 		return -EFAULT;
4556 	ptr += sizeof(binder_uintptr_t);
4557 
4558 	binder_stat_br(proc, thread, cmd);
4559 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4560 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4561 		     (u64)node_ptr, (u64)node_cookie);
4562 
4563 	*ptrp = ptr;
4564 	return 0;
4565 }
4566 
4567 static int binder_wait_for_work(struct binder_thread *thread,
4568 				bool do_proc_work)
4569 {
4570 	DEFINE_WAIT(wait);
4571 	struct binder_proc *proc = thread->proc;
4572 	int ret = 0;
4573 
4574 	binder_inner_proc_lock(proc);
4575 	for (;;) {
4576 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4577 		if (binder_has_work_ilocked(thread, do_proc_work))
4578 			break;
4579 		if (do_proc_work)
4580 			list_add(&thread->waiting_thread_node,
4581 				 &proc->waiting_threads);
4582 		binder_inner_proc_unlock(proc);
4583 		schedule();
4584 		binder_inner_proc_lock(proc);
4585 		list_del_init(&thread->waiting_thread_node);
4586 		if (signal_pending(current)) {
4587 			ret = -EINTR;
4588 			break;
4589 		}
4590 	}
4591 	finish_wait(&thread->wait, &wait);
4592 	binder_inner_proc_unlock(proc);
4593 
4594 	return ret;
4595 }
4596 
4597 /**
4598  * binder_apply_fd_fixups() - finish fd translation
4599  * @proc:	binder_proc associated with @t->buffer
4600  * @t:	binder transaction with list of fd fixups
4601  *
4602  * Now that we are in the context of the transaction target
4603  * process, we can allocate and install fds. Process the
4604  * list of fds to translate and fixup the buffer with the
4605  * new fds first and only then install the files.
4606  *
4607  * If we fail to allocate an fd, skip the install and release
4608  * any fds that have already been allocated.
4609  */
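 *
 * Return: 0 on success; -ENOMEM if an fd could not be allocated,
 * -EINVAL if a translated fd could not be patched into the buffer.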
4610 static int binder_apply_fd_fixups(struct binder_proc *proc,
4611 				  struct binder_transaction *t)
4612 {
4613 	struct binder_txn_fd_fixup *fixup, *tmp;
4614 	int ret = 0;
4615 
4616 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4617 		int fd = get_unused_fd_flags(O_CLOEXEC);
4618 
4619 		if (fd < 0) {
4620 			binder_debug(BINDER_DEBUG_TRANSACTION,
4621 				     "failed fd fixup txn %d fd %d\n",
4622 				     t->debug_id, fd);
4623 			ret = -ENOMEM;
4624 			goto err;
4625 		}
4626 		binder_debug(BINDER_DEBUG_TRANSACTION,
4627 			     "fd fixup txn %d fd %d\n",
4628 			     t->debug_id, fd);
4629 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4630 		fixup->target_fd = fd;
4631 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4632 						fixup->offset, &fd,
4633 						sizeof(u32))) {
4634 			ret = -EINVAL;
4635 			goto err;
4636 		}
4637 	}
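	/*
	 * Every fd was allocated and patched into the buffer, so the
	 * files can now be published to the target's fd table; a
	 * failure above never leaves a half-installed fd visible.
	 */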
4638 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4639 		fd_install(fixup->target_fd, fixup->file);
4640 		list_del(&fixup->fixup_entry);
4641 		kfree(fixup);
4642 	}
4643 
4644 	return ret;
4645 
4646 err:
4647 	binder_free_txn_fixups(t);
4648 	return ret;
4649 }
4650 
4651 static int binder_thread_read(struct binder_proc *proc,
4652 			      struct binder_thread *thread,
4653 			      binder_uintptr_t binder_buffer, size_t size,
4654 			      binder_size_t *consumed, int non_block)
4655 {
4656 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4657 	void __user *ptr = buffer + *consumed;
4658 	void __user *end = buffer + size;
4659 
4660 	int ret = 0;
4661 	int wait_for_proc_work;
4662 
4663 	if (*consumed == 0) {
4664 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4665 			return -EFAULT;
4666 		ptr += sizeof(uint32_t);
4667 	}
4668 
4669 retry:
4670 	binder_inner_proc_lock(proc);
4671 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4672 	binder_inner_proc_unlock(proc);
4673 
4674 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4675 
4676 	trace_binder_wait_for_work(wait_for_proc_work,
4677 				   !!thread->transaction_stack,
4678 				   !binder_worklist_empty(proc, &thread->todo));
4679 	if (wait_for_proc_work) {
4680 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4681 					BINDER_LOOPER_STATE_ENTERED))) {
4682 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4683 				proc->pid, thread->pid, thread->looper);
4684 			wait_event_interruptible(binder_user_error_wait,
4685 						 binder_stop_on_user_error < 2);
4686 		}
4687 		binder_set_nice(proc->default_priority);
4688 	}
4689 
4690 	if (non_block) {
4691 		if (!binder_has_work(thread, wait_for_proc_work))
4692 			ret = -EAGAIN;
4693 	} else {
4694 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4695 	}
4696 
4697 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4698 
4699 	if (ret)
4700 		return ret;
4701 
4702 	while (1) {
4703 		uint32_t cmd;
4704 		struct binder_transaction_data_secctx tr;
4705 		struct binder_transaction_data *trd = &tr.transaction_data;
4706 		struct binder_work *w = NULL;
4707 		struct list_head *list = NULL;
4708 		struct binder_transaction *t = NULL;
4709 		struct binder_thread *t_from;
4710 		size_t trsize = sizeof(*trd);
4711 
4712 		binder_inner_proc_lock(proc);
4713 		if (!binder_worklist_empty_ilocked(&thread->todo))
4714 			list = &thread->todo;
4715 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4716 			   wait_for_proc_work)
4717 			list = &proc->todo;
4718 		else {
4719 			binder_inner_proc_unlock(proc);
4720 
4721 			/* no data added: only the initial BR_NOOP is in the buffer */
4722 			if (ptr - buffer == 4 && !thread->looper_need_return)
4723 				goto retry;
4724 			break;
4725 		}
4726 
4727 		if (end - ptr < sizeof(tr) + 4) {
4728 			binder_inner_proc_unlock(proc);
4729 			break;
4730 		}
4731 		w = binder_dequeue_work_head_ilocked(list);
4732 		if (binder_worklist_empty_ilocked(&thread->todo))
4733 			thread->process_todo = false;
4734 
4735 		switch (w->type) {
4736 		case BINDER_WORK_TRANSACTION: {
4737 			binder_inner_proc_unlock(proc);
4738 			t = container_of(w, struct binder_transaction, work);
4739 		} break;
4740 		case BINDER_WORK_RETURN_ERROR: {
4741 			struct binder_error *e = container_of(
4742 					w, struct binder_error, work);
4743 
4744 			WARN_ON(e->cmd == BR_OK);
4745 			binder_inner_proc_unlock(proc);
4746 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4747 				return -EFAULT;
4748 			cmd = e->cmd;
4749 			e->cmd = BR_OK;
4750 			ptr += sizeof(uint32_t);
4751 
4752 			binder_stat_br(proc, thread, cmd);
4753 		} break;
4754 		case BINDER_WORK_TRANSACTION_COMPLETE:
4755 		case BINDER_WORK_TRANSACTION_PENDING:
4756 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4757 			if (proc->oneway_spam_detection_enabled &&
4758 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4759 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4760 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4761 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4762 			else
4763 				cmd = BR_TRANSACTION_COMPLETE;
4764 			binder_inner_proc_unlock(proc);
4765 			kfree(w);
4766 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4767 			if (put_user(cmd, (uint32_t __user *)ptr))
4768 				return -EFAULT;
4769 			ptr += sizeof(uint32_t);
4770 
4771 			binder_stat_br(proc, thread, cmd);
4772 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4773 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4774 				     proc->pid, thread->pid);
4775 		} break;
4776 		case BINDER_WORK_NODE: {
4777 			struct binder_node *node = container_of(w, struct binder_node, work);
4778 			int strong, weak;
4779 			binder_uintptr_t node_ptr = node->ptr;
4780 			binder_uintptr_t node_cookie = node->cookie;
4781 			int node_debug_id = node->debug_id;
4782 			int has_weak_ref;
4783 			int has_strong_ref;
4784 			void __user *orig_ptr = ptr;
4785 
4786 			BUG_ON(proc != node->proc);
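			/*
			 * Compare the node's current strong/weak state
			 * with what userspace last saw; each mismatch
			 * handled below becomes one BR_INCREFS /
			 * BR_ACQUIRE / BR_RELEASE / BR_DECREFS command.
			 */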
4787 			strong = node->internal_strong_refs ||
4788 					node->local_strong_refs;
4789 			weak = !hlist_empty(&node->refs) ||
4790 					node->local_weak_refs ||
4791 					node->tmp_refs || strong;
4792 			has_strong_ref = node->has_strong_ref;
4793 			has_weak_ref = node->has_weak_ref;
4794 
4795 			if (weak && !has_weak_ref) {
4796 				node->has_weak_ref = 1;
4797 				node->pending_weak_ref = 1;
4798 				node->local_weak_refs++;
4799 			}
4800 			if (strong && !has_strong_ref) {
4801 				node->has_strong_ref = 1;
4802 				node->pending_strong_ref = 1;
4803 				node->local_strong_refs++;
4804 			}
4805 			if (!strong && has_strong_ref)
4806 				node->has_strong_ref = 0;
4807 			if (!weak && has_weak_ref)
4808 				node->has_weak_ref = 0;
4809 			if (!weak && !strong) {
4810 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4811 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4812 					     proc->pid, thread->pid,
4813 					     node_debug_id,
4814 					     (u64)node_ptr,
4815 					     (u64)node_cookie);
4816 				rb_erase(&node->rb_node, &proc->nodes);
4817 				binder_inner_proc_unlock(proc);
4818 				binder_node_lock(node);
4819 				/*
4820 				 * Acquire the node lock before freeing the
4821 				 * node to serialize with other threads that
4822 				 * may have been holding the node lock while
4823 				 * decrementing this node (avoids race where
4824 				 * this thread frees while the other thread
4825 				 * is unlocking the node after the final
4826 				 * decrement)
4827 				 */
4828 				binder_node_unlock(node);
4829 				binder_free_node(node);
4830 			} else
4831 				binder_inner_proc_unlock(proc);
4832 
4833 			if (weak && !has_weak_ref)
4834 				ret = binder_put_node_cmd(
4835 						proc, thread, &ptr, node_ptr,
4836 						node_cookie, node_debug_id,
4837 						BR_INCREFS, "BR_INCREFS");
4838 			if (!ret && strong && !has_strong_ref)
4839 				ret = binder_put_node_cmd(
4840 						proc, thread, &ptr, node_ptr,
4841 						node_cookie, node_debug_id,
4842 						BR_ACQUIRE, "BR_ACQUIRE");
4843 			if (!ret && !strong && has_strong_ref)
4844 				ret = binder_put_node_cmd(
4845 						proc, thread, &ptr, node_ptr,
4846 						node_cookie, node_debug_id,
4847 						BR_RELEASE, "BR_RELEASE");
4848 			if (!ret && !weak && has_weak_ref)
4849 				ret = binder_put_node_cmd(
4850 						proc, thread, &ptr, node_ptr,
4851 						node_cookie, node_debug_id,
4852 						BR_DECREFS, "BR_DECREFS");
4853 			if (orig_ptr == ptr)
4854 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4855 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4856 					     proc->pid, thread->pid,
4857 					     node_debug_id,
4858 					     (u64)node_ptr,
4859 					     (u64)node_cookie);
4860 			if (ret)
4861 				return ret;
4862 		} break;
4863 		case BINDER_WORK_DEAD_BINDER:
4864 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4865 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4866 			struct binder_ref_death *death;
4867 			uint32_t cmd;
4868 			binder_uintptr_t cookie;
4869 
4870 			death = container_of(w, struct binder_ref_death, work);
4871 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4872 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4873 			else
4874 				cmd = BR_DEAD_BINDER;
4875 			cookie = death->cookie;
4876 
4877 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4878 				     "%d:%d %s %016llx\n",
4879 				      proc->pid, thread->pid,
4880 				      cmd == BR_DEAD_BINDER ?
4881 				      "BR_DEAD_BINDER" :
4882 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4883 				      (u64)cookie);
4884 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4885 				binder_inner_proc_unlock(proc);
4886 				kfree(death);
4887 				binder_stats_deleted(BINDER_STAT_DEATH);
4888 			} else {
4889 				binder_enqueue_work_ilocked(
4890 						w, &proc->delivered_death);
4891 				binder_inner_proc_unlock(proc);
4892 			}
4893 			if (put_user(cmd, (uint32_t __user *)ptr))
4894 				return -EFAULT;
4895 			ptr += sizeof(uint32_t);
4896 			if (put_user(cookie,
4897 				     (binder_uintptr_t __user *)ptr))
4898 				return -EFAULT;
4899 			ptr += sizeof(binder_uintptr_t);
4900 			binder_stat_br(proc, thread, cmd);
4901 			if (cmd == BR_DEAD_BINDER)
4902 				goto done; /* DEAD_BINDER notifications can cause transactions */
4903 		} break;
4904 
4905 		case BINDER_WORK_FROZEN_BINDER: {
4906 			struct binder_ref_freeze *freeze;
4907 			struct binder_frozen_state_info info;
4908 
4909 			memset(&info, 0, sizeof(info));
4910 			freeze = container_of(w, struct binder_ref_freeze, work);
4911 			info.is_frozen = freeze->is_frozen;
4912 			info.cookie = freeze->cookie;
4913 			freeze->sent = true;
4914 			binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4915 			binder_inner_proc_unlock(proc);
4916 
4917 			if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4918 				return -EFAULT;
4919 			ptr += sizeof(uint32_t);
4920 			if (copy_to_user(ptr, &info, sizeof(info)))
4921 				return -EFAULT;
4922 			ptr += sizeof(info);
4923 			binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4924 			goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4925 		} break;
4926 
4927 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4928 			struct binder_ref_freeze *freeze =
4929 			    container_of(w, struct binder_ref_freeze, work);
4930 			binder_uintptr_t cookie = freeze->cookie;
4931 
4932 			binder_inner_proc_unlock(proc);
4933 			kfree(freeze);
4934 			binder_stats_deleted(BINDER_STAT_FREEZE);
4935 			if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
4936 				return -EFAULT;
4937 			ptr += sizeof(uint32_t);
4938 			if (put_user(cookie, (binder_uintptr_t __user *)ptr))
4939 				return -EFAULT;
4940 			ptr += sizeof(binder_uintptr_t);
4941 			binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
4942 		} break;
4943 
4944 		default:
4945 			binder_inner_proc_unlock(proc);
4946 			pr_err("%d:%d: bad work type %d\n",
4947 			       proc->pid, thread->pid, w->type);
4948 			break;
4949 		}
4950 
4951 		if (!t)
4952 			continue;
4953 
4954 		BUG_ON(t->buffer == NULL);
4955 		if (t->buffer->target_node) {
4956 			struct binder_node *target_node = t->buffer->target_node;
4957 
4958 			trd->target.ptr = target_node->ptr;
4959 			trd->cookie =  target_node->cookie;
4960 			t->saved_priority = task_nice(current);
4961 			if (t->priority < target_node->min_priority &&
4962 			    !(t->flags & TF_ONE_WAY))
4963 				binder_set_nice(t->priority);
4964 			else if (!(t->flags & TF_ONE_WAY) ||
4965 				 t->saved_priority > target_node->min_priority)
4966 				binder_set_nice(target_node->min_priority);
4967 			cmd = BR_TRANSACTION;
4968 		} else {
4969 			trd->target.ptr = 0;
4970 			trd->cookie = 0;
4971 			cmd = BR_REPLY;
4972 		}
4973 		trd->code = t->code;
4974 		trd->flags = t->flags;
4975 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4976 
4977 		t_from = binder_get_txn_from(t);
4978 		if (t_from) {
4979 			struct task_struct *sender = t_from->proc->tsk;
4980 
4981 			trd->sender_pid =
4982 				task_tgid_nr_ns(sender,
4983 						task_active_pid_ns(current));
4984 		} else {
4985 			trd->sender_pid = 0;
4986 		}
4987 
4988 		ret = binder_apply_fd_fixups(proc, t);
4989 		if (ret) {
4990 			struct binder_buffer *buffer = t->buffer;
4991 			bool oneway = !!(t->flags & TF_ONE_WAY);
4992 			int tid = t->debug_id;
4993 
4994 			if (t_from)
4995 				binder_thread_dec_tmpref(t_from);
4996 			buffer->transaction = NULL;
4997 			binder_cleanup_transaction(t, "fd fixups failed",
4998 						   BR_FAILED_REPLY);
4999 			binder_free_buf(proc, thread, buffer, true);
5000 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5001 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5002 				     proc->pid, thread->pid,
5003 				     oneway ? "async " :
5004 					(cmd == BR_REPLY ? "reply " : ""),
5005 				     tid, BR_FAILED_REPLY, ret, __LINE__);
5006 			if (cmd == BR_REPLY) {
5007 				cmd = BR_FAILED_REPLY;
5008 				if (put_user(cmd, (uint32_t __user *)ptr))
5009 					return -EFAULT;
5010 				ptr += sizeof(uint32_t);
5011 				binder_stat_br(proc, thread, cmd);
5012 				break;
5013 			}
5014 			continue;
5015 		}
5016 		trd->data_size = t->buffer->data_size;
5017 		trd->offsets_size = t->buffer->offsets_size;
5018 		trd->data.ptr.buffer = t->buffer->user_data;
5019 		trd->data.ptr.offsets = trd->data.ptr.buffer +
5020 					ALIGN(t->buffer->data_size,
5021 					    sizeof(void *));
5022 
5023 		tr.secctx = t->security_ctx;
5024 		if (t->security_ctx) {
5025 			cmd = BR_TRANSACTION_SEC_CTX;
5026 			trsize = sizeof(tr);
5027 		}
5028 		if (put_user(cmd, (uint32_t __user *)ptr)) {
5029 			if (t_from)
5030 				binder_thread_dec_tmpref(t_from);
5031 
5032 			binder_cleanup_transaction(t, "put_user failed",
5033 						   BR_FAILED_REPLY);
5034 
5035 			return -EFAULT;
5036 		}
5037 		ptr += sizeof(uint32_t);
5038 		if (copy_to_user(ptr, &tr, trsize)) {
5039 			if (t_from)
5040 				binder_thread_dec_tmpref(t_from);
5041 
5042 			binder_cleanup_transaction(t, "copy_to_user failed",
5043 						   BR_FAILED_REPLY);
5044 
5045 			return -EFAULT;
5046 		}
5047 		ptr += trsize;
5048 
5049 		trace_binder_transaction_received(t);
5050 		binder_stat_br(proc, thread, cmd);
5051 		binder_debug(BINDER_DEBUG_TRANSACTION,
5052 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5053 			     proc->pid, thread->pid,
5054 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5055 				(cmd == BR_TRANSACTION_SEC_CTX) ?
5056 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5057 			     t->debug_id, t_from ? t_from->proc->pid : 0,
5058 			     t_from ? t_from->pid : 0, cmd,
5059 			     t->buffer->data_size, t->buffer->offsets_size);
5060 
5061 		if (t_from)
5062 			binder_thread_dec_tmpref(t_from);
5063 		t->buffer->allow_user_free = 1;
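		/*
		 * A synchronous transaction stays on this thread's stack
		 * until the reply arrives; replies and one-way work are
		 * finished here and freed.
		 */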
5064 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5065 			binder_inner_proc_lock(thread->proc);
5066 			t->to_parent = thread->transaction_stack;
5067 			t->to_thread = thread;
5068 			thread->transaction_stack = t;
5069 			binder_inner_proc_unlock(thread->proc);
5070 		} else {
5071 			binder_free_transaction(t);
5072 		}
5073 		break;
5074 	}
5075 
5076 done:
5077 
5078 	*consumed = ptr - buffer;
5079 	binder_inner_proc_lock(proc);
5080 	if (proc->requested_threads == 0 &&
5081 	    list_empty(&thread->proc->waiting_threads) &&
5082 	    proc->requested_threads_started < proc->max_threads &&
5083 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5084 	     BINDER_LOOPER_STATE_ENTERED))
5085 	     /* user space fails to spawn a new thread if we leave this out */) {
5086 		proc->requested_threads++;
5087 		binder_inner_proc_unlock(proc);
5088 		binder_debug(BINDER_DEBUG_THREADS,
5089 			     "%d:%d BR_SPAWN_LOOPER\n",
5090 			     proc->pid, thread->pid);
5091 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5092 			return -EFAULT;
5093 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5094 	} else
5095 		binder_inner_proc_unlock(proc);
5096 	return 0;
5097 }
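
/*
 * Userspace is expected to react to BR_SPAWN_LOOPER by starting a new
 * thread that registers itself with BC_REGISTER_LOOPER and then blocks
 * reading commands. A hedged userspace sketch (illustrative only, not
 * part of the kernel; "fd" is an open binder descriptor and error
 * handling is omitted):
 *
 *	uint32_t cmd = BC_REGISTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	for (;;) {
 *		uint32_t rbuf[32];
 *
 *		bwr.write_size = 0;
 *		bwr.read_buffer = (binder_uintptr_t)rbuf;
 *		bwr.read_size = sizeof(rbuf);
 *		bwr.read_consumed = 0;
 *		ioctl(fd, BINDER_WRITE_READ, &bwr);
 *		(then parse bwr.read_consumed bytes of BR_* commands)
 *	}
 */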
5098 
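/*
 * Drain @list, releasing each undelivered work item according to its
 * type. The inner lock is taken per item rather than across the whole
 * walk, so the list can keep being modified while it is drained.
 */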
5099 static void binder_release_work(struct binder_proc *proc,
5100 				struct list_head *list)
5101 {
5102 	struct binder_work *w;
5103 	enum binder_work_type wtype;
5104 
5105 	while (1) {
5106 		binder_inner_proc_lock(proc);
5107 		w = binder_dequeue_work_head_ilocked(list);
5108 		wtype = w ? w->type : 0;
5109 		binder_inner_proc_unlock(proc);
5110 		if (!w)
5111 			return;
5112 
5113 		switch (wtype) {
5114 		case BINDER_WORK_TRANSACTION: {
5115 			struct binder_transaction *t;
5116 
5117 			t = container_of(w, struct binder_transaction, work);
5118 
5119 			binder_cleanup_transaction(t, "process died.",
5120 						   BR_DEAD_REPLY);
5121 		} break;
5122 		case BINDER_WORK_RETURN_ERROR: {
5123 			struct binder_error *e = container_of(
5124 					w, struct binder_error, work);
5125 
5126 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5127 				"undelivered TRANSACTION_ERROR: %u\n",
5128 				e->cmd);
5129 		} break;
5130 		case BINDER_WORK_TRANSACTION_PENDING:
5131 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5132 		case BINDER_WORK_TRANSACTION_COMPLETE: {
5133 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5134 				"undelivered TRANSACTION_COMPLETE\n");
5135 			kfree(w);
5136 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5137 		} break;
5138 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5139 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5140 			struct binder_ref_death *death;
5141 
5142 			death = container_of(w, struct binder_ref_death, work);
5143 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5144 				"undelivered death notification, %016llx\n",
5145 				(u64)death->cookie);
5146 			kfree(death);
5147 			binder_stats_deleted(BINDER_STAT_DEATH);
5148 		} break;
5149 		case BINDER_WORK_NODE:
5150 			break;
5151 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5152 			struct binder_ref_freeze *freeze;
5153 
5154 			freeze = container_of(w, struct binder_ref_freeze, work);
5155 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5156 				     "undelivered freeze notification, %016llx\n",
5157 				     (u64)freeze->cookie);
5158 			kfree(freeze);
5159 			binder_stats_deleted(BINDER_STAT_FREEZE);
5160 		} break;
5161 		default:
5162 			pr_err("unexpected work type, %d, not freed\n",
5163 			       wtype);
5164 			break;
5165 		}
5166 	}
5168 }
5169 
5170 static struct binder_thread *binder_get_thread_ilocked(
5171 		struct binder_proc *proc, struct binder_thread *new_thread)
5172 {
5173 	struct binder_thread *thread = NULL;
5174 	struct rb_node *parent = NULL;
5175 	struct rb_node **p = &proc->threads.rb_node;
5176 
5177 	while (*p) {
5178 		parent = *p;
5179 		thread = rb_entry(parent, struct binder_thread, rb_node);
5180 
5181 		if (current->pid < thread->pid)
5182 			p = &(*p)->rb_left;
5183 		else if (current->pid > thread->pid)
5184 			p = &(*p)->rb_right;
5185 		else
5186 			return thread;
5187 	}
5188 	if (!new_thread)
5189 		return NULL;
5190 	thread = new_thread;
5191 	binder_stats_created(BINDER_STAT_THREAD);
5192 	thread->proc = proc;
5193 	thread->pid = current->pid;
5194 	atomic_set(&thread->tmp_ref, 0);
5195 	init_waitqueue_head(&thread->wait);
5196 	INIT_LIST_HEAD(&thread->todo);
5197 	rb_link_node(&thread->rb_node, parent, p);
5198 	rb_insert_color(&thread->rb_node, &proc->threads);
5199 	thread->looper_need_return = true;
5200 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5201 	thread->return_error.cmd = BR_OK;
5202 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5203 	thread->reply_error.cmd = BR_OK;
5204 	thread->ee.command = BR_OK;
5205 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5206 	return thread;
5207 }
5208 
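/*
 * Find the binder_thread for "current", creating it on first use. The
 * allocation is done with no locks held; the lookup is then repeated
 * under the inner lock before inserting, and the spare allocation is
 * freed if an entry already exists.
 */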
5209 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5210 {
5211 	struct binder_thread *thread;
5212 	struct binder_thread *new_thread;
5213 
5214 	binder_inner_proc_lock(proc);
5215 	thread = binder_get_thread_ilocked(proc, NULL);
5216 	binder_inner_proc_unlock(proc);
5217 	if (!thread) {
5218 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5219 		if (new_thread == NULL)
5220 			return NULL;
5221 		binder_inner_proc_lock(proc);
5222 		thread = binder_get_thread_ilocked(proc, new_thread);
5223 		binder_inner_proc_unlock(proc);
5224 		if (thread != new_thread)
5225 			kfree(new_thread);
5226 	}
5227 	return thread;
5228 }
5229 
5230 static void binder_free_proc(struct binder_proc *proc)
5231 {
5232 	struct binder_device *device;
5233 
5234 	BUG_ON(!list_empty(&proc->todo));
5235 	BUG_ON(!list_empty(&proc->delivered_death));
5236 	if (proc->outstanding_txns)
5237 		pr_warn("%s: Unexpected outstanding_txns %d\n",
5238 			__func__, proc->outstanding_txns);
5239 	device = container_of(proc->context, struct binder_device, context);
5240 	if (refcount_dec_and_test(&device->ref)) {
5241 		binder_remove_device(device);
5242 		kfree(proc->context->name);
5243 		kfree(device);
5244 	}
5245 	binder_alloc_deferred_release(&proc->alloc);
5246 	put_task_struct(proc->tsk);
5247 	put_cred(proc->cred);
5248 	binder_stats_deleted(BINDER_STAT_PROC);
5249 	dbitmap_free(&proc->dmap);
5250 	kfree(proc);
5251 }
5252 
5253 static void binder_free_thread(struct binder_thread *thread)
5254 {
5255 	BUG_ON(!list_empty(&thread->todo));
5256 	binder_stats_deleted(BINDER_STAT_THREAD);
5257 	binder_proc_dec_tmpref(thread->proc);
5258 	kfree(thread);
5259 }
5260 
5261 static int binder_thread_release(struct binder_proc *proc,
5262 				 struct binder_thread *thread)
5263 {
5264 	struct binder_transaction *t;
5265 	struct binder_transaction *send_reply = NULL;
5266 	int active_transactions = 0;
5267 	struct binder_transaction *last_t = NULL;
5268 
5269 	binder_inner_proc_lock(thread->proc);
5270 	/*
5271 	 * take a ref on the proc so it survives
5272 	 * after we remove this thread from proc->threads.
5273 	 * The corresponding decrement happens when we
5274 	 * actually free the thread in binder_free_thread().
5275 	 */
5276 	proc->tmp_ref++;
5277 	/*
5278 	 * take a ref on this thread to ensure it
5279 	 * survives while we are releasing it
5280 	 */
5281 	atomic_inc(&thread->tmp_ref);
5282 	rb_erase(&thread->rb_node, &proc->threads);
5283 	t = thread->transaction_stack;
5284 	if (t) {
5285 		spin_lock(&t->lock);
5286 		if (t->to_thread == thread)
5287 			send_reply = t;
5288 	} else {
5289 		__acquire(&t->lock);
5290 	}
5291 	thread->is_dead = true;
5292 
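	/*
	 * Walk the transaction stack, severing every link to this thread:
	 * transactions targeting us drop their buffer association and the
	 * outstanding_txns count, transactions we sent just lose their
	 * sender. The walk takes each t->lock hand over hand.
	 */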
5293 	while (t) {
5294 		last_t = t;
5295 		active_transactions++;
5296 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5297 			     "release %d:%d transaction %d %s, still active\n",
5298 			      proc->pid, thread->pid,
5299 			     t->debug_id,
5300 			     (t->to_thread == thread) ? "in" : "out");
5301 
5302 		if (t->to_thread == thread) {
5303 			thread->proc->outstanding_txns--;
5304 			t->to_proc = NULL;
5305 			t->to_thread = NULL;
5306 			if (t->buffer) {
5307 				t->buffer->transaction = NULL;
5308 				t->buffer = NULL;
5309 			}
5310 			t = t->to_parent;
5311 		} else if (t->from == thread) {
5312 			t->from = NULL;
5313 			t = t->from_parent;
5314 		} else
5315 			BUG();
5316 		spin_unlock(&last_t->lock);
5317 		if (t)
5318 			spin_lock(&t->lock);
5319 		else
5320 			__acquire(&t->lock);
5321 	}
5322 	/* annotation for sparse, lock not acquired in last iteration above */
5323 	__release(&t->lock);
5324 
5325 	/*
5326 	 * If this thread used poll, make sure we remove the waitqueue from any
5327 	 * poll data structures holding it.
5328 	 */
5329 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5330 		wake_up_pollfree(&thread->wait);
5331 
5332 	binder_inner_proc_unlock(thread->proc);
5333 
5334 	/*
5335 	 * This is needed to avoid races between wake_up_pollfree() above and
5336 	 * someone else removing the last entry from the queue for other reasons
5337 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5338 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5339 	 * we can be sure they're done after we call synchronize_rcu().
5340 	 */
5341 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5342 		synchronize_rcu();
5343 
5344 	if (send_reply)
5345 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5346 	binder_release_work(proc, &thread->todo);
5347 	binder_thread_dec_tmpref(thread);
5348 	return active_transactions;
5349 }
5350 
5351 static __poll_t binder_poll(struct file *filp,
5352 				struct poll_table_struct *wait)
5353 {
5354 	struct binder_proc *proc = filp->private_data;
5355 	struct binder_thread *thread = NULL;
5356 	bool wait_for_proc_work;
5357 
5358 	thread = binder_get_thread(proc);
5359 	if (!thread)
5360 		return EPOLLERR;
5361 
5362 	binder_inner_proc_lock(thread->proc);
5363 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5364 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5365 
5366 	binder_inner_proc_unlock(thread->proc);
5367 
5368 	poll_wait(filp, &thread->wait, wait);
5369 
5370 	if (binder_has_work(thread, wait_for_proc_work))
5371 		return EPOLLIN;
5372 
5373 	return 0;
5374 }
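
/*
 * A hedged userspace sketch of waiting for work without blocking in
 * BINDER_WRITE_READ (illustrative only; "fd" is an open binder
 * descriptor):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		(drain the work with a BINDER_WRITE_READ read buffer)
 */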
5375 
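/*
 * Handle BINDER_WRITE_READ: the write buffer is consumed first, then
 * the read buffer is filled. On failure the consumed counts are still
 * copied back to userspace so it can tell how far processing got.
 */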
5376 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5377 				struct binder_thread *thread)
5378 {
5379 	int ret = 0;
5380 	struct binder_proc *proc = filp->private_data;
5381 	void __user *ubuf = (void __user *)arg;
5382 	struct binder_write_read bwr;
5383 
5384 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5385 		ret = -EFAULT;
5386 		goto out;
5387 	}
5388 	binder_debug(BINDER_DEBUG_READ_WRITE,
5389 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5390 		     proc->pid, thread->pid,
5391 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5392 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5393 
5394 	if (bwr.write_size > 0) {
5395 		ret = binder_thread_write(proc, thread,
5396 					  bwr.write_buffer,
5397 					  bwr.write_size,
5398 					  &bwr.write_consumed);
5399 		trace_binder_write_done(ret);
5400 		if (ret < 0) {
5401 			bwr.read_consumed = 0;
5402 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5403 				ret = -EFAULT;
5404 			goto out;
5405 		}
5406 	}
5407 	if (bwr.read_size > 0) {
5408 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5409 					 bwr.read_size,
5410 					 &bwr.read_consumed,
5411 					 filp->f_flags & O_NONBLOCK);
5412 		trace_binder_read_done(ret);
5413 		binder_inner_proc_lock(proc);
5414 		if (!binder_worklist_empty_ilocked(&proc->todo))
5415 			binder_wakeup_proc_ilocked(proc);
5416 		binder_inner_proc_unlock(proc);
5417 		if (ret < 0) {
5418 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5419 				ret = -EFAULT;
5420 			goto out;
5421 		}
5422 	}
5423 	binder_debug(BINDER_DEBUG_READ_WRITE,
5424 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5425 		     proc->pid, thread->pid,
5426 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5427 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5428 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5429 		ret = -EFAULT;
5430 		goto out;
5431 	}
5432 out:
5433 	return ret;
5434 }
5435 
5436 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5437 				    struct flat_binder_object *fbo)
5438 {
5439 	int ret = 0;
5440 	struct binder_proc *proc = filp->private_data;
5441 	struct binder_context *context = proc->context;
5442 	struct binder_node *new_node;
5443 	kuid_t curr_euid = current_euid();
5444 
5445 	guard(mutex)(&context->context_mgr_node_lock);
5446 	if (context->binder_context_mgr_node) {
5447 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5448 		return -EBUSY;
5449 	}
5450 	ret = security_binder_set_context_mgr(proc->cred);
5451 	if (ret < 0)
5452 		return ret;
5453 	if (uid_valid(context->binder_context_mgr_uid)) {
5454 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5455 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5456 			       from_kuid(&init_user_ns, curr_euid),
5457 			       from_kuid(&init_user_ns,
5458 					 context->binder_context_mgr_uid));
5459 			return -EPERM;
5460 		}
5461 	} else {
5462 		context->binder_context_mgr_uid = curr_euid;
5463 	}
5464 	new_node = binder_new_node(proc, fbo);
5465 	if (!new_node)
5466 		return -ENOMEM;
5467 	binder_node_lock(new_node);
5468 	new_node->local_weak_refs++;
5469 	new_node->local_strong_refs++;
5470 	new_node->has_strong_ref = 1;
5471 	new_node->has_weak_ref = 1;
5472 	context->binder_context_mgr_node = new_node;
5473 	binder_node_unlock(new_node);
5474 	binder_put_node(new_node);
5475 	return ret;
5476 }
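
/*
 * Only one process per binder context may become the context manager;
 * on Android, servicemanager claims this role at startup. A hedged
 * userspace sketch (illustrative only):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		(a manager is already registered: errno == EBUSY)
 */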
5477 
5478 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5479 		struct binder_node_info_for_ref *info)
5480 {
5481 	struct binder_node *node;
5482 	struct binder_context *context = proc->context;
5483 	__u32 handle = info->handle;
5484 
5485 	if (info->strong_count || info->weak_count || info->reserved1 ||
5486 	    info->reserved2 || info->reserved3) {
5487 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
5488 				  proc->pid);
5489 		return -EINVAL;
5490 	}
5491 
5492 	/* This ioctl may only be used by the context manager */
5493 	mutex_lock(&context->context_mgr_node_lock);
5494 	if (!context->binder_context_mgr_node ||
5495 		context->binder_context_mgr_node->proc != proc) {
5496 		mutex_unlock(&context->context_mgr_node_lock);
5497 		return -EPERM;
5498 	}
5499 	mutex_unlock(&context->context_mgr_node_lock);
5500 
5501 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5502 	if (!node)
5503 		return -EINVAL;
5504 
5505 	info->strong_count = node->local_strong_refs +
5506 		node->internal_strong_refs;
5507 	info->weak_count = node->local_weak_refs;
5508 
5509 	binder_put_node(node);
5510 
5511 	return 0;
5512 }
5513 
5514 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5515 				struct binder_node_debug_info *info)
5516 {
5517 	struct rb_node *n;
5518 	binder_uintptr_t ptr = info->ptr;
5519 
5520 	memset(info, 0, sizeof(*info));
5521 
5522 	binder_inner_proc_lock(proc);
5523 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5524 		struct binder_node *node = rb_entry(n, struct binder_node,
5525 						    rb_node);
5526 		if (node->ptr > ptr) {
5527 			info->ptr = node->ptr;
5528 			info->cookie = node->cookie;
5529 			info->has_strong_ref = node->has_strong_ref;
5530 			info->has_weak_ref = node->has_weak_ref;
5531 			break;
5532 		}
5533 	}
5534 	binder_inner_proc_unlock(proc);
5535 
5536 	return 0;
5537 }
5538 
5539 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5540 {
5541 	struct rb_node *n;
5542 	struct binder_thread *thread;
5543 
5544 	if (proc->outstanding_txns > 0)
5545 		return true;
5546 
5547 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5548 		thread = rb_entry(n, struct binder_thread, rb_node);
5549 		if (thread->transaction_stack)
5550 			return true;
5551 	}
5552 	return false;
5553 }
5554 
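/*
 * Queue a BINDER_WORK_FROZEN_BINDER item for every process that has
 * requested freeze notifications on one of @proc's nodes. A temporary
 * node ref is held across each iteration so the inner lock can be
 * dropped while the node's ref list is walked under the node lock.
 */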
5555 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5556 {
5557 	struct binder_node *prev = NULL;
5558 	struct rb_node *n;
5559 	struct binder_ref *ref;
5560 
5561 	binder_inner_proc_lock(proc);
5562 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5563 		struct binder_node *node;
5564 
5565 		node = rb_entry(n, struct binder_node, rb_node);
5566 		binder_inc_node_tmpref_ilocked(node);
5567 		binder_inner_proc_unlock(proc);
5568 		if (prev)
5569 			binder_put_node(prev);
5570 		binder_node_lock(node);
5571 		hlist_for_each_entry(ref, &node->refs, node_entry) {
5572 			/*
5573 			 * Need the node lock to synchronize
5574 			 * with new notification requests and the
5575 			 * inner lock to synchronize with queued
5576 			 * freeze notifications.
5577 			 */
5578 			binder_inner_proc_lock(ref->proc);
5579 			if (!ref->freeze) {
5580 				binder_inner_proc_unlock(ref->proc);
5581 				continue;
5582 			}
5583 			ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5584 			if (list_empty(&ref->freeze->work.entry)) {
5585 				ref->freeze->is_frozen = is_frozen;
5586 				binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5587 				binder_wakeup_proc_ilocked(ref->proc);
5588 			} else {
5589 				if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5590 					ref->freeze->resend = true;
5591 				ref->freeze->is_frozen = is_frozen;
5592 			}
5593 			binder_inner_proc_unlock(ref->proc);
5594 		}
5595 		prev = node;
5596 		binder_node_unlock(node);
5597 		binder_inner_proc_lock(proc);
5598 		if (proc->is_dead)
5599 			break;
5600 	}
5601 	binder_inner_proc_unlock(proc);
5602 	if (prev)
5603 		binder_put_node(prev);
5604 }
5605 
5606 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5607 			       struct binder_proc *target_proc)
5608 {
5609 	int ret = 0;
5610 
5611 	if (!info->enable) {
5612 		binder_inner_proc_lock(target_proc);
5613 		target_proc->sync_recv = false;
5614 		target_proc->async_recv = false;
5615 		target_proc->is_frozen = false;
5616 		binder_inner_proc_unlock(target_proc);
5617 		binder_add_freeze_work(target_proc, false);
5618 		return 0;
5619 	}
5620 
5621 	/*
5622 	 * Freezing the target. Prevent new transactions by
5623 	 * setting the frozen state. If a timeout was specified,
5624 	 * wait for the outstanding transactions to drain.
5625 	 */
5626 	binder_inner_proc_lock(target_proc);
5627 	target_proc->sync_recv = false;
5628 	target_proc->async_recv = false;
5629 	target_proc->is_frozen = true;
5630 	binder_inner_proc_unlock(target_proc);
5631 
5632 	if (info->timeout_ms > 0)
5633 		ret = wait_event_interruptible_timeout(
5634 			target_proc->freeze_wait,
5635 			(!target_proc->outstanding_txns),
5636 			msecs_to_jiffies(info->timeout_ms));
5637 
5638 	/* Check pending transactions that wait for reply */
5639 	if (ret >= 0) {
5640 		binder_inner_proc_lock(target_proc);
5641 		if (binder_txns_pending_ilocked(target_proc))
5642 			ret = -EAGAIN;
5643 		binder_inner_proc_unlock(target_proc);
5644 	}
5645 
5646 	if (ret < 0) {
5647 		binder_inner_proc_lock(target_proc);
5648 		target_proc->is_frozen = false;
5649 		binder_inner_proc_unlock(target_proc);
5650 	} else {
5651 		binder_add_freeze_work(target_proc, true);
5652 	}
5653 
5654 	return ret;
5655 }
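
/*
 * A hedged userspace sketch of freezing one target process (the field
 * values are illustrative assumptions; callers typically retry on
 * EAGAIN or abort the freeze):
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		(transactions are still pending; retry or give up)
 */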
5656 
5657 static int binder_ioctl_get_freezer_info(
5658 				struct binder_frozen_status_info *info)
5659 {
5660 	struct binder_proc *target_proc;
5661 	bool found = false;
5662 	__u32 txns_pending;
5663 
5664 	info->sync_recv = 0;
5665 	info->async_recv = 0;
5666 
5667 	mutex_lock(&binder_procs_lock);
5668 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5669 		if (target_proc->pid == info->pid) {
5670 			found = true;
5671 			binder_inner_proc_lock(target_proc);
5672 			txns_pending = binder_txns_pending_ilocked(target_proc);
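			/*
			 * bit 0: received sync transactions since last frozen
			 * bit 1: sync transactions still pending
			 */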
5673 			info->sync_recv |= target_proc->sync_recv |
5674 					(txns_pending << 1);
5675 			info->async_recv |= target_proc->async_recv;
5676 			binder_inner_proc_unlock(target_proc);
5677 		}
5678 	}
5679 	mutex_unlock(&binder_procs_lock);
5680 
5681 	if (!found)
5682 		return -EINVAL;
5683 
5684 	return 0;
5685 }
5686 
5687 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5688 					   void __user *ubuf)
5689 {
5690 	struct binder_extended_error ee;
5691 
5692 	binder_inner_proc_lock(thread->proc);
5693 	ee = thread->ee;
5694 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5695 	binder_inner_proc_unlock(thread->proc);
5696 
5697 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5698 		return -EFAULT;
5699 
5700 	return 0;
5701 }
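
/*
 * A hedged userspace sketch of fetching the last extended error after
 * a failed operation (illustrative only):
 *
 *	struct binder_extended_error ee;
 *
 *	ioctl(fd, BINDER_GET_EXTENDED_ERROR, &ee);
 *	fprintf(stderr, "id %u command %u param %d\n",
 *		ee.id, ee.command, ee.param);
 */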
5702 
5703 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5704 {
5705 	int ret;
5706 	struct binder_proc *proc = filp->private_data;
5707 	struct binder_thread *thread;
5708 	void __user *ubuf = (void __user *)arg;
5709 
5710 	/* pr_info("binder_ioctl: %d:%d %x %lx\n",
5711 	 *	   proc->pid, current->pid, cmd, arg); */
5712 
5713 	binder_selftest_alloc(&proc->alloc);
5714 
5715 	trace_binder_ioctl(cmd, arg);
5716 
5717 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5718 	if (ret)
5719 		goto err_unlocked;
5720 
5721 	thread = binder_get_thread(proc);
5722 	if (thread == NULL) {
5723 		ret = -ENOMEM;
5724 		goto err;
5725 	}
5726 
5727 	switch (cmd) {
5728 	case BINDER_WRITE_READ:
5729 		ret = binder_ioctl_write_read(filp, arg, thread);
5730 		if (ret)
5731 			goto err;
5732 		break;
5733 	case BINDER_SET_MAX_THREADS: {
5734 		u32 max_threads;
5735 
5736 		if (copy_from_user(&max_threads, ubuf,
5737 				   sizeof(max_threads))) {
5738 			ret = -EINVAL;
5739 			goto err;
5740 		}
5741 		binder_inner_proc_lock(proc);
5742 		proc->max_threads = max_threads;
5743 		binder_inner_proc_unlock(proc);
5744 		break;
5745 	}
5746 	case BINDER_SET_CONTEXT_MGR_EXT: {
5747 		struct flat_binder_object fbo;
5748 
5749 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5750 			ret = -EINVAL;
5751 			goto err;
5752 		}
5753 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5754 		if (ret)
5755 			goto err;
5756 		break;
5757 	}
5758 	case BINDER_SET_CONTEXT_MGR:
5759 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5760 		if (ret)
5761 			goto err;
5762 		break;
5763 	case BINDER_THREAD_EXIT:
5764 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5765 			     proc->pid, thread->pid);
5766 		binder_thread_release(proc, thread);
5767 		thread = NULL;
5768 		break;
5769 	case BINDER_VERSION: {
5770 		struct binder_version __user *ver = ubuf;
5771 
5772 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5773 			     &ver->protocol_version)) {
5774 			ret = -EINVAL;
5775 			goto err;
5776 		}
5777 		break;
5778 	}
5779 	case BINDER_GET_NODE_INFO_FOR_REF: {
5780 		struct binder_node_info_for_ref info;
5781 
5782 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5783 			ret = -EFAULT;
5784 			goto err;
5785 		}
5786 
5787 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5788 		if (ret < 0)
5789 			goto err;
5790 
5791 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5792 			ret = -EFAULT;
5793 			goto err;
5794 		}
5795 
5796 		break;
5797 	}
5798 	case BINDER_GET_NODE_DEBUG_INFO: {
5799 		struct binder_node_debug_info info;
5800 
5801 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5802 			ret = -EFAULT;
5803 			goto err;
5804 		}
5805 
5806 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5807 		if (ret < 0)
5808 			goto err;
5809 
5810 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5811 			ret = -EFAULT;
5812 			goto err;
5813 		}
5814 		break;
5815 	}
5816 	case BINDER_FREEZE: {
5817 		struct binder_freeze_info info;
5818 		struct binder_proc **target_procs = NULL, *target_proc;
5819 		int target_procs_count = 0, i = 0;
5820 
5821 		ret = 0;
5822 
5823 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5824 			ret = -EFAULT;
5825 			goto err;
5826 		}
5827 
5828 		mutex_lock(&binder_procs_lock);
5829 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5830 			if (target_proc->pid == info.pid)
5831 				target_procs_count++;
5832 		}
5833 
5834 		if (target_procs_count == 0) {
5835 			mutex_unlock(&binder_procs_lock);
5836 			ret = -EINVAL;
5837 			goto err;
5838 		}
5839 
5840 		target_procs = kcalloc(target_procs_count,
5841 				       sizeof(struct binder_proc *),
5842 				       GFP_KERNEL);
5843 
5844 		if (!target_procs) {
5845 			mutex_unlock(&binder_procs_lock);
5846 			ret = -ENOMEM;
5847 			goto err;
5848 		}
5849 
5850 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5851 			if (target_proc->pid != info.pid)
5852 				continue;
5853 
5854 			binder_inner_proc_lock(target_proc);
5855 			target_proc->tmp_ref++;
5856 			binder_inner_proc_unlock(target_proc);
5857 
5858 			target_procs[i++] = target_proc;
5859 		}
5860 		mutex_unlock(&binder_procs_lock);
5861 
5862 		for (i = 0; i < target_procs_count; i++) {
5863 			if (ret >= 0)
5864 				ret = binder_ioctl_freeze(&info,
5865 							  target_procs[i]);
5866 
5867 			binder_proc_dec_tmpref(target_procs[i]);
5868 		}
5869 
5870 		kfree(target_procs);
5871 
5872 		if (ret < 0)
5873 			goto err;
5874 		break;
5875 	}
5876 	case BINDER_GET_FROZEN_INFO: {
5877 		struct binder_frozen_status_info info;
5878 
5879 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5880 			ret = -EFAULT;
5881 			goto err;
5882 		}
5883 
5884 		ret = binder_ioctl_get_freezer_info(&info);
5885 		if (ret < 0)
5886 			goto err;
5887 
5888 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5889 			ret = -EFAULT;
5890 			goto err;
5891 		}
5892 		break;
5893 	}
5894 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5895 		uint32_t enable;
5896 
5897 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5898 			ret = -EFAULT;
5899 			goto err;
5900 		}
5901 		binder_inner_proc_lock(proc);
5902 		proc->oneway_spam_detection_enabled = (bool)enable;
5903 		binder_inner_proc_unlock(proc);
5904 		break;
5905 	}
5906 	case BINDER_GET_EXTENDED_ERROR:
5907 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5908 		if (ret < 0)
5909 			goto err;
5910 		break;
5911 	default:
5912 		ret = -EINVAL;
5913 		goto err;
5914 	}
5915 	ret = 0;
5916 err:
5917 	if (thread)
5918 		thread->looper_need_return = false;
5919 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5920 	if (ret && ret != -EINTR)
5921 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5922 err_unlocked:
5923 	trace_binder_ioctl_done(ret);
5924 	return ret;
5925 }
5926 
5927 static void binder_vma_open(struct vm_area_struct *vma)
5928 {
5929 	struct binder_proc *proc = vma->vm_private_data;
5930 
5931 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5932 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5933 		     proc->pid, vma->vm_start, vma->vm_end,
5934 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5935 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5936 }
5937 
5938 static void binder_vma_close(struct vm_area_struct *vma)
5939 {
5940 	struct binder_proc *proc = vma->vm_private_data;
5941 
5942 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5943 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5944 		     proc->pid, vma->vm_start, vma->vm_end,
5945 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5946 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5947 	binder_alloc_vma_close(&proc->alloc);
5948 }
5949 
5950 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5951 {
5952 	return VM_FAULT_SIGBUS;
5953 }
5954 
5955 static const struct vm_operations_struct binder_vm_ops = {
5956 	.open = binder_vma_open,
5957 	.close = binder_vma_close,
5958 	.fault = binder_vm_fault,
5959 };
5960 
5961 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5962 {
5963 	struct binder_proc *proc = filp->private_data;
5964 
5965 	if (proc->tsk != current->group_leader)
5966 		return -EINVAL;
5967 
5968 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5969 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5970 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5971 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5972 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5973 
5974 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5975 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5976 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5977 		return -EPERM;
5978 	}
5979 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5980 
5981 	vma->vm_ops = &binder_vm_ops;
5982 	vma->vm_private_data = proc;
5983 
5984 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5985 }
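
/*
 * The buffer area must be mapped read-only: FORBIDDEN_MMAP_FLAGS
 * rejects VM_WRITE, and binder_mmap() clears VM_MAYWRITE so a later
 * mprotect(PROT_WRITE) fails as well. A hedged sketch of the usual
 * userspace mapping (the size is an assumption; libbinder maps about
 * 1MB minus two pages):
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */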
5986 
5987 static int binder_open(struct inode *nodp, struct file *filp)
5988 {
5989 	struct binder_proc *proc, *itr;
5990 	struct binder_device *binder_dev;
5991 	struct binderfs_info *info;
5992 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5993 	bool existing_pid = false;
5994 
5995 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5996 		     current->group_leader->pid, current->pid);
5997 
5998 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5999 	if (proc == NULL)
6000 		return -ENOMEM;
6001 
6002 	dbitmap_init(&proc->dmap);
6003 	spin_lock_init(&proc->inner_lock);
6004 	spin_lock_init(&proc->outer_lock);
6005 	get_task_struct(current->group_leader);
6006 	proc->tsk = current->group_leader;
6007 	proc->cred = get_cred(filp->f_cred);
6008 	INIT_LIST_HEAD(&proc->todo);
6009 	init_waitqueue_head(&proc->freeze_wait);
6010 	proc->default_priority = task_nice(current);
6011 	/* binderfs stashes devices in i_private */
6012 	if (is_binderfs_device(nodp)) {
6013 		binder_dev = nodp->i_private;
6014 		info = nodp->i_sb->s_fs_info;
6015 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
6016 	} else {
6017 		binder_dev = container_of(filp->private_data,
6018 					  struct binder_device, miscdev);
6019 	}
6020 	refcount_inc(&binder_dev->ref);
6021 	proc->context = &binder_dev->context;
6022 	binder_alloc_init(&proc->alloc);
6023 
6024 	binder_stats_created(BINDER_STAT_PROC);
6025 	proc->pid = current->group_leader->pid;
6026 	INIT_LIST_HEAD(&proc->delivered_death);
6027 	INIT_LIST_HEAD(&proc->delivered_freeze);
6028 	INIT_LIST_HEAD(&proc->waiting_threads);
6029 	filp->private_data = proc;
6030 
6031 	mutex_lock(&binder_procs_lock);
6032 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6033 		if (itr->pid == proc->pid) {
6034 			existing_pid = true;
6035 			break;
6036 		}
6037 	}
6038 	hlist_add_head(&proc->proc_node, &binder_procs);
6039 	mutex_unlock(&binder_procs_lock);
6040 
6041 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
6042 		char strbuf[11];
6043 
6044 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6045 		/*
6046 		 * proc debug entries are shared between contexts.
6047 		 * Only create one for the first PID to avoid debugfs spam.
6048 		 * The printing code will print all contexts for a given
6049 		 * PID anyway, so this is not a problem.
6050 		 */
6051 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6052 			binder_debugfs_dir_entry_proc,
6053 			(void *)(unsigned long)proc->pid,
6054 			&proc_fops);
6055 	}
6056 
6057 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
6058 		char strbuf[11];
6059 		struct dentry *binderfs_entry;
6060 
6061 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6062 		/*
6063 		 * Similar to debugfs, the process-specific log file is shared
6064 		 * between contexts. Only create it for the first PID.
6065 		 * This is OK since, as with debugfs, the log file will contain
6066 		 * information on all contexts of a given PID.
6067 		 */
6068 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6069 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6070 		if (!IS_ERR(binderfs_entry)) {
6071 			proc->binderfs_entry = binderfs_entry;
6072 		} else {
6073 			int error;
6074 
6075 			error = PTR_ERR(binderfs_entry);
6076 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
6077 				strbuf, error);
6078 		}
6079 	}
6080 
6081 	return 0;
6082 }
6083 
6084 static int binder_flush(struct file *filp, fl_owner_t id)
6085 {
6086 	struct binder_proc *proc = filp->private_data;
6087 
6088 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6089 
6090 	return 0;
6091 }
6092 
6093 static void binder_deferred_flush(struct binder_proc *proc)
6094 {
6095 	struct rb_node *n;
6096 	int wake_count = 0;
6097 
6098 	binder_inner_proc_lock(proc);
6099 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6100 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6101 
6102 		thread->looper_need_return = true;
6103 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6104 			wake_up_interruptible(&thread->wait);
6105 			wake_count++;
6106 		}
6107 	}
6108 	binder_inner_proc_unlock(proc);
6109 
6110 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6111 		     "binder_flush: %d woke %d threads\n", proc->pid,
6112 		     wake_count);
6113 }
6114 
6115 static int binder_release(struct inode *nodp, struct file *filp)
6116 {
6117 	struct binder_proc *proc = filp->private_data;
6118 
6119 	debugfs_remove(proc->debugfs_entry);
6120 
6121 	if (proc->binderfs_entry) {
6122 		binderfs_remove_file(proc->binderfs_entry);
6123 		proc->binderfs_entry = NULL;
6124 	}
6125 
6126 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6127 
6128 	return 0;
6129 }
6130 
6131 static int binder_node_release(struct binder_node *node, int refs)
6132 {
6133 	struct binder_ref *ref;
6134 	int death = 0;
6135 	struct binder_proc *proc = node->proc;
6136 
6137 	binder_release_work(proc, &node->async_todo);
6138 
6139 	binder_node_lock(node);
6140 	binder_inner_proc_lock(proc);
6141 	binder_dequeue_work_ilocked(&node->work);
6142 	/*
6143 	 * The caller must have taken a temporary ref on the node.
6144 	 */
6145 	BUG_ON(!node->tmp_refs);
6146 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6147 		binder_inner_proc_unlock(proc);
6148 		binder_node_unlock(node);
6149 		binder_free_node(node);
6150 
6151 		return refs;
6152 	}
6153 
6154 	node->proc = NULL;
6155 	node->local_strong_refs = 0;
6156 	node->local_weak_refs = 0;
6157 	binder_inner_proc_unlock(proc);
6158 
6159 	spin_lock(&binder_dead_nodes_lock);
6160 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
6161 	spin_unlock(&binder_dead_nodes_lock);
6162 
6163 	hlist_for_each_entry(ref, &node->refs, node_entry) {
6164 		refs++;
6165 		/*
6166 		 * Need the node lock to synchronize
6167 		 * with new notification requests and the
6168 		 * inner lock to synchronize with queued
6169 		 * death notifications.
6170 		 */
6171 		binder_inner_proc_lock(ref->proc);
6172 		if (!ref->death) {
6173 			binder_inner_proc_unlock(ref->proc);
6174 			continue;
6175 		}
6176 
6177 		death++;
6178 
6179 		BUG_ON(!list_empty(&ref->death->work.entry));
6180 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6181 		binder_enqueue_work_ilocked(&ref->death->work,
6182 					    &ref->proc->todo);
6183 		binder_wakeup_proc_ilocked(ref->proc);
6184 		binder_inner_proc_unlock(ref->proc);
6185 	}
6186 
6187 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
6188 		     "node %d now dead, refs %d, death %d\n",
6189 		     node->debug_id, refs, death);
6190 	binder_node_unlock(node);
6191 	binder_put_node(node);
6192 
6193 	return refs;
6194 }
6195 
6196 static void binder_deferred_release(struct binder_proc *proc)
6197 {
6198 	struct binder_context *context = proc->context;
6199 	struct rb_node *n;
6200 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6201 
6202 	mutex_lock(&binder_procs_lock);
6203 	hlist_del(&proc->proc_node);
6204 	mutex_unlock(&binder_procs_lock);
6205 
6206 	mutex_lock(&context->context_mgr_node_lock);
6207 	if (context->binder_context_mgr_node &&
6208 	    context->binder_context_mgr_node->proc == proc) {
6209 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
6210 			     "%s: %d context_mgr_node gone\n",
6211 			     __func__, proc->pid);
6212 		context->binder_context_mgr_node = NULL;
6213 	}
6214 	mutex_unlock(&context->context_mgr_node_lock);
6215 	binder_inner_proc_lock(proc);
6216 	/*
6217 	 * Make sure proc stays alive after we
6218 	 * remove all the threads
6219 	 */
6220 	proc->tmp_ref++;
6221 
6222 	proc->is_dead = true;
6223 	proc->is_frozen = false;
6224 	proc->sync_recv = false;
6225 	proc->async_recv = false;
6226 	threads = 0;
6227 	active_transactions = 0;
6228 	while ((n = rb_first(&proc->threads))) {
6229 		struct binder_thread *thread;
6230 
6231 		thread = rb_entry(n, struct binder_thread, rb_node);
6232 		binder_inner_proc_unlock(proc);
6233 		threads++;
6234 		active_transactions += binder_thread_release(proc, thread);
6235 		binder_inner_proc_lock(proc);
6236 	}
6237 
6238 	nodes = 0;
6239 	incoming_refs = 0;
6240 	while ((n = rb_first(&proc->nodes))) {
6241 		struct binder_node *node;
6242 
6243 		node = rb_entry(n, struct binder_node, rb_node);
6244 		nodes++;
6245 		/*
6246 		 * take a temporary ref on the node before
6247 		 * calling binder_node_release() which will either
6248 		 * kfree() the node or call binder_put_node()
6249 		 */
6250 		binder_inc_node_tmpref_ilocked(node);
6251 		rb_erase(&node->rb_node, &proc->nodes);
6252 		binder_inner_proc_unlock(proc);
6253 		incoming_refs = binder_node_release(node, incoming_refs);
6254 		binder_inner_proc_lock(proc);
6255 	}
6256 	binder_inner_proc_unlock(proc);
6257 
6258 	outgoing_refs = 0;
6259 	binder_proc_lock(proc);
6260 	while ((n = rb_first(&proc->refs_by_desc))) {
6261 		struct binder_ref *ref;
6262 
6263 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
6264 		outgoing_refs++;
6265 		binder_cleanup_ref_olocked(ref);
6266 		binder_proc_unlock(proc);
6267 		binder_free_ref(ref);
6268 		binder_proc_lock(proc);
6269 	}
6270 	binder_proc_unlock(proc);
6271 
6272 	binder_release_work(proc, &proc->todo);
6273 	binder_release_work(proc, &proc->delivered_death);
6274 	binder_release_work(proc, &proc->delivered_freeze);
6275 
6276 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6277 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6278 		     __func__, proc->pid, threads, nodes, incoming_refs,
6279 		     outgoing_refs, active_transactions);
6280 
6281 	binder_proc_dec_tmpref(proc);
6282 }
6283 
6284 static void binder_deferred_func(struct work_struct *work)
6285 {
6286 	struct binder_proc *proc;
6287 
6288 	int defer;
6289 
6290 	do {
6291 		mutex_lock(&binder_deferred_lock);
6292 		if (!hlist_empty(&binder_deferred_list)) {
6293 			proc = hlist_entry(binder_deferred_list.first,
6294 					struct binder_proc, deferred_work_node);
6295 			hlist_del_init(&proc->deferred_work_node);
6296 			defer = proc->deferred_work;
6297 			proc->deferred_work = 0;
6298 		} else {
6299 			proc = NULL;
6300 			defer = 0;
6301 		}
6302 		mutex_unlock(&binder_deferred_lock);
6303 
6304 		if (defer & BINDER_DEFERRED_FLUSH)
6305 			binder_deferred_flush(proc);
6306 
6307 		if (defer & BINDER_DEFERRED_RELEASE)
6308 			binder_deferred_release(proc); /* frees proc */
6309 	} while (proc);
6310 }
6311 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6312 
6313 static void
6314 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6315 {
6316 	guard(mutex)(&binder_deferred_lock);
6317 	proc->deferred_work |= defer;
6318 	if (hlist_unhashed(&proc->deferred_work_node)) {
6319 		hlist_add_head(&proc->deferred_work_node,
6320 				&binder_deferred_list);
6321 		schedule_work(&binder_deferred_work);
6322 	}
6323 }
6324 
6325 static void print_binder_transaction_ilocked(struct seq_file *m,
6326 					     struct binder_proc *proc,
6327 					     const char *prefix,
6328 					     struct binder_transaction *t)
6329 {
6330 	struct binder_proc *to_proc;
6331 	struct binder_buffer *buffer = t->buffer;
6332 	ktime_t current_time = ktime_get();
6333 
6334 	spin_lock(&t->lock);
6335 	to_proc = t->to_proc;
6336 	seq_printf(m,
6337 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6338 		   prefix, t->debug_id, t,
6339 		   t->from_pid,
6340 		   t->from_tid,
6341 		   to_proc ? to_proc->pid : 0,
6342 		   t->to_thread ? t->to_thread->pid : 0,
6343 		   t->code, t->flags, t->priority, t->need_reply,
6344 		   ktime_ms_delta(current_time, t->start_time));
6345 	spin_unlock(&t->lock);
6346 
6347 	if (proc != to_proc) {
6348 		/*
6349 		 * We can only safely dereference buffer if we are holding
6350 		 * the correct proc inner lock for this node.
6351 		 */
6352 		seq_puts(m, "\n");
6353 		return;
6354 	}
6355 
6356 	if (buffer == NULL) {
6357 		seq_puts(m, " buffer free\n");
6358 		return;
6359 	}
6360 	if (buffer->target_node)
6361 		seq_printf(m, " node %d", buffer->target_node->debug_id);
6362 	seq_printf(m, " size %zd:%zd offset %lx\n",
6363 		   buffer->data_size, buffer->offsets_size,
6364 		   buffer->user_data - proc->alloc.vm_start);
6365 }
6366 
6367 static void print_binder_work_ilocked(struct seq_file *m,
6368 				      struct binder_proc *proc,
6369 				      const char *prefix,
6370 				      const char *transaction_prefix,
6371 				      struct binder_work *w, bool hash_ptrs)
6372 {
6373 	struct binder_node *node;
6374 	struct binder_transaction *t;
6375 
6376 	switch (w->type) {
6377 	case BINDER_WORK_TRANSACTION:
6378 		t = container_of(w, struct binder_transaction, work);
6379 		print_binder_transaction_ilocked(
6380 				m, proc, transaction_prefix, t);
6381 		break;
6382 	case BINDER_WORK_RETURN_ERROR: {
6383 		struct binder_error *e = container_of(
6384 				w, struct binder_error, work);
6385 
6386 		seq_printf(m, "%stransaction error: %u\n",
6387 			   prefix, e->cmd);
6388 	} break;
6389 	case BINDER_WORK_TRANSACTION_COMPLETE:
6390 		seq_printf(m, "%stransaction complete\n", prefix);
6391 		break;
6392 	case BINDER_WORK_NODE:
6393 		node = container_of(w, struct binder_node, work);
6394 		if (hash_ptrs)
6395 			seq_printf(m, "%snode work %d: u%p c%p\n",
6396 				   prefix, node->debug_id,
6397 				   (void *)(long)node->ptr,
6398 				   (void *)(long)node->cookie);
6399 		else
6400 			seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6401 				   prefix, node->debug_id,
6402 				   (u64)node->ptr, (u64)node->cookie);
6403 		break;
6404 	case BINDER_WORK_DEAD_BINDER:
6405 		seq_printf(m, "%shas dead binder\n", prefix);
6406 		break;
6407 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6408 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6409 		break;
6410 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6411 		seq_printf(m, "%shas cleared death notification\n", prefix);
6412 		break;
6413 	case BINDER_WORK_FROZEN_BINDER:
6414 		seq_printf(m, "%shas frozen binder\n", prefix);
6415 		break;
6416 	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6417 		seq_printf(m, "%shas cleared freeze notification\n", prefix);
6418 		break;
6419 	default:
6420 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6421 		break;
6422 	}
6423 }
6424 
6425 static void print_binder_thread_ilocked(struct seq_file *m,
6426 					struct binder_thread *thread,
6427 					bool print_always, bool hash_ptrs)
6428 {
6429 	struct binder_transaction *t;
6430 	struct binder_work *w;
6431 	size_t start_pos = m->count;
6432 	size_t header_pos;
6433 
6434 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6435 			thread->pid, thread->looper,
6436 			thread->looper_need_return,
6437 			atomic_read(&thread->tmp_ref));
6438 	header_pos = m->count;
6439 	t = thread->transaction_stack;
6440 	while (t) {
6441 		if (t->from == thread) {
6442 			print_binder_transaction_ilocked(m, thread->proc,
6443 					"    outgoing transaction", t);
6444 			t = t->from_parent;
6445 		} else if (t->to_thread == thread) {
6446 			print_binder_transaction_ilocked(m, thread->proc,
6447 						 "    incoming transaction", t);
6448 			t = t->to_parent;
6449 		} else {
6450 			print_binder_transaction_ilocked(m, thread->proc,
6451 					"    bad transaction", t);
6452 			t = NULL;
6453 		}
6454 	}
6455 	list_for_each_entry(w, &thread->todo, entry) {
6456 		print_binder_work_ilocked(m, thread->proc, "    ",
6457 					  "    pending transaction",
6458 					  w, hash_ptrs);
6459 	}
6460 	if (!print_always && m->count == header_pos)
6461 		m->count = start_pos;
6462 }
6463 
6464 static void print_binder_node_nilocked(struct seq_file *m,
6465 				       struct binder_node *node,
6466 				       bool hash_ptrs)
6467 {
6468 	struct binder_ref *ref;
6469 	struct binder_work *w;
6470 	int count;
6471 
6472 	count = hlist_count_nodes(&node->refs);
6473 
6474 	if (hash_ptrs)
6475 		seq_printf(m, "  node %d: u%p c%p", node->debug_id,
6476 			   (void *)(long)node->ptr, (void *)(long)node->cookie);
6477 	else
6478 		seq_printf(m, "  node %d: u%016llx c%016llx", node->debug_id,
6479 			   (u64)node->ptr, (u64)node->cookie);
6480 	seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6481 		   node->has_strong_ref, node->has_weak_ref,
6482 		   node->local_strong_refs, node->local_weak_refs,
6483 		   node->internal_strong_refs, count, node->tmp_refs);
6484 	if (count) {
6485 		seq_puts(m, " proc");
6486 		hlist_for_each_entry(ref, &node->refs, node_entry)
6487 			seq_printf(m, " %d", ref->proc->pid);
6488 	}
6489 	seq_puts(m, "\n");
6490 	if (node->proc) {
6491 		list_for_each_entry(w, &node->async_todo, entry)
6492 			print_binder_work_ilocked(m, node->proc, "    ",
6493 					  "    pending async transaction",
6494 					  w, hash_ptrs);
6495 	}
6496 }
6497 
6498 static void print_binder_ref_olocked(struct seq_file *m,
6499 				     struct binder_ref *ref)
6500 {
6501 	binder_node_lock(ref->node);
6502 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6503 		   ref->data.debug_id, ref->data.desc,
6504 		   ref->node->proc ? "" : "dead ",
6505 		   ref->node->debug_id, ref->data.strong,
6506 		   ref->data.weak, ref->death);
6507 	binder_node_unlock(ref->node);
6508 }
6509 
6510 /**
6511  * print_next_binder_node_ilocked() - Print binder_node from a locked list
6512  * @m:          struct seq_file for output via seq_printf()
6513  * @proc:       struct binder_proc whose inner lock we hold (if any)
6514  * @node:       struct binder_node to print fields of
6515  * @prev_node:  struct binder_node we hold a temporary reference to (if any)
6516  * @hash_ptrs:  whether to hash @node's binder_uintptr_t fields
6517  *
6518  * Helper function to handle synchronization around printing a struct
6519  * binder_node while iterating through @proc->nodes or the dead nodes list.
6520  * Caller must hold either @proc->inner_lock (for live nodes) or
6521  * binder_dead_nodes_lock. This lock will be released during the body of this
6522  * function, but it will be reacquired before returning to the caller.
6523  *
6524  * Return:	pointer to the struct binder_node we hold a tmpref on
6525  */
6526 static struct binder_node *
6527 print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6528 			       struct binder_node *node,
6529 			       struct binder_node *prev_node, bool hash_ptrs)
6530 {
6531 	/*
6532 	 * Take a temporary reference on the node so that it isn't freed while
6533 	 * we print it.
6534 	 */
6535 	binder_inc_node_tmpref_ilocked(node);
6536 	/*
6537 	 * Live nodes need to drop the inner proc lock and dead nodes need to
6538 	 * drop the binder_dead_nodes_lock before trying to take the node lock.
6539 	 */
6540 	if (proc)
6541 		binder_inner_proc_unlock(proc);
6542 	else
6543 		spin_unlock(&binder_dead_nodes_lock);
6544 	if (prev_node)
6545 		binder_put_node(prev_node);
6546 	binder_node_inner_lock(node);
6547 	print_binder_node_nilocked(m, node, hash_ptrs);
6548 	binder_node_inner_unlock(node);
6549 	if (proc)
6550 		binder_inner_proc_lock(proc);
6551 	else
6552 		spin_lock(&binder_dead_nodes_lock);
6553 	return node;
6554 }
6555 
6556 static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6557 			      bool print_all, bool hash_ptrs)
6558 {
6559 	struct binder_work *w;
6560 	struct rb_node *n;
6561 	size_t start_pos = m->count;
6562 	size_t header_pos;
6563 	struct binder_node *last_node = NULL;
6564 
6565 	seq_printf(m, "proc %d\n", proc->pid);
6566 	seq_printf(m, "context %s\n", proc->context->name);
6567 	header_pos = m->count;
6568 
6569 	binder_inner_proc_lock(proc);
6570 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6571 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6572 						rb_node), print_all, hash_ptrs);
6573 
6574 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6575 		struct binder_node *node = rb_entry(n, struct binder_node,
6576 						    rb_node);
6577 		if (!print_all && !node->has_async_transaction)
6578 			continue;
6579 
6580 		last_node = print_next_binder_node_ilocked(m, proc, node,
6581 							   last_node,
6582 							   hash_ptrs);
6583 	}
6584 	binder_inner_proc_unlock(proc);
6585 	if (last_node)
6586 		binder_put_node(last_node);
6587 
6588 	if (print_all) {
6589 		binder_proc_lock(proc);
6590 		for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6591 			print_binder_ref_olocked(m, rb_entry(n,
6592 							     struct binder_ref,
6593 							     rb_node_desc));
6594 		binder_proc_unlock(proc);
6595 	}
6596 	binder_alloc_print_allocated(m, &proc->alloc);
6597 	binder_inner_proc_lock(proc);
6598 	list_for_each_entry(w, &proc->todo, entry)
6599 		print_binder_work_ilocked(m, proc, "  ",
6600 					  "  pending transaction", w,
6601 					  hash_ptrs);
6602 	list_for_each_entry(w, &proc->delivered_death, entry) {
6603 		seq_puts(m, "  has delivered dead binder\n");
6604 		break;
6605 	}
6606 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
6607 		seq_puts(m, "  has delivered freeze binder\n");
6608 		break;
6609 	}
6610 	binder_inner_proc_unlock(proc);
6611 	if (!print_all && m->count == header_pos)
6612 		m->count = start_pos;
6613 }
6614 
6615 static const char * const binder_return_strings[] = {
6616 	"BR_ERROR",
6617 	"BR_OK",
6618 	"BR_TRANSACTION",
6619 	"BR_REPLY",
6620 	"BR_ACQUIRE_RESULT",
6621 	"BR_DEAD_REPLY",
6622 	"BR_TRANSACTION_COMPLETE",
6623 	"BR_INCREFS",
6624 	"BR_ACQUIRE",
6625 	"BR_RELEASE",
6626 	"BR_DECREFS",
6627 	"BR_ATTEMPT_ACQUIRE",
6628 	"BR_NOOP",
6629 	"BR_SPAWN_LOOPER",
6630 	"BR_FINISHED",
6631 	"BR_DEAD_BINDER",
6632 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6633 	"BR_FAILED_REPLY",
6634 	"BR_FROZEN_REPLY",
6635 	"BR_ONEWAY_SPAM_SUSPECT",
6636 	"BR_TRANSACTION_PENDING_FROZEN",
6637 	"BR_FROZEN_BINDER",
6638 	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6639 };
6640 
6641 static const char * const binder_command_strings[] = {
6642 	"BC_TRANSACTION",
6643 	"BC_REPLY",
6644 	"BC_ACQUIRE_RESULT",
6645 	"BC_FREE_BUFFER",
6646 	"BC_INCREFS",
6647 	"BC_ACQUIRE",
6648 	"BC_RELEASE",
6649 	"BC_DECREFS",
6650 	"BC_INCREFS_DONE",
6651 	"BC_ACQUIRE_DONE",
6652 	"BC_ATTEMPT_ACQUIRE",
6653 	"BC_REGISTER_LOOPER",
6654 	"BC_ENTER_LOOPER",
6655 	"BC_EXIT_LOOPER",
6656 	"BC_REQUEST_DEATH_NOTIFICATION",
6657 	"BC_CLEAR_DEATH_NOTIFICATION",
6658 	"BC_DEAD_BINDER_DONE",
6659 	"BC_TRANSACTION_SG",
6660 	"BC_REPLY_SG",
6661 	"BC_REQUEST_FREEZE_NOTIFICATION",
6662 	"BC_CLEAR_FREEZE_NOTIFICATION",
6663 	"BC_FREEZE_NOTIFICATION_DONE",
6664 };
6665 
6666 static const char * const binder_objstat_strings[] = {
6667 	"proc",
6668 	"thread",
6669 	"node",
6670 	"ref",
6671 	"death",
6672 	"transaction",
6673 	"transaction_complete",
6674 	"freeze",
6675 };
6676 
6677 static void print_binder_stats(struct seq_file *m, const char *prefix,
6678 			       struct binder_stats *stats)
6679 {
6680 	int i;
6681 
6682 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6683 		     ARRAY_SIZE(binder_command_strings));
6684 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6685 		int temp = atomic_read(&stats->bc[i]);
6686 
6687 		if (temp)
6688 			seq_printf(m, "%s%s: %d\n", prefix,
6689 				   binder_command_strings[i], temp);
6690 	}
6691 
6692 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6693 		     ARRAY_SIZE(binder_return_strings));
6694 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6695 		int temp = atomic_read(&stats->br[i]);
6696 
6697 		if (temp)
6698 			seq_printf(m, "%s%s: %d\n", prefix,
6699 				   binder_return_strings[i], temp);
6700 	}
6701 
6702 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6703 		     ARRAY_SIZE(binder_objstat_strings));
6704 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6705 		     ARRAY_SIZE(stats->obj_deleted));
6706 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6707 		int created = atomic_read(&stats->obj_created[i]);
6708 		int deleted = atomic_read(&stats->obj_deleted[i]);
6709 
6710 		if (created || deleted)
6711 			seq_printf(m, "%s%s: active %d total %d\n",
6712 				   prefix,
6713 				   binder_objstat_strings[i],
6714 				   created - deleted,
6715 				   created);
6716 	}
6717 }
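/*
 * The format strings above produce lines such as (values illustrative):
 *
 *   BC_TRANSACTION: 45841
 *   BR_NOOP: 66910
 *   node: active 12 total 93
 *
 * where "active" is created minus deleted and "total" is all-time created.
 */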
6718 
6719 static void print_binder_proc_stats(struct seq_file *m,
6720 				    struct binder_proc *proc)
6721 {
6722 	struct binder_work *w;
6723 	struct binder_thread *thread;
6724 	struct rb_node *n;
6725 	int count, strong, weak, ready_threads;
6726 	size_t free_async_space =
6727 		binder_alloc_get_free_async_space(&proc->alloc);
6728 
6729 	seq_printf(m, "proc %d\n", proc->pid);
6730 	seq_printf(m, "context %s\n", proc->context->name);
6731 	count = 0;
6732 	ready_threads = 0;
6733 	binder_inner_proc_lock(proc);
6734 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6735 		count++;
6736 
6737 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6738 		ready_threads++;
6739 
6740 	seq_printf(m, "  threads: %d\n", count);
6741 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6742 			"  ready threads %d\n"
6743 			"  free async space %zd\n", proc->requested_threads,
6744 			proc->requested_threads_started, proc->max_threads,
6745 			ready_threads,
6746 			free_async_space);
6747 	count = 0;
6748 	for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6749 		count++;
6750 	binder_inner_proc_unlock(proc);
6751 	seq_printf(m, "  nodes: %d\n", count);
6752 	count = 0;
6753 	strong = 0;
6754 	weak = 0;
6755 	binder_proc_lock(proc);
6756 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6757 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6758 						  rb_node_desc);
6759 		count++;
6760 		strong += ref->data.strong;
6761 		weak += ref->data.weak;
6762 	}
6763 	binder_proc_unlock(proc);
6764 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6765 
6766 	count = binder_alloc_get_allocated_count(&proc->alloc);
6767 	seq_printf(m, "  buffers: %d\n", count);
6768 
6769 	binder_alloc_print_pages(m, &proc->alloc);
6770 
6771 	count = 0;
6772 	binder_inner_proc_lock(proc);
6773 	list_for_each_entry(w, &proc->todo, entry) {
6774 		if (w->type == BINDER_WORK_TRANSACTION)
6775 			count++;
6776 	}
6777 	binder_inner_proc_unlock(proc);
6778 	seq_printf(m, "  pending transactions: %d\n", count);
6779 
6780 	print_binder_stats(m, "  ", &proc->stats);
6781 }
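/*
 * Sample of the per-proc block this emits (values illustrative):
 *
 *   proc 1234
 *   context binder
 *     threads: 5
 *     requested threads: 0+2/15
 *     ready threads 2
 *     free async space 520192
 *     nodes: 12
 *     refs: 30 s 28 w 30
 *     buffers: 4
 *     pending transactions: 0
 */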
6782 
6783 static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6784 {
6785 	struct binder_proc *proc;
6786 	struct binder_node *node;
6787 	struct binder_node *last_node = NULL;
6788 
6789 	seq_puts(m, "binder state:\n");
6790 
6791 	spin_lock(&binder_dead_nodes_lock);
6792 	if (!hlist_empty(&binder_dead_nodes))
6793 		seq_puts(m, "dead nodes:\n");
6794 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6795 		last_node = print_next_binder_node_ilocked(m, NULL, node,
6796 							   last_node,
6797 							   hash_ptrs);
6798 	spin_unlock(&binder_dead_nodes_lock);
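	/*
	 * print_next_binder_node_ilocked() takes a temporary reference on
	 * each node it prints and drops the previous one, so the reference
	 * held on the final node must be released here, after the
	 * dead-nodes lock has been dropped.
	 */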
6799 	if (last_node)
6800 		binder_put_node(last_node);
6801 
6802 	mutex_lock(&binder_procs_lock);
6803 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6804 		print_binder_proc(m, proc, true, hash_ptrs);
6805 	mutex_unlock(&binder_procs_lock);
6806 }
6807 
6808 static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6809 {
6810 	struct binder_proc *proc;
6811 
6812 	seq_puts(m, "binder transactions:\n");
6813 	mutex_lock(&binder_procs_lock);
6814 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6815 		print_binder_proc(m, proc, false, hash_ptrs);
6816 	mutex_unlock(&binder_procs_lock);
6817 }
6818 
6819 static int state_show(struct seq_file *m, void *unused)
6820 {
6821 	print_binder_state(m, false);
6822 	return 0;
6823 }
6824 
6825 static int state_hashed_show(struct seq_file *m, void *unused)
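/*
 * The *_hashed variants print hashed rather than raw kernel pointers in
 * the dumps, so they avoid leaking kernel address-space layout where the
 * raw-pointer files would.
 */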
6826 {
6827 	print_binder_state(m, true);
6828 	return 0;
6829 }
6830 
6831 static int stats_show(struct seq_file *m, void *unused)
6832 {
6833 	struct binder_proc *proc;
6834 
6835 	seq_puts(m, "binder stats:\n");
6836 
6837 	print_binder_stats(m, "", &binder_stats);
6838 
6839 	mutex_lock(&binder_procs_lock);
6840 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6841 		print_binder_proc_stats(m, proc);
6842 	mutex_unlock(&binder_procs_lock);
6843 
6844 	return 0;
6845 }
6846 
6847 static int transactions_show(struct seq_file *m, void *unused)
6848 {
6849 	print_binder_transactions(m, false);
6850 	return 0;
6851 }
6852 
6853 static int transactions_hashed_show(struct seq_file *m, void *unused)
6854 {
6855 	print_binder_transactions(m, true);
6856 	return 0;
6857 }
6858 
6859 static int proc_show(struct seq_file *m, void *unused)
6860 {
6861 	struct binder_proc *itr;
6862 	int pid = (unsigned long)m->private;
6863 
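	/* guard(mutex) (<linux/cleanup.h>) drops the lock on all return paths */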
6864 	guard(mutex)(&binder_procs_lock);
6865 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6866 		if (itr->pid == pid) {
6867 			seq_puts(m, "binder proc state:\n");
6868 			print_binder_proc(m, itr, true, false);
6869 		}
6870 	}
6871 
6872 	return 0;
6873 }
6874 
6875 static void print_binder_transaction_log_entry(struct seq_file *m,
6876 					struct binder_transaction_log_entry *e)
6877 {
6878 	int debug_id = READ_ONCE(e->debug_id_done);
6879 	/*
6880 	 * read barrier to guarantee debug_id_done read before
6881 	 * read barrier to guarantee debug_id_done is read before
6882 	 */
6883 	smp_rmb();
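	/*
	 * e->call_type is 2 for a reply, 1 for a oneway (async) call and
	 * 0 for a synchronous call; the trailing space in "call " pads it
	 * to the width of "reply"/"async" so the columns line up.
	 */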
6884 	seq_printf(m,
6885 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6886 		   e->debug_id, (e->call_type == 2) ? "reply" :
6887 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6888 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6889 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6890 		   e->return_error, e->return_error_param,
6891 		   e->return_error_line);
6892 	/*
6893 	 * read barrier to guarantee debug_id_done is read after
6894 	 * we are done printing the fields of the entry
6895 	 */
6896 	smp_rmb();
6897 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6898 			"\n" : " (incomplete)\n");
6899 }
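/*
 * The transaction log is filled in without locking: the writer clears
 * debug_id_done before reusing an entry and stores the final debug_id
 * behind an smp_wmb() only once the entry is complete. Sampling
 * debug_id_done before and after printing, with the smp_rmb()s above,
 * lets the reader detect an entry that was concurrently overwritten
 * and flag it "(incomplete)" rather than present torn data as valid.
 */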
6900 
6901 static int transaction_log_show(struct seq_file *m, void *unused)
6902 {
6903 	struct binder_transaction_log *log = m->private;
6904 	unsigned int log_cur = atomic_read(&log->cur);
6905 	unsigned int count;
6906 	unsigned int cur;
6907 	int i;
6908 
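	/*
	 * log->cur is the most recently claimed slot and only ever grows;
	 * the slot index is cur modulo the array size. Until the first
	 * wrap the valid entries are 0..cur, printed in order; once the
	 * log is full every slot is valid and the oldest is at cur + 1.
	 */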
6909 	count = log_cur + 1;
6910 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6911 		0 : count % ARRAY_SIZE(log->entry);
6912 	if (count > ARRAY_SIZE(log->entry) || log->full)
6913 		count = ARRAY_SIZE(log->entry);
6914 	for (i = 0; i < count; i++) {
6915 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6916 
6917 		print_binder_transaction_log_entry(m, &log->entry[index]);
6918 	}
6919 	return 0;
6920 }
6921 
6922 const struct file_operations binder_fops = {
6923 	.owner = THIS_MODULE,
6924 	.poll = binder_poll,
6925 	.unlocked_ioctl = binder_ioctl,
6926 	.compat_ioctl = compat_ptr_ioctl,
6927 	.mmap = binder_mmap,
6928 	.open = binder_open,
6929 	.flush = binder_flush,
6930 	.release = binder_release,
6931 };
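/*
 * Note the absence of .read and .write: all data transfer happens via
 * the BINDER_WRITE_READ ioctl and the mmap()ed buffer space, never
 * through plain file I/O.
 */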
6932 
6933 DEFINE_SHOW_ATTRIBUTE(state);
6934 DEFINE_SHOW_ATTRIBUTE(state_hashed);
6935 DEFINE_SHOW_ATTRIBUTE(stats);
6936 DEFINE_SHOW_ATTRIBUTE(transactions);
6937 DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
6938 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6939 
6940 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6941 	{
6942 		.name = "state",
6943 		.mode = 0444,
6944 		.fops = &state_fops,
6945 		.data = NULL,
6946 	},
6947 	{
6948 		.name = "state_hashed",
6949 		.mode = 0444,
6950 		.fops = &state_hashed_fops,
6951 		.data = NULL,
6952 	},
6953 	{
6954 		.name = "stats",
6955 		.mode = 0444,
6956 		.fops = &stats_fops,
6957 		.data = NULL,
6958 	},
6959 	{
6960 		.name = "transactions",
6961 		.mode = 0444,
6962 		.fops = &transactions_fops,
6963 		.data = NULL,
6964 	},
6965 	{
6966 		.name = "transactions_hashed",
6967 		.mode = 0444,
6968 		.fops = &transactions_hashed_fops,
6969 		.data = NULL,
6970 	},
6971 	{
6972 		.name = "transaction_log",
6973 		.mode = 0444,
6974 		.fops = &transaction_log_fops,
6975 		.data = &binder_transaction_log,
6976 	},
6977 	{
6978 		.name = "failed_transaction_log",
6979 		.mode = 0444,
6980 		.fops = &transaction_log_fops,
6981 		.data = &binder_transaction_log_failed,
6982 	},
6983 	{} /* terminator */
6984 };
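/*
 * The empty terminator lets binder_for_each_debugfs_entry() walk this
 * table without a separate length constant; binderfs reuses the same
 * table when populating its per-mount binder_logs directory.
 */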
6985 
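/*
 * binderfs creates and destroys binder devices at runtime, so the
 * global binder_devices list must stay consistent after init; both
 * helpers rely on guard(spinlock) to drop binder_devices_lock on exit.
 */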
6986 void binder_add_device(struct binder_device *device)
6987 {
6988 	guard(spinlock)(&binder_devices_lock);
6989 	hlist_add_head(&device->hlist, &binder_devices);
6990 }
6991 
6992 void binder_remove_device(struct binder_device *device)
6993 {
6994 	guard(spinlock)(&binder_devices_lock);
6995 	hlist_del_init(&device->hlist);
6996 }
6997 
6998 static int __init init_binder_device(const char *name)
6999 {
7000 	int ret;
7001 	struct binder_device *binder_device;
7002 
7003 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
7004 	if (!binder_device)
7005 		return -ENOMEM;
7006 
7007 	binder_device->miscdev.fops = &binder_fops;
7008 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
7009 	binder_device->miscdev.name = name;
7010 
7011 	refcount_set(&binder_device->ref, 1);
7012 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
7013 	binder_device->context.name = name;
7014 	mutex_init(&binder_device->context.context_mgr_node_lock);
7015 
7016 	ret = misc_register(&binder_device->miscdev);
7017 	if (ret < 0) {
7018 		kfree(binder_device);
7019 		return ret;
7020 	}
7021 
7022 	binder_add_device(binder_device);
7023 
7024 	return ret;
7025 }
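/*
 * Each comma-separated name in binder_devices_param (typically
 * "binder,hwbinder,vndbinder", from CONFIG_ANDROID_BINDER_DEVICES)
 * becomes its own misc character device sharing binder_fops; the
 * context name is what the debugfs dumps above report as "context".
 */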
7026 
7027 static int __init binder_init(void)
7028 {
7029 	int ret;
7030 	char *device_name, *device_tmp;
7031 	struct binder_device *device;
7032 	struct hlist_node *tmp;
7033 	char *device_names = NULL;
7034 	const struct binder_debugfs_entry *db_entry;
7035 
7036 	ret = binder_alloc_shrinker_init();
7037 	if (ret)
7038 		return ret;
7039 
7040 	atomic_set(&binder_transaction_log.cur, ~0U);
7041 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
7042 
7043 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
7044 
7045 	binder_for_each_debugfs_entry(db_entry)
7046 		debugfs_create_file(db_entry->name,
7047 					db_entry->mode,
7048 					binder_debugfs_dir_entry_root,
7049 					db_entry->data,
7050 					db_entry->fops);
7051 
7052 	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
7053 						binder_debugfs_dir_entry_root);
7054 
7055 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
7056 	    strcmp(binder_devices_param, "") != 0) {
7057 		/*
7058 		 * Copy the module parameter string, because we don't want to
7059 		 * tokenize it in-place.
7060 		 */
7061 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
7062 		if (!device_names) {
7063 			ret = -ENOMEM;
7064 			goto err_alloc_device_names_failed;
7065 		}
7066 
7067 		device_tmp = device_names;
7068 		while ((device_name = strsep(&device_tmp, ","))) {
7069 			ret = init_binder_device(device_name);
7070 			if (ret)
7071 				goto err_init_binder_device_failed;
7072 		}
7073 	}
7074 
7075 	ret = init_binderfs();
7076 	if (ret)
7077 		goto err_init_binder_device_failed;
7078 
7079 	return ret;
7080 
7081 err_init_binder_device_failed:
7082 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7083 		misc_deregister(&device->miscdev);
7084 		binder_remove_device(device);
7085 		kfree(device);
7086 	}
7087 
7088 	kfree(device_names);
7089 
7090 err_alloc_device_names_failed:
7091 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7092 	binder_alloc_shrinker_exit();
7093 
7094 	return ret;
7095 }
7096 
7097 device_initcall(binder_init);
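/*
 * There is deliberately no module_exit() counterpart: binder cannot be
 * built as a module (the Kconfig option is bool), so the error paths in
 * binder_init() are the only teardown the driver needs.
 */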
7098 
7099 #define CREATE_TRACE_POINTS
7100 #include "binder_trace.h"
7101 
7102 MODULE_LICENSE("GPL v2");
7103