xref: /linux/drivers/android/binder.c (revision 1769f90e5ba2a6d24bb46b85da33fe861c68f005)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel.
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel.
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock to be held on entry indicate the
33  * required lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
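
/*
 * Illustrative sketch (not driver code) of the ordering above,
 * assuming a hypothetical path that needs both proc locks:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	// ... access proc->todo and the thread/node lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_proc_unlock(proc);
 *
 * Locks are released in the reverse order of acquisition so the
 * hierarchy is never violated.
 */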
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70 
71 #include <kunit/visibility.h>
72 
73 #include <uapi/linux/android/binder.h>
74 
75 #include <linux/cacheflush.h>
76 
77 #include "binder_netlink.h"
78 #include "binder_internal.h"
79 #include "binder_trace.h"
80 
81 static HLIST_HEAD(binder_deferred_list);
82 static DEFINE_MUTEX(binder_deferred_lock);
83 
84 static HLIST_HEAD(binder_devices);
85 static DEFINE_SPINLOCK(binder_devices_lock);
86 
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89 
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92 
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96 
97 static int proc_show(struct seq_file *m, void *unused);
98 DEFINE_SHOW_ATTRIBUTE(proc);
99 
100 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
101 
102 enum {
103 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
104 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
105 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
106 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
107 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
108 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
109 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
110 	BINDER_DEBUG_USER_REFS              = 1U << 7,
111 	BINDER_DEBUG_THREADS                = 1U << 8,
112 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
113 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
114 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
115 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
116 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
117 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
118 };
119 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
120 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
121 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
122 
123 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
124 module_param_named(devices, binder_devices_param, charp, 0444);
125 
126 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
127 static int binder_stop_on_user_error;
128 
129 static int binder_set_stop_on_user_error(const char *val,
130 					 const struct kernel_param *kp)
131 {
132 	int ret;
133 
134 	ret = param_set_int(val, kp);
135 	if (binder_stop_on_user_error < 2)
136 		wake_up(&binder_user_error_wait);
137 	return ret;
138 }
139 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
140 	param_get_int, &binder_stop_on_user_error, 0644);
141 
142 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
143 {
144 	struct va_format vaf;
145 	va_list args;
146 
147 	if (binder_debug_mask & mask) {
148 		va_start(args, format);
149 		vaf.va = &args;
150 		vaf.fmt = format;
151 		pr_info_ratelimited("%pV", &vaf);
152 		va_end(args);
153 	}
154 }
155 
156 #define binder_txn_error(x...) \
157 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
158 
159 static __printf(1, 2) void binder_user_error(const char *format, ...)
160 {
161 	struct va_format vaf;
162 	va_list args;
163 
164 	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
165 		va_start(args, format);
166 		vaf.va = &args;
167 		vaf.fmt = format;
168 		pr_info_ratelimited("%pV", &vaf);
169 		va_end(args);
170 	}
171 
172 	if (binder_stop_on_user_error)
173 		binder_stop_on_user_error = 2;
174 }
175 
176 #define binder_set_extended_error(ee, _id, _command, _param) \
177 	do { \
178 		(ee)->id = _id; \
179 		(ee)->command = _command; \
180 		(ee)->param = _param; \
181 	} while (0)
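
/*
 * Usage sketch (illustrative only; the surrounding names are assumed
 * to be in scope): record why a transaction failed so userspace can
 * later retrieve it, e.g. via the BINDER_GET_EXTENDED_ERROR ioctl:
 *
 *	binder_set_extended_error(&thread->ee, t->debug_id,
 *				  BR_DEAD_REPLY, -ECONNREFUSED);
 */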
182 
183 #define to_flat_binder_object(hdr) \
184 	container_of(hdr, struct flat_binder_object, hdr)
185 
186 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
187 
188 #define to_binder_buffer_object(hdr) \
189 	container_of(hdr, struct binder_buffer_object, hdr)
190 
191 #define to_binder_fd_array_object(hdr) \
192 	container_of(hdr, struct binder_fd_array_object, hdr)
193 
194 static struct binder_stats binder_stats;
195 
196 static inline void binder_stats_deleted(enum binder_stat_types type)
197 {
198 	atomic_inc(&binder_stats.obj_deleted[type]);
199 }
200 
201 static inline void binder_stats_created(enum binder_stat_types type)
202 {
203 	atomic_inc(&binder_stats.obj_created[type]);
204 }
205 
206 struct binder_transaction_log_entry {
207 	int debug_id;
208 	int debug_id_done;
209 	int call_type;
210 	int from_proc;
211 	int from_thread;
212 	int target_handle;
213 	int to_proc;
214 	int to_thread;
215 	int to_node;
216 	int data_size;
217 	int offsets_size;
218 	int return_error_line;
219 	uint32_t return_error;
220 	uint32_t return_error_param;
221 	char context_name[BINDERFS_MAX_NAME + 1];
222 };
223 
224 struct binder_transaction_log {
225 	atomic_t cur;
226 	bool full;
227 	struct binder_transaction_log_entry entry[32];
228 };
229 
230 static struct binder_transaction_log binder_transaction_log;
231 static struct binder_transaction_log binder_transaction_log_failed;
232 
233 static struct binder_transaction_log_entry *binder_transaction_log_add(
234 	struct binder_transaction_log *log)
235 {
236 	struct binder_transaction_log_entry *e;
237 	unsigned int cur = atomic_inc_return(&log->cur);
238 
239 	if (cur >= ARRAY_SIZE(log->entry))
240 		log->full = true;
241 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
242 	WRITE_ONCE(e->debug_id_done, 0);
243 	/*
244 	 * write-barrier to synchronize access to e->debug_id_done.
245 	 * We make sure the initialized 0 value is seen before
246 	 * the other fields are zeroed by memset().
247 	 */
248 	smp_wmb();
249 	memset(e, 0, sizeof(*e));
250 	return e;
251 }
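
/*
 * Reader-side sketch (illustrative; print_entry() and
 * mark_incomplete() are hypothetical): consumers pair with the
 * smp_wmb() above by sampling debug_id_done around the copy:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();	// pairs with the smp_wmb() in the writer
 *	print_entry(e);
 *	if (!done || done != READ_ONCE(e->debug_id_done))
 *		mark_incomplete();	// entry was reused mid-read
 */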
252 
253 enum binder_deferred_state {
254 	BINDER_DEFERRED_FLUSH        = 0x01,
255 	BINDER_DEFERRED_RELEASE      = 0x02,
256 };
257 
258 enum {
259 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
260 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
261 	BINDER_LOOPER_STATE_EXITED      = 0x04,
262 	BINDER_LOOPER_STATE_INVALID     = 0x08,
263 	BINDER_LOOPER_STATE_WAITING     = 0x10,
264 	BINDER_LOOPER_STATE_POLL        = 0x20,
265 };
266 
267 /**
268  * binder_proc_lock() - Acquire outer lock for given binder_proc
269  * @proc:         struct binder_proc to acquire
270  *
271  * Acquires proc->outer_lock. Used to protect binder_ref
272  * structures associated with the given proc.
273  */
274 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
275 static void
276 _binder_proc_lock(struct binder_proc *proc, int line)
277 	__acquires(&proc->outer_lock)
278 {
279 	binder_debug(BINDER_DEBUG_SPINLOCKS,
280 		     "%s: line=%d\n", __func__, line);
281 	spin_lock(&proc->outer_lock);
282 }
283 
284 /**
285  * binder_proc_unlock() - Release outer lock for given binder_proc
286  * @proc:         struct binder_proc to release
287  *
288  * Release lock acquired via binder_proc_lock()
289  */
290 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
291 static void
292 _binder_proc_unlock(struct binder_proc *proc, int line)
293 	__releases(&proc->outer_lock)
294 {
295 	binder_debug(BINDER_DEBUG_SPINLOCKS,
296 		     "%s: line=%d\n", __func__, line);
297 	spin_unlock(&proc->outer_lock);
298 }
299 
300 /**
301  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
302  * @proc:         struct binder_proc to acquire
303  *
304  * Acquires proc->inner_lock. Used to protect todo lists
305  */
306 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
307 static void
308 _binder_inner_proc_lock(struct binder_proc *proc, int line)
309 	__acquires(&proc->inner_lock)
310 {
311 	binder_debug(BINDER_DEBUG_SPINLOCKS,
312 		     "%s: line=%d\n", __func__, line);
313 	spin_lock(&proc->inner_lock);
314 }
315 
316 /**
317  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
318  * @proc:         struct binder_proc to release
319  *
320  * Release lock acquired via binder_inner_proc_lock()
321  */
322 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
323 static void
324 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
325 	__releases(&proc->inner_lock)
326 {
327 	binder_debug(BINDER_DEBUG_SPINLOCKS,
328 		     "%s: line=%d\n", __func__, line);
329 	spin_unlock(&proc->inner_lock);
330 }
331 
332 /**
333  * binder_node_lock() - Acquire spinlock for given binder_node
334  * @node:         struct binder_node to acquire
335  *
336  * Acquires node->lock. Used to protect binder_node fields
337  */
338 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
339 static void
340 _binder_node_lock(struct binder_node *node, int line)
341 	__acquires(&node->lock)
342 {
343 	binder_debug(BINDER_DEBUG_SPINLOCKS,
344 		     "%s: line=%d\n", __func__, line);
345 	spin_lock(&node->lock);
346 }
347 
348 /**
349  * binder_node_unlock() - Release spinlock for given binder_node
350  * @node:         struct binder_node to release
351  *
352  * Release lock acquired via binder_node_lock()
353  */
354 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
355 static void
356 _binder_node_unlock(struct binder_node *node, int line)
357 	__releases(&node->lock)
358 {
359 	binder_debug(BINDER_DEBUG_SPINLOCKS,
360 		     "%s: line=%d\n", __func__, line);
361 	spin_unlock(&node->lock);
362 }
363 
364 /**
365  * binder_node_inner_lock() - Acquire node and inner locks
366  * @node:         struct binder_node to acquire
367  *
368  * Acquires node->lock. If node->proc is non-NULL, also acquires
369  * proc->inner_lock. Used to protect binder_node fields
370  */
371 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
372 static void
373 _binder_node_inner_lock(struct binder_node *node, int line)
374 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
375 {
376 	binder_debug(BINDER_DEBUG_SPINLOCKS,
377 		     "%s: line=%d\n", __func__, line);
378 	spin_lock(&node->lock);
379 	if (node->proc)
380 		binder_inner_proc_lock(node->proc);
381 	else
382 		/* annotation for sparse */
383 		__acquire(&node->proc->inner_lock);
384 }
385 
386 /**
387  * binder_node_inner_unlock() - Release node and inner locks
388  * @node:         struct binder_node to release
389  *
390  * Release locks acquired via binder_node_inner_lock()
391  */
392 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
393 static void
394 _binder_node_inner_unlock(struct binder_node *node, int line)
395 	__releases(&node->lock) __releases(&node->proc->inner_lock)
396 {
397 	struct binder_proc *proc = node->proc;
398 
399 	binder_debug(BINDER_DEBUG_SPINLOCKS,
400 		     "%s: line=%d\n", __func__, line);
401 	if (proc)
402 		binder_inner_proc_unlock(proc);
403 	else
404 		/* annotation for sparse */
405 		__release(&node->proc->inner_lock);
406 	spin_unlock(&node->lock);
407 }
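
/*
 * Illustrative pairing (a sketch, not driver code): because
 * binder_node_inner_lock() conditionally takes proc->inner_lock, it
 * must only ever be dropped via its matching unlock helper:
 *
 *	binder_node_inner_lock(node);
 *	node->has_strong_ref = 1;	// field guarded by both locks
 *	binder_node_inner_unlock(node);
 */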
408 
409 static bool binder_worklist_empty_ilocked(struct list_head *list)
410 {
411 	return list_empty(list);
412 }
413 
414 /**
415  * binder_worklist_empty() - Check if no items on the work list
416  * @proc:       binder_proc associated with list
417  * @list:	list to check
418  *
419  * Return: true if there are no items on list, else false
420  */
421 static bool binder_worklist_empty(struct binder_proc *proc,
422 				  struct list_head *list)
423 {
424 	bool ret;
425 
426 	binder_inner_proc_lock(proc);
427 	ret = binder_worklist_empty_ilocked(list);
428 	binder_inner_proc_unlock(proc);
429 	return ret;
430 }
431 
432 /**
433  * binder_enqueue_work_ilocked() - Add an item to the work list
434  * @work:         struct binder_work to add to list
435  * @target_list:  list to add work to
436  *
437  * Adds the work to the specified list. Asserts that work
438  * is not already on a list.
439  *
440  * Requires the proc->inner_lock to be held.
441  */
442 static void
443 binder_enqueue_work_ilocked(struct binder_work *work,
444 			   struct list_head *target_list)
445 {
446 	BUG_ON(target_list == NULL);
447 	BUG_ON(work->entry.next && !list_empty(&work->entry));
448 	list_add_tail(&work->entry, target_list);
449 }
450 
451 /**
452  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
453  * @thread:       thread to queue work to
454  * @work:         struct binder_work to add to list
455  *
456  * Adds the work to the todo list of the thread. Doesn't set the process_todo
457  * flag, which means that (if it wasn't already set) the thread will go to
458  * sleep without handling this work when it next reads from the driver.
459  *
460  * Requires the proc->inner_lock to be held.
461  */
462 static void
463 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
464 					    struct binder_work *work)
465 {
466 	WARN_ON(!list_empty(&thread->waiting_thread_node));
467 	binder_enqueue_work_ilocked(work, &thread->todo);
468 }
469 
470 /**
471  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
472  * @thread:       thread to queue work to
473  * @work:         struct binder_work to add to list
474  *
475  * Adds the work to the todo list of the thread, and enables processing
476  * of the todo queue.
477  *
478  * Requires the proc->inner_lock to be held.
479  */
480 static void
481 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
482 				   struct binder_work *work)
483 {
484 	WARN_ON(!list_empty(&thread->waiting_thread_node));
485 	binder_enqueue_work_ilocked(work, &thread->todo);
486 
487 	/* (e)poll-based threads require an explicit wakeup signal when
488 	 * queuing their own work; they rely on these events to consume
489 	 * messages without blocking on I/O. Without it, threads risk waiting
490 	 * indefinitely without handling the work.
491 	 */
492 	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
493 	    thread->pid == current->pid && !thread->process_todo)
494 		wake_up_interruptible_sync(&thread->wait);
495 
496 	thread->process_todo = true;
497 }
498 
499 /**
500  * binder_enqueue_thread_work() - Add an item to the thread work list
501  * @thread:       thread to queue work to
502  * @work:         struct binder_work to add to list
503  *
504  * Adds the work to the todo list of the thread, and enables processing
505  * of the todo queue.
506  */
507 static void
508 binder_enqueue_thread_work(struct binder_thread *thread,
509 			   struct binder_work *work)
510 {
511 	binder_inner_proc_lock(thread->proc);
512 	binder_enqueue_thread_work_ilocked(thread, work);
513 	binder_inner_proc_unlock(thread->proc);
514 }
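
/*
 * Usage sketch (illustrative; node and t are assumed to be in scope):
 * the deferred variant queues work without marking the queue ready,
 * so a reading thread may sleep past it, while the plain variants
 * also set process_todo:
 *
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *	binder_inner_proc_unlock(thread->proc);
 *
 *	binder_enqueue_thread_work(thread, &t->work);	// sets process_todo
 */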
515 
516 static void
517 binder_dequeue_work_ilocked(struct binder_work *work)
518 {
519 	list_del_init(&work->entry);
520 }
521 
522 /**
523  * binder_dequeue_work() - Removes an item from the work list
524  * @proc:         binder_proc associated with list
525  * @work:         struct binder_work to remove from list
526  *
527  * Removes the specified work item from whatever list it is on.
528  * Can safely be called if work is not on any list.
529  */
530 static void
531 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
532 {
533 	binder_inner_proc_lock(proc);
534 	binder_dequeue_work_ilocked(work);
535 	binder_inner_proc_unlock(proc);
536 }
537 
538 static struct binder_work *binder_dequeue_work_head_ilocked(
539 					struct list_head *list)
540 {
541 	struct binder_work *w;
542 
543 	w = list_first_entry_or_null(list, struct binder_work, entry);
544 	if (w)
545 		list_del_init(&w->entry);
546 	return w;
547 }
548 
549 static void
550 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
551 static void binder_free_thread(struct binder_thread *thread);
552 static void binder_free_proc(struct binder_proc *proc);
553 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
554 
555 static bool binder_has_work_ilocked(struct binder_thread *thread,
556 				    bool do_proc_work)
557 {
558 	return thread->process_todo ||
559 		thread->looper_need_return ||
560 		(do_proc_work &&
561 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
562 }
563 
564 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
565 {
566 	bool has_work;
567 
568 	binder_inner_proc_lock(thread->proc);
569 	has_work = binder_has_work_ilocked(thread, do_proc_work);
570 	binder_inner_proc_unlock(thread->proc);
571 
572 	return has_work;
573 }
574 
575 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
576 {
577 	return !thread->transaction_stack &&
578 		binder_worklist_empty_ilocked(&thread->todo);
579 }
580 
581 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
582 					       bool sync)
583 {
584 	struct rb_node *n;
585 	struct binder_thread *thread;
586 
587 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
588 		thread = rb_entry(n, struct binder_thread, rb_node);
589 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
590 		    binder_available_for_proc_work_ilocked(thread)) {
591 			if (sync)
592 				wake_up_interruptible_sync(&thread->wait);
593 			else
594 				wake_up_interruptible(&thread->wait);
595 		}
596 	}
597 }
598 
599 /**
600  * binder_select_thread_ilocked() - selects a thread for doing proc work.
601  * @proc:	process to select a thread from
602  *
603  * Note that calling this function moves the thread off the waiting_threads
604  * list, so it can only be woken up by the caller of this function, or a
605  * signal. Therefore, callers *should* always wake up the thread this function
606  * returns.
607  *
608  * Return:	If there's a thread currently waiting for process work,
609  *		returns that thread. Otherwise returns NULL.
610  */
611 static struct binder_thread *
612 binder_select_thread_ilocked(struct binder_proc *proc)
613 {
614 	struct binder_thread *thread;
615 
616 	assert_spin_locked(&proc->inner_lock);
617 	thread = list_first_entry_or_null(&proc->waiting_threads,
618 					  struct binder_thread,
619 					  waiting_thread_node);
620 
621 	if (thread)
622 		list_del_init(&thread->waiting_thread_node);
623 
624 	return thread;
625 }
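
/*
 * Caller-side sketch (illustrative; binder_wakeup_proc_ilocked()
 * below is the canonical user): a selected thread has been removed
 * from waiting_threads, so it must always be woken afterwards:
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_wakeup_thread_ilocked(proc, thread, false);
 */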
626 
627 /**
628  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
629  * @proc:	process to wake up a thread in
630  * @thread:	specific thread to wake-up (may be NULL)
631  * @sync:	whether to do a synchronous wake-up
632  *
633  * This function wakes up a thread in the @proc process.
634  * The caller may provide a specific thread to wake-up in
635  * the @thread parameter. If @thread is NULL, this function
636  * will wake up threads that have called poll().
637  *
638  * Note that for this function to work as expected, callers
639  * should first call binder_select_thread() to find a thread
640  * to handle the work (if they don't have a thread already),
641  * and pass the result into the @thread parameter.
642  */
643 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
644 					 struct binder_thread *thread,
645 					 bool sync)
646 {
647 	assert_spin_locked(&proc->inner_lock);
648 
649 	if (thread) {
650 		if (sync)
651 			wake_up_interruptible_sync(&thread->wait);
652 		else
653 			wake_up_interruptible(&thread->wait);
654 		return;
655 	}
656 
657 	/* Didn't find a thread waiting for proc work; this can happen
658 	 * in two scenarios:
659 	 * 1. All threads are busy handling transactions
660 	 *    In that case, one of those threads should call back into
661 	 *    the kernel driver soon and pick up this work.
662 	 * 2. Threads are using the (e)poll interface, in which case
663 	 *    they may be blocked on the waitqueue without having been
664 	 *    added to waiting_threads. For this case, we just iterate
665 	 *    over all threads not handling transaction work, and
666 	 *    wake them all up. We wake all because we don't know whether
667 	 *    a thread that called into (e)poll is handling non-binder
668 	 *    work currently.
669 	 */
670 	binder_wakeup_poll_threads_ilocked(proc, sync);
671 }
672 
673 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
674 {
675 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
676 
677 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
678 }
679 
680 static void binder_set_nice(long nice)
681 {
682 	long min_nice;
683 
684 	if (can_nice(current, nice)) {
685 		set_user_nice(current, nice);
686 		return;
687 	}
688 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
689 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
690 		     "%d: nice value %ld not allowed, using %ld instead\n",
691 		      current->pid, nice, min_nice);
692 	set_user_nice(current, min_nice);
693 	if (min_nice <= MAX_NICE)
694 		return;
695 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
696 }
697 
698 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
699 						   binder_uintptr_t ptr)
700 {
701 	struct rb_node *n = proc->nodes.rb_node;
702 	struct binder_node *node;
703 
704 	assert_spin_locked(&proc->inner_lock);
705 
706 	while (n) {
707 		node = rb_entry(n, struct binder_node, rb_node);
708 
709 		if (ptr < node->ptr)
710 			n = n->rb_left;
711 		else if (ptr > node->ptr)
712 			n = n->rb_right;
713 		else {
714 			/*
715 			 * take an implicit weak reference
716 			 * to ensure node stays alive until
717 			 * call to binder_put_node()
718 			 */
719 			binder_inc_node_tmpref_ilocked(node);
720 			return node;
721 		}
722 	}
723 	return NULL;
724 }
725 
726 static struct binder_node *binder_get_node(struct binder_proc *proc,
727 					   binder_uintptr_t ptr)
728 {
729 	struct binder_node *node;
730 
731 	binder_inner_proc_lock(proc);
732 	node = binder_get_node_ilocked(proc, ptr);
733 	binder_inner_proc_unlock(proc);
734 	return node;
735 }
736 
737 static struct binder_node *binder_init_node_ilocked(
738 						struct binder_proc *proc,
739 						struct binder_node *new_node,
740 						struct flat_binder_object *fp)
741 {
742 	struct rb_node **p = &proc->nodes.rb_node;
743 	struct rb_node *parent = NULL;
744 	struct binder_node *node;
745 	binder_uintptr_t ptr = fp ? fp->binder : 0;
746 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
747 	__u32 flags = fp ? fp->flags : 0;
748 
749 	assert_spin_locked(&proc->inner_lock);
750 
751 	while (*p) {
752 
753 		parent = *p;
754 		node = rb_entry(parent, struct binder_node, rb_node);
755 
756 		if (ptr < node->ptr)
757 			p = &(*p)->rb_left;
758 		else if (ptr > node->ptr)
759 			p = &(*p)->rb_right;
760 		else {
761 			/*
762 			 * A matching node is already in
763 			 * the rb tree. Abandon the init
764 			 * and return it.
765 			 */
766 			binder_inc_node_tmpref_ilocked(node);
767 			return node;
768 		}
769 	}
770 	node = new_node;
771 	binder_stats_created(BINDER_STAT_NODE);
772 	node->tmp_refs++;
773 	rb_link_node(&node->rb_node, parent, p);
774 	rb_insert_color(&node->rb_node, &proc->nodes);
775 	node->debug_id = atomic_inc_return(&binder_last_id);
776 	node->proc = proc;
777 	node->ptr = ptr;
778 	node->cookie = cookie;
779 	node->work.type = BINDER_WORK_NODE;
780 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
781 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
782 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
783 	spin_lock_init(&node->lock);
784 	INIT_LIST_HEAD(&node->work.entry);
785 	INIT_LIST_HEAD(&node->async_todo);
786 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
787 		     "%d:%d node %d u%016llx c%016llx created\n",
788 		     proc->pid, current->pid, node->debug_id,
789 		     (u64)node->ptr, (u64)node->cookie);
790 
791 	return node;
792 }
793 
794 static struct binder_node *binder_new_node(struct binder_proc *proc,
795 					   struct flat_binder_object *fp)
796 {
797 	struct binder_node *node;
798 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
799 
800 	if (!new_node)
801 		return NULL;
802 	binder_inner_proc_lock(proc);
803 	node = binder_init_node_ilocked(proc, new_node, fp);
804 	binder_inner_proc_unlock(proc);
805 	if (node != new_node)
806 		/*
807 		 * The node was already added by another thread
808 		 */
809 		kfree(new_node);
810 
811 	return node;
812 }
813 
814 static void binder_free_node(struct binder_node *node)
815 {
816 	kfree(node);
817 	binder_stats_deleted(BINDER_STAT_NODE);
818 }
819 
820 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
821 				    int internal,
822 				    struct list_head *target_list)
823 {
824 	struct binder_proc *proc = node->proc;
825 
826 	assert_spin_locked(&node->lock);
827 	if (proc)
828 		assert_spin_locked(&proc->inner_lock);
829 	if (strong) {
830 		if (internal) {
831 			if (target_list == NULL &&
832 			    node->internal_strong_refs == 0 &&
833 			    !(node->proc &&
834 			      node == node->proc->context->binder_context_mgr_node &&
835 			      node->has_strong_ref)) {
836 				pr_err("invalid inc strong node for %d\n",
837 					node->debug_id);
838 				return -EINVAL;
839 			}
840 			node->internal_strong_refs++;
841 		} else
842 			node->local_strong_refs++;
843 		if (!node->has_strong_ref && target_list) {
844 			struct binder_thread *thread = container_of(target_list,
845 						    struct binder_thread, todo);
846 			binder_dequeue_work_ilocked(&node->work);
847 			BUG_ON(&thread->todo != target_list);
848 			binder_enqueue_deferred_thread_work_ilocked(thread,
849 								   &node->work);
850 		}
851 	} else {
852 		if (!internal)
853 			node->local_weak_refs++;
854 		if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
855 			binder_enqueue_work_ilocked(&node->work, target_list);
856 	}
857 	return 0;
858 }
859 
860 static int binder_inc_node(struct binder_node *node, int strong, int internal,
861 			   struct list_head *target_list)
862 {
863 	int ret;
864 
865 	binder_node_inner_lock(node);
866 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
867 	binder_node_inner_unlock(node);
868 
869 	return ret;
870 }
871 
872 static bool binder_dec_node_nilocked(struct binder_node *node,
873 				     int strong, int internal)
874 {
875 	struct binder_proc *proc = node->proc;
876 
877 	assert_spin_locked(&node->lock);
878 	if (proc)
879 		assert_spin_locked(&proc->inner_lock);
880 	if (strong) {
881 		if (internal)
882 			node->internal_strong_refs--;
883 		else
884 			node->local_strong_refs--;
885 		if (node->local_strong_refs || node->internal_strong_refs)
886 			return false;
887 	} else {
888 		if (!internal)
889 			node->local_weak_refs--;
890 		if (node->local_weak_refs || node->tmp_refs ||
891 				!hlist_empty(&node->refs))
892 			return false;
893 	}
894 
895 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
896 		if (list_empty(&node->work.entry)) {
897 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
898 			binder_wakeup_proc_ilocked(proc);
899 		}
900 	} else {
901 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
902 		    !node->local_weak_refs && !node->tmp_refs) {
903 			if (proc) {
904 				binder_dequeue_work_ilocked(&node->work);
905 				rb_erase(&node->rb_node, &proc->nodes);
906 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
907 					     "refless node %d deleted\n",
908 					     node->debug_id);
909 			} else {
910 				BUG_ON(!list_empty(&node->work.entry));
911 				spin_lock(&binder_dead_nodes_lock);
912 				/*
913 				 * tmp_refs could have changed so
914 				 * check it again
915 				 */
916 				if (node->tmp_refs) {
917 					spin_unlock(&binder_dead_nodes_lock);
918 					return false;
919 				}
920 				hlist_del(&node->dead_node);
921 				spin_unlock(&binder_dead_nodes_lock);
922 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
923 					     "dead node %d deleted\n",
924 					     node->debug_id);
925 			}
926 			return true;
927 		}
928 	}
929 	return false;
930 }
931 
932 static void binder_dec_node(struct binder_node *node, int strong, int internal)
933 {
934 	bool free_node;
935 
936 	binder_node_inner_lock(node);
937 	free_node = binder_dec_node_nilocked(node, strong, internal);
938 	binder_node_inner_unlock(node);
939 	if (free_node)
940 		binder_free_node(node);
941 }
942 
943 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
944 {
945 	/*
946 	 * No call to binder_inc_node() is needed since we
947 	 * don't need to inform userspace of any changes to
948 	 * tmp_refs
949 	 */
950 	node->tmp_refs++;
951 }
952 
953 /**
954  * binder_inc_node_tmpref() - take a temporary reference on node
955  * @node:	node to reference
956  *
957  * Take reference on node to prevent the node from being freed
958  * while referenced only by a local variable. The inner lock is
959  * needed to serialize with the node work on the queue (which
960  * isn't needed after the node is dead). If the node is dead
961  * (node->proc is NULL), use binder_dead_nodes_lock to protect
962  * node->tmp_refs against dead-node-only cases where the node
963  * lock cannot be acquired (e.g. traversing the dead node list to
964  * print nodes)
965  */
966 static void binder_inc_node_tmpref(struct binder_node *node)
967 {
968 	binder_node_lock(node);
969 	if (node->proc)
970 		binder_inner_proc_lock(node->proc);
971 	else
972 		spin_lock(&binder_dead_nodes_lock);
973 	binder_inc_node_tmpref_ilocked(node);
974 	if (node->proc)
975 		binder_inner_proc_unlock(node->proc);
976 	else
977 		spin_unlock(&binder_dead_nodes_lock);
978 	binder_node_unlock(node);
979 }
980 
981 /**
982  * binder_dec_node_tmpref() - remove a temporary reference on node
983  * @node:	node to reference
984  *
985  * Release temporary reference on node taken via binder_inc_node_tmpref()
986  */
987 static void binder_dec_node_tmpref(struct binder_node *node)
988 {
989 	bool free_node;
990 
991 	binder_node_inner_lock(node);
992 	if (!node->proc)
993 		spin_lock(&binder_dead_nodes_lock);
994 	else
995 		__acquire(&binder_dead_nodes_lock);
996 	node->tmp_refs--;
997 	BUG_ON(node->tmp_refs < 0);
998 	if (!node->proc)
999 		spin_unlock(&binder_dead_nodes_lock);
1000 	else
1001 		__release(&binder_dead_nodes_lock);
1002 	/*
1003 	 * Call binder_dec_node() to check if all refcounts are 0
1004 	 * and cleanup is needed. Calling with strong=0 and internal=1
1005 	 * causes no actual reference to be released in binder_dec_node().
1006 	 * If that changes, a change is needed here too.
1007 	 */
1008 	free_node = binder_dec_node_nilocked(node, 0, 1);
1009 	binder_node_inner_unlock(node);
1010 	if (free_node)
1011 		binder_free_node(node);
1012 }
1013 
1014 static void binder_put_node(struct binder_node *node)
1015 {
1016 	binder_dec_node_tmpref(node);
1017 }
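
/*
 * Typical temporary-reference lifetime (an illustrative sketch):
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp_ref
 *	if (node) {
 *		// ... use node; it cannot be freed here ...
 *		binder_put_node(node);		// may free a dead node
 *	}
 */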
1018 
1019 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1020 						 u32 desc, bool need_strong_ref)
1021 {
1022 	struct rb_node *n = proc->refs_by_desc.rb_node;
1023 	struct binder_ref *ref;
1024 
1025 	while (n) {
1026 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1027 
1028 		if (desc < ref->data.desc) {
1029 			n = n->rb_left;
1030 		} else if (desc > ref->data.desc) {
1031 			n = n->rb_right;
1032 		} else if (need_strong_ref && !ref->data.strong) {
1033 			binder_user_error("tried to use weak ref as strong ref\n");
1034 			return NULL;
1035 		} else {
1036 			return ref;
1037 		}
1038 	}
1039 	return NULL;
1040 }
1041 
1042 /* Find the smallest unused descriptor the "slow way" */
1043 static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
1044 {
1045 	struct binder_ref *ref;
1046 	struct rb_node *n;
1047 	u32 desc;
1048 
1049 	desc = offset;
1050 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1051 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1052 		if (ref->data.desc > desc)
1053 			break;
1054 		desc = ref->data.desc + 1;
1055 	}
1056 
1057 	return desc;
1058 }
1059 
1060 /*
1061  * Find an available reference descriptor ID. The proc->outer_lock might
1062  * be released in the process, in which case -EAGAIN is returned and
1063  * @desc should be considered invalid.
1064  */
1065 static int get_ref_desc_olocked(struct binder_proc *proc,
1066 				struct binder_node *node,
1067 				u32 *desc)
1068 {
1069 	struct dbitmap *dmap = &proc->dmap;
1070 	unsigned int nbits, offset;
1071 	unsigned long *new, bit;
1072 
1073 	/* 0 is reserved for the context manager */
1074 	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
1075 
1076 	if (!dbitmap_enabled(dmap)) {
1077 		*desc = slow_desc_lookup_olocked(proc, offset);
1078 		return 0;
1079 	}
1080 
1081 	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
1082 		*desc = bit;
1083 		return 0;
1084 	}
1085 
1086 	/*
1087 	 * The dbitmap is full and needs to grow. The proc->outer_lock
1088 	 * is briefly released to allocate the new bitmap safely.
1089 	 */
1090 	nbits = dbitmap_grow_nbits(dmap);
1091 	binder_proc_unlock(proc);
1092 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1093 	binder_proc_lock(proc);
1094 	dbitmap_grow(dmap, new, nbits);
1095 
1096 	return -EAGAIN;
1097 }
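
/*
 * Caller-side sketch (illustrative; the real loop lives in
 * binder_get_ref_for_node_olocked() below): because the outer lock
 * may be dropped to grow the bitmap, any state derived under the
 * lock must be recomputed when -EAGAIN is returned:
 *
 *	retry:
 *		// ... rb_tree lookup under proc->outer_lock ...
 *		if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
 *			goto retry;
 */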
1098 
1099 /**
1100  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1101  * @proc:	binder_proc that owns the ref
1102  * @node:	binder_node of target
1103  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1104  *
1105  * Look up the ref for the given node and return it if it exists
1106  *
1107  * If it doesn't exist and the caller provides a newly allocated
1108  * ref, initialize the fields of the newly allocated ref and insert
1109  * into the given proc rb_trees and node refs list.
1110  *
1111  * Return:	the ref for node. It is possible that another thread
1112  *		allocated/initialized the ref first in which case the
1113  *		returned ref would be different from the passed-in
1114  *		new_ref. new_ref must be kfree'd by the caller in
1115  *		this case.
1116  */
1117 static struct binder_ref *binder_get_ref_for_node_olocked(
1118 					struct binder_proc *proc,
1119 					struct binder_node *node,
1120 					struct binder_ref *new_ref)
1121 {
1122 	struct binder_ref *ref;
1123 	struct rb_node *parent;
1124 	struct rb_node **p;
1125 	u32 desc;
1126 
1127 retry:
1128 	p = &proc->refs_by_node.rb_node;
1129 	parent = NULL;
1130 	while (*p) {
1131 		parent = *p;
1132 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1133 
1134 		if (node < ref->node)
1135 			p = &(*p)->rb_left;
1136 		else if (node > ref->node)
1137 			p = &(*p)->rb_right;
1138 		else
1139 			return ref;
1140 	}
1141 	if (!new_ref)
1142 		return NULL;
1143 
1144 	/* might release the proc->outer_lock */
1145 	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1146 		goto retry;
1147 
1148 	binder_stats_created(BINDER_STAT_REF);
1149 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1150 	new_ref->proc = proc;
1151 	new_ref->node = node;
1152 	rb_link_node(&new_ref->rb_node_node, parent, p);
1153 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1154 
1155 	new_ref->data.desc = desc;
1156 	p = &proc->refs_by_desc.rb_node;
1157 	while (*p) {
1158 		parent = *p;
1159 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1160 
1161 		if (new_ref->data.desc < ref->data.desc)
1162 			p = &(*p)->rb_left;
1163 		else if (new_ref->data.desc > ref->data.desc)
1164 			p = &(*p)->rb_right;
1165 		else
1166 			BUG();
1167 	}
1168 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1169 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1170 
1171 	binder_node_lock(node);
1172 	hlist_add_head(&new_ref->node_entry, &node->refs);
1173 
1174 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1175 		     "%d new ref %d desc %d for node %d\n",
1176 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1177 		      node->debug_id);
1178 	binder_node_unlock(node);
1179 	return new_ref;
1180 }
1181 
1182 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1183 {
1184 	struct dbitmap *dmap = &ref->proc->dmap;
1185 	bool delete_node = false;
1186 
1187 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1188 		     "%d delete ref %d desc %d for node %d\n",
1189 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1190 		      ref->node->debug_id);
1191 
1192 	if (dbitmap_enabled(dmap))
1193 		dbitmap_clear_bit(dmap, ref->data.desc);
1194 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1195 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1196 
1197 	binder_node_inner_lock(ref->node);
1198 	if (ref->data.strong)
1199 		binder_dec_node_nilocked(ref->node, 1, 1);
1200 
1201 	hlist_del(&ref->node_entry);
1202 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1203 	binder_node_inner_unlock(ref->node);
1204 	/*
1205 	 * Clear ref->node unless we want the caller to free the node
1206 	 */
1207 	if (!delete_node) {
1208 		/*
1209 		 * The caller uses ref->node to determine
1210 		 * whether the node needs to be freed. Clear
1211 		 * it since the node is still alive.
1212 		 */
1213 		ref->node = NULL;
1214 	}
1215 
1216 	if (ref->death) {
1217 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1218 			     "%d delete ref %d desc %d has death notification\n",
1219 			      ref->proc->pid, ref->data.debug_id,
1220 			      ref->data.desc);
1221 		binder_dequeue_work(ref->proc, &ref->death->work);
1222 		binder_stats_deleted(BINDER_STAT_DEATH);
1223 	}
1224 
1225 	if (ref->freeze) {
1226 		binder_dequeue_work(ref->proc, &ref->freeze->work);
1227 		binder_stats_deleted(BINDER_STAT_FREEZE);
1228 	}
1229 
1230 	binder_stats_deleted(BINDER_STAT_REF);
1231 }
1232 
1233 /**
1234  * binder_inc_ref_olocked() - increment the ref for given handle
1235  * @ref:         ref to be incremented
1236  * @strong:      if true, strong increment, else weak
1237  * @target_list: list to queue node work on
1238  *
1239  * Increment the ref. @ref->proc->outer_lock must be held on entry
1240  *
1241  * Return: 0, if successful, else errno
1242  */
1243 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1244 				  struct list_head *target_list)
1245 {
1246 	int ret;
1247 
1248 	if (strong) {
1249 		if (ref->data.strong == 0) {
1250 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1251 			if (ret)
1252 				return ret;
1253 		}
1254 		ref->data.strong++;
1255 	} else {
1256 		if (ref->data.weak == 0) {
1257 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1258 			if (ret)
1259 				return ret;
1260 		}
1261 		ref->data.weak++;
1262 	}
1263 	return 0;
1264 }
1265 
1266 /**
1267  * binder_dec_ref_olocked() - dec the ref for given handle
1268  * @ref:	ref to be decremented
1269  * @strong:	if true, strong decrement, else weak
1270  *
1271  * Decrement the ref.
1272  *
1273  * Return: %true if ref is cleaned up and ready to be freed.
1274  */
1275 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1276 {
1277 	if (strong) {
1278 		if (ref->data.strong == 0) {
1279 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1280 					  ref->proc->pid, ref->data.debug_id,
1281 					  ref->data.desc, ref->data.strong,
1282 					  ref->data.weak);
1283 			return false;
1284 		}
1285 		ref->data.strong--;
1286 		if (ref->data.strong == 0)
1287 			binder_dec_node(ref->node, strong, 1);
1288 	} else {
1289 		if (ref->data.weak == 0) {
1290 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1291 					  ref->proc->pid, ref->data.debug_id,
1292 					  ref->data.desc, ref->data.strong,
1293 					  ref->data.weak);
1294 			return false;
1295 		}
1296 		ref->data.weak--;
1297 	}
1298 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1299 		binder_cleanup_ref_olocked(ref);
1300 		return true;
1301 	}
1302 	return false;
1303 }
1304 
1305 /**
1306  * binder_get_node_from_ref() - get the node from the given proc/desc
1307  * @proc:	proc containing the ref
1308  * @desc:	the handle associated with the ref
1309  * @need_strong_ref: if true, only return node if ref is strong
1310  * @rdata:	the id/refcount data for the ref
1311  *
1312  * Given a proc and ref handle, return the associated binder_node
1313  *
1314  * Return: a binder_node or NULL if not found or not strong when strong required
1315  */
1316 static struct binder_node *binder_get_node_from_ref(
1317 		struct binder_proc *proc,
1318 		u32 desc, bool need_strong_ref,
1319 		struct binder_ref_data *rdata)
1320 {
1321 	struct binder_node *node;
1322 	struct binder_ref *ref;
1323 
1324 	binder_proc_lock(proc);
1325 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1326 	if (!ref)
1327 		goto err_no_ref;
1328 	node = ref->node;
1329 	/*
1330 	 * Take an implicit reference on the node to ensure
1331 	 * it stays alive until the call to binder_put_node()
1332 	 */
1333 	binder_inc_node_tmpref(node);
1334 	if (rdata)
1335 		*rdata = ref->data;
1336 	binder_proc_unlock(proc);
1337 
1338 	return node;
1339 
1340 err_no_ref:
1341 	binder_proc_unlock(proc);
1342 	return NULL;
1343 }
1344 
1345 /**
1346  * binder_free_ref() - free the binder_ref
1347  * @ref:	ref to free
1348  *
1349  * Free the binder_ref. Free the binder_node indicated by ref->node
1350  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1351  */
1352 static void binder_free_ref(struct binder_ref *ref)
1353 {
1354 	if (ref->node)
1355 		binder_free_node(ref->node);
1356 	kfree(ref->death);
1357 	kfree(ref->freeze);
1358 	kfree(ref);
1359 }
1360 
1361 /* shrink descriptor bitmap if needed */
1362 static void try_shrink_dmap(struct binder_proc *proc)
1363 {
1364 	unsigned long *new;
1365 	int nbits;
1366 
1367 	binder_proc_lock(proc);
1368 	nbits = dbitmap_shrink_nbits(&proc->dmap);
1369 	binder_proc_unlock(proc);
1370 
1371 	if (!nbits)
1372 		return;
1373 
1374 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1375 	binder_proc_lock(proc);
1376 	dbitmap_shrink(&proc->dmap, new, nbits);
1377 	binder_proc_unlock(proc);
1378 }
1379 
1380 /**
1381  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1382  * @proc:	proc containing the ref
1383  * @desc:	the handle associated with the ref
1384  * @increment:	true=inc reference, false=dec reference
1385  * @strong:	true=strong reference, false=weak reference
1386  * @rdata:	the id/refcount data for the ref
1387  *
1388  * Given a proc and ref handle, increment or decrement the ref
1389  * according to "increment" arg.
1390  *
1391  * Return: 0 if successful, else errno
1392  */
1393 static int binder_update_ref_for_handle(struct binder_proc *proc,
1394 		uint32_t desc, bool increment, bool strong,
1395 		struct binder_ref_data *rdata)
1396 {
1397 	int ret = 0;
1398 	struct binder_ref *ref;
1399 	bool delete_ref = false;
1400 
1401 	binder_proc_lock(proc);
1402 	ref = binder_get_ref_olocked(proc, desc, strong);
1403 	if (!ref) {
1404 		ret = -EINVAL;
1405 		goto err_no_ref;
1406 	}
1407 	if (increment)
1408 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1409 	else
1410 		delete_ref = binder_dec_ref_olocked(ref, strong);
1411 
1412 	if (rdata)
1413 		*rdata = ref->data;
1414 	binder_proc_unlock(proc);
1415 
1416 	if (delete_ref) {
1417 		binder_free_ref(ref);
1418 		try_shrink_dmap(proc);
1419 	}
1420 	return ret;
1421 
1422 err_no_ref:
1423 	binder_proc_unlock(proc);
1424 	return ret;
1425 }
1426 
1427 /**
1428  * binder_dec_ref_for_handle() - dec the ref for given handle
1429  * @proc:	proc containing the ref
1430  * @desc:	the handle associated with the ref
1431  * @strong:	true=strong reference, false=weak reference
1432  * @rdata:	the id/refcount data for the ref
1433  *
1434  * Just calls binder_update_ref_for_handle() to decrement the ref.
1435  *
1436  * Return: 0 if successful, else errno
1437  */
1438 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1439 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1440 {
1441 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1442 }
1443 
1444 
1445 /**
1446  * binder_inc_ref_for_node() - increment the ref for given proc/node
1447  * @proc:	 proc containing the ref
1448  * @node:	 target node
1449  * @strong:	 true=strong reference, false=weak reference
1450  * @target_list: worklist to use if node is incremented
1451  * @rdata:	 the id/refcount data for the ref
1452  *
1453  * Given a proc and node, increment the ref. Create the ref if it
1454  * doesn't already exist
1455  *
1456  * Return: 0 if successful, else errno
1457  */
1458 static int binder_inc_ref_for_node(struct binder_proc *proc,
1459 			struct binder_node *node,
1460 			bool strong,
1461 			struct list_head *target_list,
1462 			struct binder_ref_data *rdata)
1463 {
1464 	struct binder_ref *ref;
1465 	struct binder_ref *new_ref = NULL;
1466 	int ret = 0;
1467 
1468 	binder_proc_lock(proc);
1469 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1470 	if (!ref) {
1471 		binder_proc_unlock(proc);
1472 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1473 		if (!new_ref)
1474 			return -ENOMEM;
1475 		binder_proc_lock(proc);
1476 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1477 	}
1478 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1479 	*rdata = ref->data;
1480 	if (ret && ref == new_ref) {
1481 		/*
1482 		 * Clean up the failed reference here as the target
1483 		 * could be dead and may have already released its
1484 		 * references. Cleaning up the new reference with
1485 		 * strong=0 while holding a tmp_ref will not decrement
1486 		 * the node. The new_ref gets kfree'd below.
1487 		 */
1488 		binder_cleanup_ref_olocked(new_ref);
1489 		ref = NULL;
1490 	}
1491 
1492 	binder_proc_unlock(proc);
1493 	if (new_ref && ref != new_ref)
1494 		/*
1495 		 * Another thread created the ref first so
1496 		 * free the one we allocated
1497 		 */
1498 		kfree(new_ref);
1499 	return ret;
1500 }
1501 
1502 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1503 					   struct binder_transaction *t)
1504 {
1505 	BUG_ON(!target_thread);
1506 	assert_spin_locked(&target_thread->proc->inner_lock);
1507 	BUG_ON(target_thread->transaction_stack != t);
1508 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1509 	target_thread->transaction_stack =
1510 		target_thread->transaction_stack->from_parent;
1511 	t->from = NULL;
1512 }
1513 
1514 /**
1515  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1516  * @thread:	thread to decrement
1517  *
1518  * A thread needs to be kept alive while being used to create or
1519  * handle a transaction. binder_get_txn_from() is used to safely
1520  * extract t->from from a binder_transaction and keep the thread
1521  * indicated by t->from from being freed. When done with that
1522  * binder_thread, this function is called to decrement the
1523  * tmp_ref and free if appropriate (thread has been released
1524  * and no transaction being processed by the driver)
1525  */
1526 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1527 {
1528 	/*
1529 	 * The counter is atomic so it is safe to decrement while it
1530 	 * cannot reach zero or thread->is_dead is false.
1531 	 */
1532 	binder_inner_proc_lock(thread->proc);
1533 	atomic_dec(&thread->tmp_ref);
1534 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1535 		binder_inner_proc_unlock(thread->proc);
1536 		binder_free_thread(thread);
1537 		return;
1538 	}
1539 	binder_inner_proc_unlock(thread->proc);
1540 }
1541 
1542 /**
1543  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1544  * @proc:	proc to decrement
1545  *
1546  * A binder_proc needs to be kept alive while being used to create or
1547  * handle a transaction. proc->tmp_ref is incremented when
1548  * creating a new transaction or the binder_proc is currently in-use
1549  * by threads that are being released. When done with the binder_proc,
1550  * this function is called to decrement the counter and free the
1551  * proc if appropriate (proc has been released, all threads have
1552  * been released and not currently in-use to process a transaction).
1553  */
1554 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1555 {
1556 	binder_inner_proc_lock(proc);
1557 	proc->tmp_ref--;
1558 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1559 			!proc->tmp_ref) {
1560 		binder_inner_proc_unlock(proc);
1561 		binder_free_proc(proc);
1562 		return;
1563 	}
1564 	binder_inner_proc_unlock(proc);
1565 }
1566 
1567 /**
1568  * binder_get_txn_from() - safely extract the "from" thread in transaction
1569  * @t:	binder transaction for t->from
1570  *
1571  * Atomically return the "from" thread and increment the tmp_ref
1572  * count for the thread to ensure it stays alive until
1573  * binder_thread_dec_tmpref() is called.
1574  *
1575  * Return: the value of t->from
1576  */
1577 static struct binder_thread *binder_get_txn_from(
1578 		struct binder_transaction *t)
1579 {
1580 	struct binder_thread *from;
1581 
1582 	guard(spinlock)(&t->lock);
1583 	from = t->from;
1584 	if (from)
1585 		atomic_inc(&from->tmp_ref);
1586 	return from;
1587 }
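
/*
 * Illustrative pairing (a sketch, not driver code): every successful
 * binder_get_txn_from() must be balanced with
 * binder_thread_dec_tmpref() once the thread is no longer needed:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		// ... from is safe to dereference here ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */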
1588 
1589 /**
1590  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1591  * @t:	binder transaction for t->from
1592  *
1593  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1594  * to guarantee that the thread cannot be released while operating on it.
1595  * The caller must call binder_inner_proc_unlock() to release the inner lock
1596  * as well as call binder_dec_thread_txn() to release the reference.
1597  *
1598  * Return: the value of t->from
1599  */
1600 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1601 		struct binder_transaction *t)
1602 	__acquires(&t->from->proc->inner_lock)
1603 {
1604 	struct binder_thread *from;
1605 
1606 	from = binder_get_txn_from(t);
1607 	if (!from) {
1608 		__acquire(&from->proc->inner_lock);
1609 		return NULL;
1610 	}
1611 	binder_inner_proc_lock(from->proc);
1612 	if (t->from) {
1613 		BUG_ON(from != t->from);
1614 		return from;
1615 	}
1616 	binder_inner_proc_unlock(from->proc);
1617 	__acquire(&from->proc->inner_lock);
1618 	binder_thread_dec_tmpref(from);
1619 	return NULL;
1620 }
1621 
1622 /**
1623  * binder_free_txn_fixups() - free unprocessed fd fixups
1624  * @t:	binder transaction whose fd fixups should be freed
1625  *
1626  * If the transaction is being torn down prior to being
1627  * processed by the target process, free all of the
1628  * fd fixups and fput the file structs. It is safe to
1629  * call this function after the fixups have been
1630  * processed -- in that case, the list will be empty.
1631  */
1632 static void binder_free_txn_fixups(struct binder_transaction *t)
1633 {
1634 	struct binder_txn_fd_fixup *fixup, *tmp;
1635 
1636 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1637 		fput(fixup->file);
1638 		if (fixup->target_fd >= 0)
1639 			put_unused_fd(fixup->target_fd);
1640 		list_del(&fixup->fixup_entry);
1641 		kfree(fixup);
1642 	}
1643 }
1644 
1645 static void binder_txn_latency_free(struct binder_transaction *t)
1646 {
1647 	int from_proc, from_thread, to_proc, to_thread;
1648 
1649 	spin_lock(&t->lock);
1650 	from_proc = t->from ? t->from->proc->pid : 0;
1651 	from_thread = t->from ? t->from->pid : 0;
1652 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1653 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1654 	spin_unlock(&t->lock);
1655 
1656 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1657 }
1658 
1659 static void binder_free_transaction(struct binder_transaction *t)
1660 {
1661 	struct binder_proc *target_proc = t->to_proc;
1662 
1663 	if (target_proc) {
1664 		binder_inner_proc_lock(target_proc);
1665 		target_proc->outstanding_txns--;
1666 		if (target_proc->outstanding_txns < 0)
1667 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1668 				__func__, target_proc->outstanding_txns);
1669 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1670 			wake_up_interruptible_all(&target_proc->freeze_wait);
1671 		if (t->buffer)
1672 			t->buffer->transaction = NULL;
1673 		binder_inner_proc_unlock(target_proc);
1674 	}
1675 	if (trace_binder_txn_latency_free_enabled())
1676 		binder_txn_latency_free(t);
1677 	/*
1678 	 * If the transaction has no target_proc, then
1679 	 * t->buffer->transaction has already been cleared.
1680 	 */
1681 	binder_free_txn_fixups(t);
1682 	kfree(t);
1683 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1684 }
1685 
1686 static void binder_send_failed_reply(struct binder_transaction *t,
1687 				     uint32_t error_code)
1688 {
1689 	struct binder_thread *target_thread;
1690 	struct binder_transaction *next;
1691 
1692 	BUG_ON(t->flags & TF_ONE_WAY);
1693 	while (1) {
1694 		target_thread = binder_get_txn_from_and_acq_inner(t);
1695 		if (target_thread) {
1696 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1697 				     "send failed reply for transaction %d to %d:%d\n",
1698 				      t->debug_id,
1699 				      target_thread->proc->pid,
1700 				      target_thread->pid);
1701 
1702 			binder_pop_transaction_ilocked(target_thread, t);
1703 			if (target_thread->reply_error.cmd == BR_OK) {
1704 				target_thread->reply_error.cmd = error_code;
1705 				binder_enqueue_thread_work_ilocked(
1706 					target_thread,
1707 					&target_thread->reply_error.work);
1708 				wake_up_interruptible(&target_thread->wait);
1709 			} else {
1710 				/*
1711 				 * Cannot get here for normal operation, but
1712 				 * we can if multiple synchronous transactions
1713 				 * are sent without blocking for responses.
1714 				 * Just ignore the 2nd error in this case.
1715 				 */
1716 				pr_warn("Unexpected reply error: %u\n",
1717 					target_thread->reply_error.cmd);
1718 			}
1719 			binder_inner_proc_unlock(target_thread->proc);
1720 			binder_thread_dec_tmpref(target_thread);
1721 			binder_free_transaction(t);
1722 			return;
1723 		}
1724 		__release(&target_thread->proc->inner_lock);
1725 		next = t->from_parent;
1726 
1727 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1728 			     "send failed reply for transaction %d, target dead\n",
1729 			     t->debug_id);
1730 
1731 		binder_free_transaction(t);
1732 		if (next == NULL) {
1733 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1734 				     "reply failed, no target thread at root\n");
1735 			return;
1736 		}
1737 		t = next;
1738 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1739 			     "reply failed, no target thread -- retry %d\n",
1740 			      t->debug_id);
1741 	}
1742 }
1743 
1744 /**
1745  * binder_cleanup_transaction() - cleans up undelivered transaction
1746  * @t:		transaction that needs to be cleaned up
1747  * @reason:	reason the transaction wasn't delivered
1748  * @error_code:	error to return to caller (if synchronous call)
1749  */
1750 static void binder_cleanup_transaction(struct binder_transaction *t,
1751 				       const char *reason,
1752 				       uint32_t error_code)
1753 {
1754 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1755 		binder_send_failed_reply(t, error_code);
1756 	} else {
1757 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1758 			"undelivered transaction %d, %s\n",
1759 			t->debug_id, reason);
1760 		binder_free_transaction(t);
1761 	}
1762 }
1763 
1764 /**
1765  * binder_get_object() - gets object and checks for valid metadata
1766  * @proc:	binder_proc owning the buffer
1767  * @u:		sender's user pointer to base of buffer
1768  * @buffer:	binder_buffer that we're parsing.
1769  * @offset:	offset in the @buffer at which to validate an object.
1770  * @object:	struct binder_object to read into
1771  *
1772  * Copy the binder object at the given offset into @object. If @u is
1773  * provided then the copy is from the sender's buffer. If not, then
1774  * it is copied from the target's @buffer.
1775  *
1776  * Return:	If there's a valid metadata object at @offset, the
1777  *		size of that object. Otherwise, it returns zero. The object
1778  *		is read into the struct binder_object pointed to by @object.
1779  */
1780 static size_t binder_get_object(struct binder_proc *proc,
1781 				const void __user *u,
1782 				struct binder_buffer *buffer,
1783 				unsigned long offset,
1784 				struct binder_object *object)
1785 {
1786 	size_t read_size;
1787 	struct binder_object_header *hdr;
1788 	size_t object_size = 0;
1789 
1790 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1791 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1792 	    !IS_ALIGNED(offset, sizeof(u32)))
1793 		return 0;
1794 
1795 	if (u) {
1796 		if (copy_from_user(object, u + offset, read_size))
1797 			return 0;
1798 	} else {
1799 		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1800 						  offset, read_size))
1801 			return 0;
1802 	}
1803 
1804 	/* Ok, now see if we read a complete object. */
1805 	hdr = &object->hdr;
1806 	switch (hdr->type) {
1807 	case BINDER_TYPE_BINDER:
1808 	case BINDER_TYPE_WEAK_BINDER:
1809 	case BINDER_TYPE_HANDLE:
1810 	case BINDER_TYPE_WEAK_HANDLE:
1811 		object_size = sizeof(struct flat_binder_object);
1812 		break;
1813 	case BINDER_TYPE_FD:
1814 		object_size = sizeof(struct binder_fd_object);
1815 		break;
1816 	case BINDER_TYPE_PTR:
1817 		object_size = sizeof(struct binder_buffer_object);
1818 		break;
1819 	case BINDER_TYPE_FDA:
1820 		object_size = sizeof(struct binder_fd_array_object);
1821 		break;
1822 	default:
1823 		return 0;
1824 	}
1825 	if (offset <= buffer->data_size - object_size &&
1826 	    buffer->data_size >= object_size)
1827 		return object_size;
1828 	else
1829 		return 0;
1830 }
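
/*
 * Usage sketch, for illustration only (callers below, such as
 * binder_validate_ptr() and binder_transaction_buffer_release(), follow
 * the same pattern): read the object at a candidate offset, treat a zero
 * return as "no valid object here", then dispatch on the header type,
 * after which the matching union member of struct binder_object is safe
 * to use (do_something_with() is a stand-in):
 *
 *	struct binder_object object;
 *	size_t object_size;
 *
 *	object_size = binder_get_object(proc, NULL, buffer, offset, &object);
 *	if (!object_size)
 *		return -EINVAL;
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_FD:
 *		do_something_with(&object.fdo);
 *		break;
 *	}
 */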
1831 
1832 /**
1833  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1834  * @proc:	binder_proc owning the buffer
1835  * @b:		binder_buffer containing the object
1836  * @object:	struct binder_object to read into
1837  * @index:	index in offset array at which the binder_buffer_object is
1838  *		located
1839  * @start_offset: points to the start of the offset array
1840  * @object_offsetp: offset of @object read from @b
1841  * @num_valid:	the number of valid offsets in the offset array
1842  *
1843  * Return:	If @index is within the valid range of the offset array
1844  *		described by @start_offset and @num_valid, and if there's a valid
1845  *		binder_buffer_object at the offset found in index @index
1846  *		of the offset array, that object is returned. Otherwise,
1847  *		%NULL is returned.
1848  *		Note that the offset found in index @index itself is not
1849  *		verified; this function assumes that @num_valid elements
1850  *		from @start_offset were previously verified to have valid offsets.
1851  *		If @object_offsetp is non-NULL, then the offset within
1852  *		@b is written to it.
1853  */
1854 static struct binder_buffer_object *binder_validate_ptr(
1855 						struct binder_proc *proc,
1856 						struct binder_buffer *b,
1857 						struct binder_object *object,
1858 						binder_size_t index,
1859 						binder_size_t start_offset,
1860 						binder_size_t *object_offsetp,
1861 						binder_size_t num_valid)
1862 {
1863 	size_t object_size;
1864 	binder_size_t object_offset;
1865 	unsigned long buffer_offset;
1866 
1867 	if (index >= num_valid)
1868 		return NULL;
1869 
1870 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1871 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1872 					  b, buffer_offset,
1873 					  sizeof(object_offset)))
1874 		return NULL;
1875 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1876 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1877 		return NULL;
1878 	if (object_offsetp)
1879 		*object_offsetp = object_offset;
1880 
1881 	return &object->bbo;
1882 }
1883 
1884 /**
1885  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1886  * @proc:		binder_proc owning the buffer
1887  * @b:			transaction buffer
1888  * @objects_start_offset: offset to start of objects buffer
1889  * @buffer_obj_offset:	offset to the binder_buffer_object in which to fix up
1890  * @fixup_offset:	start offset in @b to fix up
1891  * @last_obj_offset:	offset to the last binder_buffer_object that we fixed up
1892  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1893  *
1894  * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
1895  *			allowed.
1896  *
1897  * For safety reasons, we only allow fixups inside a buffer to happen
1898  * at increasing offsets; additionally, we only allow fixup on the last
1899  * buffer object that was verified, or one of its parents.
1900  *
1901  * Example of what is allowed:
1902  *
1903  * A
1904  *   B (parent = A, offset = 0)
1905  *   C (parent = A, offset = 16)
1906  *     D (parent = C, offset = 0)
1907  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1908  *
1909  * Examples of what is not allowed:
1910  *
1911  * Decreasing offsets within the same parent:
1912  * A
1913  *   C (parent = A, offset = 16)
1914  *   B (parent = A, offset = 0) // decreasing offset within A
1915  *
1916  * Referring to a parent that wasn't the last object or any of its parents:
1917  * A
1918  *   B (parent = A, offset = 0)
1920  *   C (parent = A, offset = 16)
1921  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1922  */
1923 static bool binder_validate_fixup(struct binder_proc *proc,
1924 				  struct binder_buffer *b,
1925 				  binder_size_t objects_start_offset,
1926 				  binder_size_t buffer_obj_offset,
1927 				  binder_size_t fixup_offset,
1928 				  binder_size_t last_obj_offset,
1929 				  binder_size_t last_min_offset)
1930 {
1931 	if (!last_obj_offset) {
1932 		/* No previously verified buffer object to fix up in */
1933 		return false;
1934 	}
1935 
1936 	while (last_obj_offset != buffer_obj_offset) {
1937 		unsigned long buffer_offset;
1938 		struct binder_object last_object;
1939 		struct binder_buffer_object *last_bbo;
1940 		size_t object_size = binder_get_object(proc, NULL, b,
1941 						       last_obj_offset,
1942 						       &last_object);
1943 		if (object_size != sizeof(*last_bbo))
1944 			return false;
1945 
1946 		last_bbo = &last_object.bbo;
1947 		/*
1948 		 * Safe to retrieve the parent of last_obj, since it
1949 		 * was already previously verified by the driver.
1950 		 */
1951 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1952 			return false;
1953 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1954 		buffer_offset = objects_start_offset +
1955 			sizeof(binder_size_t) * last_bbo->parent;
1956 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1957 						  &last_obj_offset,
1958 						  b, buffer_offset,
1959 						  sizeof(last_obj_offset)))
1960 			return false;
1961 	}
1962 	return (fixup_offset >= last_min_offset);
1963 }
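
/*
 * How the two validators above combine, sketched (this mirrors what
 * binder_fixup_parent() does further down): first resolve the parent
 * buffer object by its index in the offset array, then check that the
 * fixup lands at an allowed, non-decreasing offset within it:
 *
 *	struct binder_object object;
 *	binder_size_t parent_offset;
 *	struct binder_buffer_object *parent;
 *
 *	parent = binder_validate_ptr(proc, b, &object, bp->parent,
 *				     off_start_offset, &parent_offset,
 *				     num_valid);
 *	if (!parent)
 *		return -EINVAL;
 *	if (!binder_validate_fixup(proc, b, off_start_offset, parent_offset,
 *				   bp->parent_offset, last_fixup_obj_off,
 *				   last_fixup_min_off))
 *		return -EINVAL;
 */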
1964 
1965 /**
1966  * struct binder_task_work_cb - for deferred close
1967  *
1968  * @twork:                callback_head for task work
1969  * @file:                 file to close
1970  *
1971  * Structure to pass task work to be handled after
1972  * returning from binder_ioctl() via task_work_add().
1973  */
1974 struct binder_task_work_cb {
1975 	struct callback_head twork;
1976 	struct file *file;
1977 };
1978 
1979 /**
1980  * binder_do_fd_close() - close list of file descriptors
1981  * @twork:	callback head for task work
1982  *
1983  * It is not safe to call ksys_close() during the binder_ioctl()
1984  * function if there is a chance that binder's own file descriptor
1985  * might be closed. This is to meet the requirements for using
1986  * fdget() (see comments for __fget_light()). Therefore use
1987  * task_work_add() to schedule the close operation once we have
1988  * returned from binder_ioctl(). This function is a callback
1989  * for that mechanism and does the actual ksys_close() on the
1990  * given file descriptor.
1991  */
1992 static void binder_do_fd_close(struct callback_head *twork)
1993 {
1994 	struct binder_task_work_cb *twcb = container_of(twork,
1995 			struct binder_task_work_cb, twork);
1996 
1997 	fput(twcb->file);
1998 	kfree(twcb);
1999 }
2000 
2001 /**
2002  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2003  * @fd:		file-descriptor to close
2004  *
2005  * See comments in binder_do_fd_close(). This function is used to schedule
2006  * a file-descriptor to be closed after returning from binder_ioctl().
2007  */
2008 static void binder_deferred_fd_close(int fd)
2009 {
2010 	struct binder_task_work_cb *twcb;
2011 
2012 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2013 	if (!twcb)
2014 		return;
2015 	init_task_work(&twcb->twork, binder_do_fd_close);
2016 	twcb->file = file_close_fd(fd);
2017 	if (twcb->file) {
2018 		/* pin it until binder_do_fd_close(); see comments there */
2019 		get_file(twcb->file);
2020 		filp_close(twcb->file, current->files);
2021 		task_work_add(current, &twcb->twork, TWA_RESUME);
2022 	} else {
2023 		kfree(twcb);
2024 	}
2025 }
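
/*
 * The deferred-close pattern above, reduced to its essentials (a sketch;
 * my_release() is a hypothetical callback that frees its own
 * callback_head): work queued with task_work_add() runs when the current
 * task next returns to user space, i.e. after binder_ioctl() has fully
 * unwound.
 *
 *	static void my_release(struct callback_head *head)
 *	{
 *		kfree(head);
 *	}
 *
 *	struct callback_head *head = kzalloc(sizeof(*head), GFP_KERNEL);
 *
 *	if (head) {
 *		init_task_work(head, my_release);
 *		task_work_add(current, head, TWA_RESUME);
 *	}
 */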
2026 
2027 static void binder_transaction_buffer_release(struct binder_proc *proc,
2028 					      struct binder_thread *thread,
2029 					      struct binder_buffer *buffer,
2030 					      binder_size_t off_end_offset,
2031 					      bool is_failure)
2032 {
2033 	int debug_id = buffer->debug_id;
2034 	binder_size_t off_start_offset, buffer_offset;
2035 
2036 	binder_debug(BINDER_DEBUG_TRANSACTION,
2037 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2038 		     proc->pid, buffer->debug_id,
2039 		     buffer->data_size, buffer->offsets_size,
2040 		     (unsigned long long)off_end_offset);
2041 
2042 	if (buffer->target_node)
2043 		binder_dec_node(buffer->target_node, 1, 0);
2044 
2045 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2046 
2047 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2048 	     buffer_offset += sizeof(binder_size_t)) {
2049 		struct binder_object_header *hdr;
2050 		size_t object_size = 0;
2051 		struct binder_object object;
2052 		binder_size_t object_offset;
2053 
2054 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2055 						   buffer, buffer_offset,
2056 						   sizeof(object_offset)))
2057 			object_size = binder_get_object(proc, NULL, buffer,
2058 							object_offset, &object);
2059 		if (object_size == 0) {
2060 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2061 			       debug_id, (u64)object_offset, buffer->data_size);
2062 			continue;
2063 		}
2064 		hdr = &object.hdr;
2065 		switch (hdr->type) {
2066 		case BINDER_TYPE_BINDER:
2067 		case BINDER_TYPE_WEAK_BINDER: {
2068 			struct flat_binder_object *fp;
2069 			struct binder_node *node;
2070 
2071 			fp = to_flat_binder_object(hdr);
2072 			node = binder_get_node(proc, fp->binder);
2073 			if (node == NULL) {
2074 				pr_err("transaction release %d bad node %016llx\n",
2075 				       debug_id, (u64)fp->binder);
2076 				break;
2077 			}
2078 			binder_debug(BINDER_DEBUG_TRANSACTION,
2079 				     "        node %d u%016llx\n",
2080 				     node->debug_id, (u64)node->ptr);
2081 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2082 					0);
2083 			binder_put_node(node);
2084 		} break;
2085 		case BINDER_TYPE_HANDLE:
2086 		case BINDER_TYPE_WEAK_HANDLE: {
2087 			struct flat_binder_object *fp;
2088 			struct binder_ref_data rdata;
2089 			int ret;
2090 
2091 			fp = to_flat_binder_object(hdr);
2092 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2093 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2094 
2095 			if (ret) {
2096 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2097 				       debug_id, fp->handle, ret);
2098 				break;
2099 			}
2100 			binder_debug(BINDER_DEBUG_TRANSACTION,
2101 				     "        ref %d desc %d\n",
2102 				     rdata.debug_id, rdata.desc);
2103 		} break;
2104 
2105 		case BINDER_TYPE_FD: {
2106 			/*
2107 			 * No need to close the file here since user-space
2108 			 * closes it for successfully delivered
2109 			 * transactions. For transactions that weren't
2110 			 * delivered, the new fd was never allocated so
2111 			 * there is no need to close and the fput on the
2112 			 * file is done when the transaction is torn
2113 			 * down.
2114 			 */
2115 		} break;
2116 		case BINDER_TYPE_PTR:
2117 			/*
2118 			 * Nothing to do here, this will get cleaned up when the
2119 			 * transaction buffer gets freed
2120 			 */
2121 			break;
2122 		case BINDER_TYPE_FDA: {
2123 			struct binder_fd_array_object *fda;
2124 			struct binder_buffer_object *parent;
2125 			struct binder_object ptr_object;
2126 			binder_size_t fda_offset;
2127 			size_t fd_index;
2128 			binder_size_t fd_buf_size;
2129 			binder_size_t num_valid;
2130 
2131 			if (is_failure) {
2132 				/*
2133 				 * The fd fixups have not been applied so no
2134 				 * fds need to be closed.
2135 				 */
2136 				continue;
2137 			}
2138 
2139 			num_valid = (buffer_offset - off_start_offset) /
2140 						sizeof(binder_size_t);
2141 			fda = to_binder_fd_array_object(hdr);
2142 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2143 						     fda->parent,
2144 						     off_start_offset,
2145 						     NULL,
2146 						     num_valid);
2147 			if (!parent) {
2148 				pr_err("transaction release %d bad parent offset\n",
2149 				       debug_id);
2150 				continue;
2151 			}
2152 			fd_buf_size = sizeof(u32) * fda->num_fds;
2153 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2154 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2155 				       debug_id, (u64)fda->num_fds);
2156 				continue;
2157 			}
2158 			if (fd_buf_size > parent->length ||
2159 			    fda->parent_offset > parent->length - fd_buf_size) {
2160 				/* No space for all file descriptors here. */
2161 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2162 				       debug_id, (u64)fda->num_fds);
2163 				continue;
2164 			}
2165 			/*
2166 			 * the source data for binder_buffer_object is visible
2167 			 * to user-space and the @buffer element is the user
2168 			 * pointer to the buffer_object containing the fd_array.
2169 			 * Convert the address to an offset relative to
2170 			 * the base of the transaction buffer.
2171 			 */
2172 			fda_offset = parent->buffer - buffer->user_data +
2173 				fda->parent_offset;
2174 			for (fd_index = 0; fd_index < fda->num_fds;
2175 			     fd_index++) {
2176 				u32 fd;
2177 				int err;
2178 				binder_size_t offset = fda_offset +
2179 					fd_index * sizeof(fd);
2180 
2181 				err = binder_alloc_copy_from_buffer(
2182 						&proc->alloc, &fd, buffer,
2183 						offset, sizeof(fd));
2184 				WARN_ON(err);
2185 				if (!err) {
2186 					binder_deferred_fd_close(fd);
2187 					/*
2188 					 * Need to make sure the thread goes
2189 					 * back to userspace to complete the
2190 					 * deferred close
2191 					 */
2192 					if (thread)
2193 						thread->looper_need_return = true;
2194 				}
2195 			}
2196 		} break;
2197 		default:
2198 			pr_err("transaction release %d bad object type %x\n",
2199 				debug_id, hdr->type);
2200 			break;
2201 		}
2202 	}
2203 }
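
/*
 * Buffer layout assumed by the release loop above, sketched:
 *
 *	0 ........................ data_size (padded to sizeof(void *))
 *	| data area: flattened objects at their recorded offsets      |
 *	off_start_offset ......................... off_end_offset
 *	| offsets[]: one binder_size_t per object in the data area    |
 *
 * Each offsets[] element locates one object header in the data area;
 * the loop walks them in order and undoes the reference and fd work
 * that was done when the transaction was translated.
 */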
2204 
2205 /* Clean up all the objects in the buffer */
2206 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2207 						struct binder_thread *thread,
2208 						struct binder_buffer *buffer,
2209 						bool is_failure)
2210 {
2211 	binder_size_t off_end_offset;
2212 
2213 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2214 	off_end_offset += buffer->offsets_size;
2215 
2216 	binder_transaction_buffer_release(proc, thread, buffer,
2217 					  off_end_offset, is_failure);
2218 }
2219 
2220 static int binder_translate_binder(struct flat_binder_object *fp,
2221 				   struct binder_transaction *t,
2222 				   struct binder_thread *thread)
2223 {
2224 	struct binder_node *node;
2225 	struct binder_proc *proc = thread->proc;
2226 	struct binder_proc *target_proc = t->to_proc;
2227 	struct binder_ref_data rdata;
2228 	int ret = 0;
2229 
2230 	node = binder_get_node(proc, fp->binder);
2231 	if (!node) {
2232 		node = binder_new_node(proc, fp);
2233 		if (!node)
2234 			return -ENOMEM;
2235 	}
2236 	if (fp->cookie != node->cookie) {
2237 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2238 				  proc->pid, thread->pid, (u64)fp->binder,
2239 				  node->debug_id, (u64)fp->cookie,
2240 				  (u64)node->cookie);
2241 		ret = -EINVAL;
2242 		goto done;
2243 	}
2244 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2245 		ret = -EPERM;
2246 		goto done;
2247 	}
2248 
2249 	ret = binder_inc_ref_for_node(target_proc, node,
2250 			fp->hdr.type == BINDER_TYPE_BINDER,
2251 			&thread->todo, &rdata);
2252 	if (ret)
2253 		goto done;
2254 
2255 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2256 		fp->hdr.type = BINDER_TYPE_HANDLE;
2257 	else
2258 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2259 	fp->binder = 0;
2260 	fp->handle = rdata.desc;
2261 	fp->cookie = 0;
2262 
2263 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2264 	binder_debug(BINDER_DEBUG_TRANSACTION,
2265 		     "        node %d u%016llx -> ref %d desc %d\n",
2266 		     node->debug_id, (u64)node->ptr,
2267 		     rdata.debug_id, rdata.desc);
2268 done:
2269 	binder_put_node(node);
2270 	return ret;
2271 }
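
/*
 * Net effect on the flat_binder_object, summarized: the sender's local
 * node becomes a handle that is valid in the target process (weak
 * binders become weak handles the same way):
 *
 *	field      before (sender)           after (target)
 *	hdr.type   BINDER_TYPE_BINDER        BINDER_TYPE_HANDLE
 *	binder     sender's object pointer   0
 *	cookie     sender's cookie           0
 *	handle     (unused)                  rdata.desc
 */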
2272 
2273 static int binder_translate_handle(struct flat_binder_object *fp,
2274 				   struct binder_transaction *t,
2275 				   struct binder_thread *thread)
2276 {
2277 	struct binder_proc *proc = thread->proc;
2278 	struct binder_proc *target_proc = t->to_proc;
2279 	struct binder_node *node;
2280 	struct binder_ref_data src_rdata;
2281 	int ret = 0;
2282 
2283 	node = binder_get_node_from_ref(proc, fp->handle,
2284 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2285 	if (!node) {
2286 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2287 				  proc->pid, thread->pid, fp->handle);
2288 		return -EINVAL;
2289 	}
2290 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2291 		ret = -EPERM;
2292 		goto done;
2293 	}
2294 
2295 	binder_node_lock(node);
2296 	if (node->proc == target_proc) {
2297 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2298 			fp->hdr.type = BINDER_TYPE_BINDER;
2299 		else
2300 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2301 		fp->binder = node->ptr;
2302 		fp->cookie = node->cookie;
2303 		if (node->proc)
2304 			binder_inner_proc_lock(node->proc);
2305 		else
2306 			__acquire(&node->proc->inner_lock);
2307 		binder_inc_node_nilocked(node,
2308 					 fp->hdr.type == BINDER_TYPE_BINDER,
2309 					 0, NULL);
2310 		if (node->proc)
2311 			binder_inner_proc_unlock(node->proc);
2312 		else
2313 			__release(&node->proc->inner_lock);
2314 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2315 		binder_debug(BINDER_DEBUG_TRANSACTION,
2316 			     "        ref %d desc %d -> node %d u%016llx\n",
2317 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2318 			     (u64)node->ptr);
2319 		binder_node_unlock(node);
2320 	} else {
2321 		struct binder_ref_data dest_rdata;
2322 
2323 		binder_node_unlock(node);
2324 		ret = binder_inc_ref_for_node(target_proc, node,
2325 				fp->hdr.type == BINDER_TYPE_HANDLE,
2326 				NULL, &dest_rdata);
2327 		if (ret)
2328 			goto done;
2329 
2330 		fp->binder = 0;
2331 		fp->handle = dest_rdata.desc;
2332 		fp->cookie = 0;
2333 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2334 						    &dest_rdata);
2335 		binder_debug(BINDER_DEBUG_TRANSACTION,
2336 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2337 			     src_rdata.debug_id, src_rdata.desc,
2338 			     dest_rdata.debug_id, dest_rdata.desc,
2339 			     node->debug_id);
2340 	}
2341 done:
2342 	binder_put_node(node);
2343 	return ret;
2344 }
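
/*
 * Net effect of the two branches above, summarized:
 *
 *	node owner == target: HANDLE -> BINDER (fp->binder/cookie restored)
 *	node owner != target: HANDLE -> HANDLE (new desc in target's refs)
 */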
2345 
2346 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2347 			       struct binder_transaction *t,
2348 			       struct binder_thread *thread,
2349 			       struct binder_transaction *in_reply_to)
2350 {
2351 	struct binder_proc *proc = thread->proc;
2352 	struct binder_proc *target_proc = t->to_proc;
2353 	struct binder_txn_fd_fixup *fixup;
2354 	struct file *file;
2355 	int ret = 0;
2356 	bool target_allows_fd;
2357 
2358 	if (in_reply_to)
2359 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2360 	else
2361 		target_allows_fd = t->buffer->target_node->accept_fds;
2362 	if (!target_allows_fd) {
2363 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2364 				  proc->pid, thread->pid,
2365 				  in_reply_to ? "reply" : "transaction",
2366 				  fd);
2367 		ret = -EPERM;
2368 		goto err_fd_not_accepted;
2369 	}
2370 
2371 	file = fget(fd);
2372 	if (!file) {
2373 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2374 				  proc->pid, thread->pid, fd);
2375 		ret = -EBADF;
2376 		goto err_fget;
2377 	}
2378 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2379 	if (ret < 0) {
2380 		ret = -EPERM;
2381 		goto err_security;
2382 	}
2383 
2384 	/*
2385 	 * Add fixup record for this transaction. The allocation
2386 	 * of the fd in the target needs to be done from a
2387 	 * target thread.
2388 	 */
2389 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2390 	if (!fixup) {
2391 		ret = -ENOMEM;
2392 		goto err_alloc;
2393 	}
2394 	fixup->file = file;
2395 	fixup->offset = fd_offset;
2396 	fixup->target_fd = -1;
2397 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2398 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2399 
2400 	return ret;
2401 
2402 err_alloc:
2403 err_security:
2404 	fput(file);
2405 err_fget:
2406 err_fd_not_accepted:
2407 	return ret;
2408 }
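
/*
 * Note that the struct file is pinned here but no target-side fd is
 * allocated yet (fixup->target_fd stays -1); the actual fd allocation
 * happens later, from a thread of the target process, when the
 * transaction's fd_fixups list is processed.
 */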
2409 
2410 /**
2411  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2412  * @offset:      offset in target buffer to fixup
2413  * @skip_size:   bytes to skip in copy (fixup will be written later)
2414  * @fixup_data:  data to write at fixup offset
2415  * @node:        list node
2416  *
2417  * This is used for the pointer fixup list (pf) which is created and consumed
2418  * during binder_transaction() and is only accessed locally. No
2419  * locking is necessary.
2420  *
2421  * The list is ordered by @offset.
2422  */
2423 struct binder_ptr_fixup {
2424 	binder_size_t offset;
2425 	size_t skip_size;
2426 	binder_uintptr_t fixup_data;
2427 	struct list_head node;
2428 };
2429 
2430 /**
2431  * struct binder_sg_copy - scatter-gather data to be copied
2432  * @offset:        offset in target buffer
2433  * @sender_uaddr:  user address in source buffer
2434  * @length:        bytes to copy
2435  * @node:          list node
2436  *
2437  * This is used for the sg copy list (sgc) which is created and consumed
2438  * during binder_transaction() and is only accessed locally. No
2439  * locking is necessary.
2440  *
2441  * The list is ordered by @offset.
2442  */
2443 struct binder_sg_copy {
2444 	binder_size_t offset;
2445 	const void __user *sender_uaddr;
2446 	size_t length;
2447 	struct list_head node;
2448 };
2449 
2450 /**
2451  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2452  * @alloc:	binder_alloc associated with @buffer
2453  * @buffer:	binder buffer in target process
2454  * @sgc_head:	list_head of scatter-gather copy list
2455  * @pf_head:	list_head of pointer fixup list
2456  *
2457  * Processes all elements of @sgc_head, applying fixups from @pf_head
2458  * and copying the scatter-gather data from the source process' user
2459  * buffer to the target's buffer. It is expected that the list creation
2460  * and processing all occurs during binder_transaction() so these lists
2461  * are only accessed in local context.
2462  *
2463  * Return: 0=success, else -errno
2464  */
2465 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2466 					 struct binder_buffer *buffer,
2467 					 struct list_head *sgc_head,
2468 					 struct list_head *pf_head)
2469 {
2470 	int ret = 0;
2471 	struct binder_sg_copy *sgc, *tmpsgc;
2472 	struct binder_ptr_fixup *tmppf;
2473 	struct binder_ptr_fixup *pf =
2474 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2475 					 node);
2476 
2477 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2478 		size_t bytes_copied = 0;
2479 
2480 		while (bytes_copied < sgc->length) {
2481 			size_t copy_size;
2482 			size_t bytes_left = sgc->length - bytes_copied;
2483 			size_t offset = sgc->offset + bytes_copied;
2484 
2485 			/*
2486 			 * We copy up to the fixup (pointed to by pf)
2487 			 */
2488 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2489 				       : bytes_left;
2490 			if (!ret && copy_size)
2491 				ret = binder_alloc_copy_user_to_buffer(
2492 						alloc, buffer,
2493 						offset,
2494 						sgc->sender_uaddr + bytes_copied,
2495 						copy_size);
2496 			bytes_copied += copy_size;
2497 			if (copy_size != bytes_left) {
2498 				BUG_ON(!pf);
2499 				/* we stopped at a fixup offset */
2500 				if (pf->skip_size) {
2501 					/*
2502 					 * we are just skipping. This is for
2503 					 * BINDER_TYPE_FDA where the translated
2504 					 * fds will be fixed up when we get
2505 					 * to target context.
2506 					 */
2507 					bytes_copied += pf->skip_size;
2508 				} else {
2509 					/* apply the fixup indicated by pf */
2510 					if (!ret)
2511 						ret = binder_alloc_copy_to_buffer(
2512 							alloc, buffer,
2513 							pf->offset,
2514 							&pf->fixup_data,
2515 							sizeof(pf->fixup_data));
2516 					bytes_copied += sizeof(pf->fixup_data);
2517 				}
2518 				list_del(&pf->node);
2519 				kfree(pf);
2520 				pf = list_first_entry_or_null(pf_head,
2521 						struct binder_ptr_fixup, node);
2522 			}
2523 		}
2524 		list_del(&sgc->node);
2525 		kfree(sgc);
2526 	}
2527 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2528 		BUG_ON(pf->skip_size == 0);
2529 		list_del(&pf->node);
2530 		kfree(pf);
2531 	}
2532 	BUG_ON(!list_empty(sgc_head));
2533 
2534 	return ret > 0 ? -EINVAL : ret;
2535 }
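
/*
 * Worked example, as a sketch: one sg block of 32 bytes at buffer offset
 * 0 with a pointer fixup at offset 8 (and no skip_size). The loop copies
 * up to the fixup, writes fixup_data instead of the sender's value, then
 * resumes copying after it (offsets below assume the 64-bit ABI, where
 * sizeof(pf->fixup_data) is 8):
 *
 *	copy  [0, 8)   from sgc->sender_uaddr
 *	write [8, 16)  from pf->fixup_data
 *	copy  [16, 32) from sgc->sender_uaddr + 16
 *
 * A BINDER_TYPE_FDA fixup (pf->skip_size != 0) instead leaves the
 * skipped range untouched; the fds are written later in the target's
 * context.
 */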
2536 
2537 /**
2538  * binder_cleanup_deferred_txn_lists() - free specified lists
2539  * @sgc_head:	list_head of scatter-gather copy list
2540  * @pf_head:	list_head of pointer fixup list
2541  *
2542  * Called to clean up @sgc_head and @pf_head if there is an
2543  * error.
2544  */
2545 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2546 					      struct list_head *pf_head)
2547 {
2548 	struct binder_sg_copy *sgc, *tmpsgc;
2549 	struct binder_ptr_fixup *pf, *tmppf;
2550 
2551 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2552 		list_del(&sgc->node);
2553 		kfree(sgc);
2554 	}
2555 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2556 		list_del(&pf->node);
2557 		kfree(pf);
2558 	}
2559 }
2560 
2561 /**
2562  * binder_defer_copy() - queue a scatter-gather buffer for copy
2563  * @sgc_head:		list_head of scatter-gather copy list
2564  * @offset:		binder buffer offset in target process
2565  * @sender_uaddr:	user address in source process
2566  * @length:		bytes to copy
2567  *
2568  * Specify a scatter-gather block to be copied. The actual copy must
2569  * be deferred until all the needed fixups are identified and queued.
2570  * Then the copy and fixups are done together so un-translated values
2571  * from the source are never visible in the target buffer.
2572  *
2573  * We are guaranteed that repeated calls to this function will have
2574  * monotonically increasing @offset values so the list will naturally
2575  * be ordered.
2576  *
2577  * Return: 0=success, else -errno
2578  */
2579 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2580 			     const void __user *sender_uaddr, size_t length)
2581 {
2582 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2583 
2584 	if (!bc)
2585 		return -ENOMEM;
2586 
2587 	bc->offset = offset;
2588 	bc->sender_uaddr = sender_uaddr;
2589 	bc->length = length;
2590 	INIT_LIST_HEAD(&bc->node);
2591 
2592 	/*
2593 	 * We are guaranteed that the deferred copies are in-order
2594 	 * so just add to the tail.
2595 	 */
2596 	list_add_tail(&bc->node, sgc_head);
2597 
2598 	return 0;
2599 }
2600 
2601 /**
2602  * binder_add_fixup() - queue a fixup to be applied to sg copy
2603  * @pf_head:	list_head of binder ptr fixup list
2604  * @offset:	binder buffer offset in target process
2605  * @fixup:	bytes to be copied for fixup
2606  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2607  *
2608  * Add the specified fixup to a list ordered by @offset. When copying
2609  * the scatter-gather buffers, the fixup will be copied instead of
2610  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2611  * will be applied later (in target process context), so we just skip
2612  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2613  * value in @fixup.
2614  *
2615  * This function is called *mostly* in @offset order, but there are
2616  * exceptions. Since out-of-order inserts are relatively uncommon,
2617  * we insert the new element by searching backward from the tail of
2618  * the list.
2619  *
2620  * Return: 0=success, else -errno
2621  */
2622 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2623 			    binder_uintptr_t fixup, size_t skip_size)
2624 {
2625 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2626 	struct binder_ptr_fixup *tmppf;
2627 
2628 	if (!pf)
2629 		return -ENOMEM;
2630 
2631 	pf->offset = offset;
2632 	pf->fixup_data = fixup;
2633 	pf->skip_size = skip_size;
2634 	INIT_LIST_HEAD(&pf->node);
2635 
2636 	/* Fixups are *mostly* added in-order, but there are some
2637 	 * exceptions. Look backwards through list for insertion point.
2638 	 */
2639 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2640 		if (tmppf->offset < pf->offset) {
2641 			list_add(&pf->node, &tmppf->node);
2642 			return 0;
2643 		}
2644 	}
2645 	/*
2646 	 * if we get here, then the new offset is the lowest so
2647 	 * insert at the head
2648 	 */
2649 	list_add(&pf->node, pf_head);
2650 	return 0;
2651 }
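
/*
 * Insertion order, illustrated: with fixups already queued at offsets
 * 8 and 40, binder_add_fixup(pf_head, 24, ...) walks backward past 40,
 * finds 8 < 24 and links the new node after it, keeping the list
 * sorted: 8 -> 24 -> 40.
 */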
2652 
2653 static int binder_translate_fd_array(struct list_head *pf_head,
2654 				     struct binder_fd_array_object *fda,
2655 				     const void __user *sender_ubuffer,
2656 				     struct binder_buffer_object *parent,
2657 				     struct binder_buffer_object *sender_uparent,
2658 				     struct binder_transaction *t,
2659 				     struct binder_thread *thread,
2660 				     struct binder_transaction *in_reply_to)
2661 {
2662 	binder_size_t fdi, fd_buf_size;
2663 	binder_size_t fda_offset;
2664 	const void __user *sender_ufda_base;
2665 	struct binder_proc *proc = thread->proc;
2666 	int ret;
2667 
2668 	if (fda->num_fds == 0)
2669 		return 0;
2670 
2671 	fd_buf_size = sizeof(u32) * fda->num_fds;
2672 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2673 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2674 				  proc->pid, thread->pid, (u64)fda->num_fds);
2675 		return -EINVAL;
2676 	}
2677 	if (fd_buf_size > parent->length ||
2678 	    fda->parent_offset > parent->length - fd_buf_size) {
2679 		/* No space for all file descriptors here. */
2680 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2681 				  proc->pid, thread->pid, (u64)fda->num_fds);
2682 		return -EINVAL;
2683 	}
2684 	/*
2685 	 * the source data for binder_buffer_object is visible
2686 	 * to user-space and the @buffer element is the user
2687 	 * pointer to the buffer_object containing the fd_array.
2688 	 * Convert the address to an offset relative to
2689 	 * the base of the transaction buffer.
2690 	 */
2691 	fda_offset = parent->buffer - t->buffer->user_data +
2692 		fda->parent_offset;
2693 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2694 				fda->parent_offset;
2695 
2696 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2697 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2698 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2699 				  proc->pid, thread->pid);
2700 		return -EINVAL;
2701 	}
2702 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2703 	if (ret)
2704 		return ret;
2705 
2706 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2707 		u32 fd;
2708 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2709 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2710 
2711 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2712 		if (!ret)
2713 			ret = binder_translate_fd(fd, offset, t, thread,
2714 						  in_reply_to);
2715 		if (ret)
2716 			return ret > 0 ? -EINVAL : ret;
2717 	}
2718 	return 0;
2719 }
2720 
2721 static int binder_fixup_parent(struct list_head *pf_head,
2722 			       struct binder_transaction *t,
2723 			       struct binder_thread *thread,
2724 			       struct binder_buffer_object *bp,
2725 			       binder_size_t off_start_offset,
2726 			       binder_size_t num_valid,
2727 			       binder_size_t last_fixup_obj_off,
2728 			       binder_size_t last_fixup_min_off)
2729 {
2730 	struct binder_buffer_object *parent;
2731 	struct binder_buffer *b = t->buffer;
2732 	struct binder_proc *proc = thread->proc;
2733 	struct binder_proc *target_proc = t->to_proc;
2734 	struct binder_object object;
2735 	binder_size_t buffer_offset;
2736 	binder_size_t parent_offset;
2737 
2738 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2739 		return 0;
2740 
2741 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2742 				     off_start_offset, &parent_offset,
2743 				     num_valid);
2744 	if (!parent) {
2745 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2746 				  proc->pid, thread->pid);
2747 		return -EINVAL;
2748 	}
2749 
2750 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2751 				   parent_offset, bp->parent_offset,
2752 				   last_fixup_obj_off,
2753 				   last_fixup_min_off)) {
2754 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2755 				  proc->pid, thread->pid);
2756 		return -EINVAL;
2757 	}
2758 
2759 	if (parent->length < sizeof(binder_uintptr_t) ||
2760 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2761 		/* No space for a pointer here! */
2762 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2763 				  proc->pid, thread->pid);
2764 		return -EINVAL;
2765 	}
2766 
2767 	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2768 
2769 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2770 }
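
/*
 * What the queued fixup does, sketched: once the deferred copies run,
 * the slot at @bp->parent_offset inside the parent's target-side copy
 * holds @bp->buffer (the translated child buffer address) rather than
 * whatever pointer value the sender had stored there, so the target
 * never sees sender-side addresses.
 */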
2771 
2772 /**
2773  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2774  * @t1: the pending async txn in the frozen process
2775  * @t2: the new async txn to supersede the outdated pending one
2776  *
2777  * Return:  true if t2 can supersede t1
2778  *          false if t2 cannot supersede t1
2779  */
2780 static bool binder_can_update_transaction(struct binder_transaction *t1,
2781 					  struct binder_transaction *t2)
2782 {
2783 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2784 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2785 		return false;
2786 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2787 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2788 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2789 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2790 		return true;
2791 	return false;
2792 }
2793 
2794 /**
2795  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2796  * @t:		 new async transaction
2797  * @target_list: list to find outdated transaction
2798  *
2799  * Return: the outdated transaction if found
2800  *         NULL if no outdated transaction can be found
2801  *
2802  * Requires the proc->inner_lock to be held.
2803  */
2804 static struct binder_transaction *
2805 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2806 					 struct list_head *target_list)
2807 {
2808 	struct binder_work *w;
2809 
2810 	list_for_each_entry(w, target_list, entry) {
2811 		struct binder_transaction *t_queued;
2812 
2813 		if (w->type != BINDER_WORK_TRANSACTION)
2814 			continue;
2815 		t_queued = container_of(w, struct binder_transaction, work);
2816 		if (binder_can_update_transaction(t_queued, t))
2817 			return t_queued;
2818 	}
2819 	return NULL;
2820 }
2821 
2822 /**
2823  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2824  * @t:		transaction to send
2825  * @proc:	process to send the transaction to
2826  * @thread:	thread in @proc to send the transaction to (may be NULL)
2827  *
2828  * This function queues a transaction to the specified process. It will try
2829  * to find a thread in the target process to handle the transaction and
2830  * wake it up. If no thread is found, the work is queued to the proc
2831  * waitqueue.
2832  *
2833  * If the @thread parameter is not NULL, the transaction is always queued
2834  * to the waitlist of that specific thread.
2835  *
2836  * Return:	0 if the transaction was successfully queued
2837  *		BR_DEAD_REPLY if the target process or thread is dead
2838  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2839  *			the sync transaction was rejected
2840  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2841  *		and the async transaction was successfully queued
2842  */
2843 static int binder_proc_transaction(struct binder_transaction *t,
2844 				    struct binder_proc *proc,
2845 				    struct binder_thread *thread)
2846 {
2847 	struct binder_node *node = t->buffer->target_node;
2848 	bool oneway = !!(t->flags & TF_ONE_WAY);
2849 	bool pending_async = false;
2850 	struct binder_transaction *t_outdated = NULL;
2851 	bool frozen = false;
2852 
2853 	BUG_ON(!node);
2854 	binder_node_lock(node);
2855 	if (oneway) {
2856 		BUG_ON(thread);
2857 		if (node->has_async_transaction)
2858 			pending_async = true;
2859 		else
2860 			node->has_async_transaction = true;
2861 	}
2862 
2863 	binder_inner_proc_lock(proc);
2864 	if (proc->is_frozen) {
2865 		frozen = true;
2866 		proc->sync_recv |= !oneway;
2867 		proc->async_recv |= oneway;
2868 	}
2869 
2870 	if ((frozen && !oneway) || proc->is_dead ||
2871 			(thread && thread->is_dead)) {
2872 		binder_inner_proc_unlock(proc);
2873 		binder_node_unlock(node);
2874 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2875 	}
2876 
2877 	if (!thread && !pending_async)
2878 		thread = binder_select_thread_ilocked(proc);
2879 
2880 	if (thread) {
2881 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2882 	} else if (!pending_async) {
2883 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2884 	} else {
2885 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2886 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2887 									      &node->async_todo);
2888 			if (t_outdated) {
2889 				binder_debug(BINDER_DEBUG_TRANSACTION,
2890 					     "txn %d supersedes %d\n",
2891 					     t->debug_id, t_outdated->debug_id);
2892 				list_del_init(&t_outdated->work.entry);
2893 				proc->outstanding_txns--;
2894 			}
2895 		}
2896 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2897 	}
2898 
2899 	if (!pending_async)
2900 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2901 
2902 	proc->outstanding_txns++;
2903 	binder_inner_proc_unlock(proc);
2904 	binder_node_unlock(node);
2905 
2906 	/*
2907 	 * To reduce potential contention, free the outdated transaction and
2908 	 * buffer after releasing the locks.
2909 	 */
2910 	if (t_outdated) {
2911 		struct binder_buffer *buffer = t_outdated->buffer;
2912 
2913 		t_outdated->buffer = NULL;
2914 		buffer->transaction = NULL;
2915 		trace_binder_transaction_update_buffer_release(buffer);
2916 		binder_release_entire_buffer(proc, NULL, buffer, false);
2917 		binder_alloc_free_buf(&proc->alloc, buffer);
2918 		kfree(t_outdated);
2919 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2920 	}
2921 
2922 	if (oneway && frozen)
2923 		return BR_TRANSACTION_PENDING_FROZEN;
2924 
2925 	return 0;
2926 }
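
/*
 * Queueing decision made above, summarized (locking elided):
 *
 *	sync, or async with no async txn already pending at the node:
 *		thread ? thread->todo : proc->todo, then wake a thread
 *	async with an async txn already pending:
 *		node->async_todo, no wakeup (moved to the proc todo list
 *		when the previous async buffer is freed)
 *	target frozen:
 *		sync fails with BR_FROZEN_REPLY; async is still queued
 *		and reported as BR_TRANSACTION_PENDING_FROZEN
 */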
2927 
2928 /**
2929  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2930  * @node:         struct binder_node for which to get refs
2931  * @procp:        returns @node->proc if valid
2932  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2933  *
2934  * User-space normally keeps the node alive when creating a transaction
2935  * since it has a reference to the target. The local strong ref keeps it
2936  * alive if the sending process dies before the target process processes
2937  * the transaction. If the source process is malicious or has a reference
2938  * counting bug, relying on the local strong ref can fail.
2939  *
2940  * Since user-space can cause the local strong ref to go away, we also take
2941  * a tmpref on the node to ensure it survives while we are constructing
2942  * the transaction. We also need a tmpref on the proc while we are
2943  * constructing the transaction, so we take that here as well.
2944  *
2945  * Return: The target_node with refs taken, or NULL if @node->proc is NULL
2946  * (indicating that the target proc has died), in which case @error is set
2947  * to BR_DEAD_REPLY. On success, @procp is also set to @node->proc.
2948  */
2949 static struct binder_node *binder_get_node_refs_for_txn(
2950 		struct binder_node *node,
2951 		struct binder_proc **procp,
2952 		uint32_t *error)
2953 {
2954 	struct binder_node *target_node = NULL;
2955 
2956 	binder_node_inner_lock(node);
2957 	if (node->proc) {
2958 		target_node = node;
2959 		binder_inc_node_nilocked(node, 1, 0, NULL);
2960 		binder_inc_node_tmpref_ilocked(node);
2961 		node->proc->tmp_ref++;
2962 		*procp = node->proc;
2963 	} else
2964 		*error = BR_DEAD_REPLY;
2965 	binder_node_inner_unlock(node);
2966 
2967 	return target_node;
2968 }
2969 
2970 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2971 				      uint32_t command, int32_t param)
2972 {
2973 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2974 
2975 	if (!from) {
2976 		/* annotation for sparse */
2977 		__release(&from->proc->inner_lock);
2978 		return;
2979 	}
2980 
2981 	/* don't override existing errors */
2982 	if (from->ee.command == BR_OK)
2983 		binder_set_extended_error(&from->ee, id, command, param);
2984 	binder_inner_proc_unlock(from->proc);
2985 	binder_thread_dec_tmpref(from);
2986 }
2987 
2988 /**
2989  * binder_netlink_report() - report a transaction failure via netlink
2990  * @proc:	the binder proc sending the transaction
2991  * @t:		the binder transaction that failed
2992  * @data_size:	the user provided data size for the transaction
2993  * @error:	enum binder_driver_return_protocol returned to sender
2994  *
2995  * Note that t->buffer is not safe to access here, as it may have been
2996  * released (or not yet allocated). Callers should guarantee all the
2997  * transaction items used here are safe to access.
2998  */
2999 static void binder_netlink_report(struct binder_proc *proc,
3000 				  struct binder_transaction *t,
3001 				  u32 data_size,
3002 				  u32 error)
3003 {
3004 	const char *context = proc->context->name;
3005 	struct sk_buff *skb;
3006 	void *hdr;
3007 
3008 	if (!genl_has_listeners(&binder_nl_family, &init_net,
3009 				BINDER_NLGRP_REPORT))
3010 		return;
3011 
3012 	trace_binder_netlink_report(context, t, data_size, error);
3013 
3014 	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
3015 	if (!skb)
3016 		return;
3017 
3018 	hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
3019 	if (!hdr)
3020 		goto free_skb;
3021 
3022 	if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
3023 	    nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
3024 	    nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
3025 	    nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
3026 		goto cancel_skb;
3027 
3028 	if (t->to_proc &&
3029 	    nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
3030 		goto cancel_skb;
3031 
3032 	if (t->to_thread &&
3033 	    nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
3034 		goto cancel_skb;
3035 
3036 	if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
3037 		goto cancel_skb;
3038 
3039 	if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
3040 	    nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
3041 	    nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
3042 		goto cancel_skb;
3043 
3044 	genlmsg_end(skb, hdr);
3045 	genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
3046 			  GFP_KERNEL);
3047 	return;
3048 
3049 cancel_skb:
3050 	genlmsg_cancel(skb, hdr);
3051 free_skb:
3052 	nlmsg_free(skb);
3053 }
3054 
3055 static void binder_transaction(struct binder_proc *proc,
3056 			       struct binder_thread *thread,
3057 			       struct binder_transaction_data *tr, int reply,
3058 			       binder_size_t extra_buffers_size)
3059 {
3060 	int ret;
3061 	struct binder_transaction *t;
3062 	struct binder_work *w;
3063 	struct binder_work *tcomplete;
3064 	binder_size_t buffer_offset = 0;
3065 	binder_size_t off_start_offset, off_end_offset;
3066 	binder_size_t off_min;
3067 	binder_size_t sg_buf_offset, sg_buf_end_offset;
3068 	binder_size_t user_offset = 0;
3069 	struct binder_proc *target_proc = NULL;
3070 	struct binder_thread *target_thread = NULL;
3071 	struct binder_node *target_node = NULL;
3072 	struct binder_transaction *in_reply_to = NULL;
3073 	struct binder_transaction_log_entry *e;
3074 	uint32_t return_error = 0;
3075 	uint32_t return_error_param = 0;
3076 	uint32_t return_error_line = 0;
3077 	binder_size_t last_fixup_obj_off = 0;
3078 	binder_size_t last_fixup_min_off = 0;
3079 	struct binder_context *context = proc->context;
3080 	int t_debug_id = atomic_inc_return(&binder_last_id);
3081 	ktime_t t_start_time = ktime_get();
3082 	struct lsm_context lsmctx = { };
3083 	struct list_head sgc_head;
3084 	struct list_head pf_head;
3085 	const void __user *user_buffer = (const void __user *)
3086 				(uintptr_t)tr->data.ptr.buffer;
3087 	INIT_LIST_HEAD(&sgc_head);
3088 	INIT_LIST_HEAD(&pf_head);
3089 
3090 	e = binder_transaction_log_add(&binder_transaction_log);
3091 	e->debug_id = t_debug_id;
3092 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3093 	e->from_proc = proc->pid;
3094 	e->from_thread = thread->pid;
3095 	e->target_handle = tr->target.handle;
3096 	e->data_size = tr->data_size;
3097 	e->offsets_size = tr->offsets_size;
3098 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3099 
3100 	binder_inner_proc_lock(proc);
3101 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3102 	binder_inner_proc_unlock(proc);
3103 
3104 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3105 	if (!t) {
3106 		binder_txn_error("%d:%d cannot allocate transaction\n",
3107 				 thread->pid, proc->pid);
3108 		return_error = BR_FAILED_REPLY;
3109 		return_error_param = -ENOMEM;
3110 		return_error_line = __LINE__;
3111 		goto err_alloc_t_failed;
3112 	}
3113 	INIT_LIST_HEAD(&t->fd_fixups);
3114 	binder_stats_created(BINDER_STAT_TRANSACTION);
3115 	spin_lock_init(&t->lock);
3116 	t->debug_id = t_debug_id;
3117 	t->start_time = t_start_time;
3118 	t->from_pid = proc->pid;
3119 	t->from_tid = thread->pid;
3120 	t->sender_euid = task_euid(proc->tsk);
3121 	t->code = tr->code;
3122 	t->flags = tr->flags;
3123 	t->priority = task_nice(current);
3124 	t->work.type = BINDER_WORK_TRANSACTION;
3125 	t->is_async = !reply && (tr->flags & TF_ONE_WAY);
3126 	t->is_reply = reply;
3127 	if (!reply && !(tr->flags & TF_ONE_WAY))
3128 		t->from = thread;
3129 
3130 	if (reply) {
3131 		binder_inner_proc_lock(proc);
3132 		in_reply_to = thread->transaction_stack;
3133 		if (in_reply_to == NULL) {
3134 			binder_inner_proc_unlock(proc);
3135 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3136 					  proc->pid, thread->pid);
3137 			return_error = BR_FAILED_REPLY;
3138 			return_error_param = -EPROTO;
3139 			return_error_line = __LINE__;
3140 			goto err_empty_call_stack;
3141 		}
3142 		if (in_reply_to->to_thread != thread) {
3143 			spin_lock(&in_reply_to->lock);
3144 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3145 				proc->pid, thread->pid, in_reply_to->debug_id,
3146 				in_reply_to->to_proc ?
3147 				in_reply_to->to_proc->pid : 0,
3148 				in_reply_to->to_thread ?
3149 				in_reply_to->to_thread->pid : 0);
3150 			spin_unlock(&in_reply_to->lock);
3151 			binder_inner_proc_unlock(proc);
3152 			return_error = BR_FAILED_REPLY;
3153 			return_error_param = -EPROTO;
3154 			return_error_line = __LINE__;
3155 			in_reply_to = NULL;
3156 			goto err_bad_call_stack;
3157 		}
3158 		thread->transaction_stack = in_reply_to->to_parent;
3159 		binder_inner_proc_unlock(proc);
3160 		binder_set_nice(in_reply_to->saved_priority);
3161 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3162 		if (target_thread == NULL) {
3163 			/* annotation for sparse */
3164 			__release(&target_thread->proc->inner_lock);
3165 			binder_txn_error("%d:%d reply target not found\n",
3166 				thread->pid, proc->pid);
3167 			return_error = BR_DEAD_REPLY;
3168 			return_error_line = __LINE__;
3169 			goto err_dead_binder;
3170 		}
3171 		if (target_thread->transaction_stack != in_reply_to) {
3172 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3173 				proc->pid, thread->pid,
3174 				target_thread->transaction_stack ?
3175 				target_thread->transaction_stack->debug_id : 0,
3176 				in_reply_to->debug_id);
3177 			binder_inner_proc_unlock(target_thread->proc);
3178 			return_error = BR_FAILED_REPLY;
3179 			return_error_param = -EPROTO;
3180 			return_error_line = __LINE__;
3181 			in_reply_to = NULL;
3182 			target_thread = NULL;
3183 			goto err_dead_binder;
3184 		}
3185 		target_proc = target_thread->proc;
3186 		target_proc->tmp_ref++;
3187 		binder_inner_proc_unlock(target_thread->proc);
3188 	} else {
3189 		if (tr->target.handle) {
3190 			struct binder_ref *ref;
3191 
3192 			/*
3193 			 * There must already be a strong ref
3194 			 * on this node. If so, do a strong
3195 			 * increment on the node to ensure it
3196 			 * stays alive until the transaction is
3197 			 * done.
3198 			 */
3199 			binder_proc_lock(proc);
3200 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3201 						     true);
3202 			if (ref) {
3203 				target_node = binder_get_node_refs_for_txn(
3204 						ref->node, &target_proc,
3205 						&return_error);
3206 			} else {
3207 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3208 						  proc->pid, thread->pid, tr->target.handle);
3209 				return_error = BR_FAILED_REPLY;
3210 			}
3211 			binder_proc_unlock(proc);
3212 		} else {
3213 			mutex_lock(&context->context_mgr_node_lock);
3214 			target_node = context->binder_context_mgr_node;
3215 			if (target_node)
3216 				target_node = binder_get_node_refs_for_txn(
3217 						target_node, &target_proc,
3218 						&return_error);
3219 			else
3220 				return_error = BR_DEAD_REPLY;
3221 			mutex_unlock(&context->context_mgr_node_lock);
3222 			if (target_node && target_proc->pid == proc->pid) {
3223 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3224 						  proc->pid, thread->pid);
3225 				return_error = BR_FAILED_REPLY;
3226 				return_error_param = -EINVAL;
3227 				return_error_line = __LINE__;
3228 				goto err_invalid_target_handle;
3229 			}
3230 		}
3231 		if (!target_node) {
3232 			binder_txn_error("%d:%d cannot find target node\n",
3233 					 proc->pid, thread->pid);
3234 			/* return_error is set above */
3235 			return_error_param = -EINVAL;
3236 			return_error_line = __LINE__;
3237 			goto err_dead_binder;
3238 		}
3239 		e->to_node = target_node->debug_id;
3240 		if (WARN_ON(proc == target_proc)) {
3241 			binder_txn_error("%d:%d self transactions not allowed\n",
3242 				thread->pid, proc->pid);
3243 			return_error = BR_FAILED_REPLY;
3244 			return_error_param = -EINVAL;
3245 			return_error_line = __LINE__;
3246 			goto err_invalid_target_handle;
3247 		}
3248 		if (security_binder_transaction(proc->cred,
3249 						target_proc->cred) < 0) {
3250 			binder_txn_error("%d:%d transaction credentials failed\n",
3251 				thread->pid, proc->pid);
3252 			return_error = BR_FAILED_REPLY;
3253 			return_error_param = -EPERM;
3254 			return_error_line = __LINE__;
3255 			goto err_invalid_target_handle;
3256 		}
3257 		binder_inner_proc_lock(proc);
3258 
3259 		w = list_first_entry_or_null(&thread->todo,
3260 					     struct binder_work, entry);
3261 		if (!(tr->flags & TF_ONE_WAY) && w &&
3262 		    w->type == BINDER_WORK_TRANSACTION) {
3263 			/*
3264 			 * Do not allow new outgoing transaction from a
3265 			 * thread that has a transaction at the head of
3266 			 * its todo list. Only need to check the head
3267 			 * because binder_select_thread_ilocked picks a
3268 			 * thread from proc->waiting_threads to enqueue
3269 			 * the transaction, and nothing is queued to the
3270 			 * todo list while the thread is on waiting_threads.
3271 			 */
3272 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3273 					  proc->pid, thread->pid);
3274 			binder_inner_proc_unlock(proc);
3275 			return_error = BR_FAILED_REPLY;
3276 			return_error_param = -EPROTO;
3277 			return_error_line = __LINE__;
3278 			goto err_bad_todo_list;
3279 		}
3280 
3281 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3282 			struct binder_transaction *tmp;
3283 
3284 			tmp = thread->transaction_stack;
3285 			if (tmp->to_thread != thread) {
3286 				spin_lock(&tmp->lock);
3287 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3288 					proc->pid, thread->pid, tmp->debug_id,
3289 					tmp->to_proc ? tmp->to_proc->pid : 0,
3290 					tmp->to_thread ?
3291 					tmp->to_thread->pid : 0);
3292 				spin_unlock(&tmp->lock);
3293 				binder_inner_proc_unlock(proc);
3294 				return_error = BR_FAILED_REPLY;
3295 				return_error_param = -EPROTO;
3296 				return_error_line = __LINE__;
3297 				goto err_bad_call_stack;
3298 			}
3299 			while (tmp) {
3300 				struct binder_thread *from;
3301 
3302 				spin_lock(&tmp->lock);
3303 				from = tmp->from;
3304 				if (from && from->proc == target_proc) {
3305 					atomic_inc(&from->tmp_ref);
3306 					target_thread = from;
3307 					spin_unlock(&tmp->lock);
3308 					break;
3309 				}
3310 				spin_unlock(&tmp->lock);
3311 				tmp = tmp->from_parent;
3312 			}
3313 		}
3314 		binder_inner_proc_unlock(proc);
3315 	}
3316 
3317 	t->to_proc = target_proc;
3318 	t->to_thread = target_thread;
3319 	if (target_thread)
3320 		e->to_thread = target_thread->pid;
3321 	e->to_proc = target_proc->pid;
3322 
3323 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3324 	if (tcomplete == NULL) {
3325 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3326 			thread->pid, proc->pid);
3327 		return_error = BR_FAILED_REPLY;
3328 		return_error_param = -ENOMEM;
3329 		return_error_line = __LINE__;
3330 		goto err_alloc_tcomplete_failed;
3331 	}
3332 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3333 
3334 	if (reply)
3335 		binder_debug(BINDER_DEBUG_TRANSACTION,
3336 			     "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3337 			     proc->pid, thread->pid, t->debug_id,
3338 			     target_proc->pid, target_thread->pid,
3339 			     (u64)tr->data_size, (u64)tr->offsets_size,
3340 			     (u64)extra_buffers_size);
3341 	else
3342 		binder_debug(BINDER_DEBUG_TRANSACTION,
3343 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3344 			     proc->pid, thread->pid, t->debug_id,
3345 			     target_proc->pid, target_node->debug_id,
3346 			     (u64)tr->data_size, (u64)tr->offsets_size,
3347 			     (u64)extra_buffers_size);
3348 
3349 	if (target_node && target_node->txn_security_ctx) {
3350 		u32 secid;
3351 		size_t added_size;
3352 
3353 		security_cred_getsecid(proc->cred, &secid);
3354 		ret = security_secid_to_secctx(secid, &lsmctx);
3355 		if (ret < 0) {
3356 			binder_txn_error("%d:%d failed to get security context\n",
3357 				thread->pid, proc->pid);
3358 			return_error = BR_FAILED_REPLY;
3359 			return_error_param = ret;
3360 			return_error_line = __LINE__;
3361 			goto err_get_secctx_failed;
3362 		}
3363 		added_size = ALIGN(lsmctx.len, sizeof(u64));
3364 		extra_buffers_size += added_size;
3365 		if (extra_buffers_size < added_size) {
3366 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3367 				thread->pid, proc->pid);
3368 			return_error = BR_FAILED_REPLY;
3369 			return_error_param = -EINVAL;
3370 			return_error_line = __LINE__;
3371 			goto err_bad_extra_size;
3372 		}
3373 	}
3374 
3375 	trace_binder_transaction(reply, t, target_node);
3376 
3377 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3378 		tr->offsets_size, extra_buffers_size,
3379 		!reply && (t->flags & TF_ONE_WAY));
3380 	if (IS_ERR(t->buffer)) {
3381 		char *s;
3382 
3383 		ret = PTR_ERR(t->buffer);
3384 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3385 			: (ret == -ENOSPC) ? ": no space left"
3386 			: (ret == -ENOMEM) ? ": memory allocation failed"
3387 			: "";
3388 		binder_txn_error("cannot allocate buffer%s\n", s);
3389 
3390 		return_error_param = ret;
3391 		return_error = return_error_param == -ESRCH ?
3392 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3393 		return_error_line = __LINE__;
3394 		t->buffer = NULL;
3395 		goto err_binder_alloc_buf_failed;
3396 	}
3397 	if (lsmctx.context) {
3398 		int err;
3399 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3400 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3401 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3402 				    ALIGN(lsmctx.len, sizeof(u64));
3403 
3404 		t->security_ctx = t->buffer->user_data + buf_offset;
3405 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3406 						  t->buffer, buf_offset,
3407 						  lsmctx.context, lsmctx.len);
3408 		if (err) {
3409 			t->security_ctx = 0;
3410 			WARN_ON(1);
3411 		}
3412 		security_release_secctx(&lsmctx);
3413 		lsmctx.context = NULL;
3414 	}
3415 	t->buffer->debug_id = t->debug_id;
3416 	t->buffer->transaction = t;
3417 	t->buffer->target_node = target_node;
3418 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3419 	trace_binder_transaction_alloc_buf(t->buffer);
3420 
3421 	if (binder_alloc_copy_user_to_buffer(
3422 				&target_proc->alloc,
3423 				t->buffer,
3424 				ALIGN(tr->data_size, sizeof(void *)),
3425 				(const void __user *)
3426 					(uintptr_t)tr->data.ptr.offsets,
3427 				tr->offsets_size)) {
3428 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3429 				proc->pid, thread->pid);
3430 		return_error = BR_FAILED_REPLY;
3431 		return_error_param = -EFAULT;
3432 		return_error_line = __LINE__;
3433 		goto err_copy_data_failed;
3434 	}
3435 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3436 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3437 				proc->pid, thread->pid, (u64)tr->offsets_size);
3438 		return_error = BR_FAILED_REPLY;
3439 		return_error_param = -EINVAL;
3440 		return_error_line = __LINE__;
3441 		goto err_bad_offset;
3442 	}
3443 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3444 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3445 				  proc->pid, thread->pid,
3446 				  (u64)extra_buffers_size);
3447 		return_error = BR_FAILED_REPLY;
3448 		return_error_param = -EINVAL;
3449 		return_error_line = __LINE__;
3450 		goto err_bad_offset;
3451 	}
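	/*
	 * Layout of the target buffer, as computed below: the data area
	 * (tr->data_size), then the offsets array (tr->offsets_size),
	 * then the scatter-gather buffers, with the security context
	 * (if any, already counted in extra_buffers_size above) at the
	 * very end.
	 */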
3452 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3453 	buffer_offset = off_start_offset;
3454 	off_end_offset = off_start_offset + tr->offsets_size;
3455 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3456 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3457 		ALIGN(lsmctx.len, sizeof(u64));
3458 	off_min = 0;
3459 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3460 	     buffer_offset += sizeof(binder_size_t)) {
3461 		struct binder_object_header *hdr;
3462 		size_t object_size;
3463 		struct binder_object object;
3464 		binder_size_t object_offset;
3465 		binder_size_t copy_size;
3466 
3467 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3468 						  &object_offset,
3469 						  t->buffer,
3470 						  buffer_offset,
3471 						  sizeof(object_offset))) {
3472 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3473 				thread->pid, proc->pid);
3474 			return_error = BR_FAILED_REPLY;
3475 			return_error_param = -EINVAL;
3476 			return_error_line = __LINE__;
3477 			goto err_bad_offset;
3478 		}
3479 
3480 		/*
3481 		 * Copy the source user buffer up to the next object
3482 		 * that will be processed.
3483 		 */
3484 		copy_size = object_offset - user_offset;
3485 		if (copy_size && (user_offset > object_offset ||
3486 				object_offset > tr->data_size ||
3487 				binder_alloc_copy_user_to_buffer(
3488 					&target_proc->alloc,
3489 					t->buffer, user_offset,
3490 					user_buffer + user_offset,
3491 					copy_size))) {
3492 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3493 					proc->pid, thread->pid);
3494 			return_error = BR_FAILED_REPLY;
3495 			return_error_param = -EFAULT;
3496 			return_error_line = __LINE__;
3497 			goto err_copy_data_failed;
3498 		}
3499 		object_size = binder_get_object(target_proc, user_buffer,
3500 				t->buffer, object_offset, &object);
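		/*
		 * Reject the object if it is malformed or out of bounds
		 * (object_size == 0), or if it starts before the end of
		 * the previous object: off_min enforces that objects are
		 * non-overlapping and appear in increasing offset order.
		 */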
3501 		if (object_size == 0 || object_offset < off_min) {
3502 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3503 					  proc->pid, thread->pid,
3504 					  (u64)object_offset,
3505 					  (u64)off_min,
3506 					  (u64)t->buffer->data_size);
3507 			return_error = BR_FAILED_REPLY;
3508 			return_error_param = -EINVAL;
3509 			return_error_line = __LINE__;
3510 			goto err_bad_offset;
3511 		}
3512 		/*
3513 		 * Set offset to the next buffer fragment to be
3514 		 * copied
3515 		 */
3516 		user_offset = object_offset + object_size;
3517 
3518 		hdr = &object.hdr;
3519 		off_min = object_offset + object_size;
3520 		switch (hdr->type) {
3521 		case BINDER_TYPE_BINDER:
3522 		case BINDER_TYPE_WEAK_BINDER: {
3523 			struct flat_binder_object *fp;
3524 
3525 			fp = to_flat_binder_object(hdr);
3526 			ret = binder_translate_binder(fp, t, thread);
3527 
3528 			if (ret < 0 ||
3529 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3530 							t->buffer,
3531 							object_offset,
3532 							fp, sizeof(*fp))) {
3533 				binder_txn_error("%d:%d translate binder failed\n",
3534 					thread->pid, proc->pid);
3535 				return_error = BR_FAILED_REPLY;
3536 				return_error_param = ret;
3537 				return_error_line = __LINE__;
3538 				goto err_translate_failed;
3539 			}
3540 		} break;
3541 		case BINDER_TYPE_HANDLE:
3542 		case BINDER_TYPE_WEAK_HANDLE: {
3543 			struct flat_binder_object *fp;
3544 
3545 			fp = to_flat_binder_object(hdr);
3546 			ret = binder_translate_handle(fp, t, thread);
3547 			if (ret < 0 ||
3548 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3549 							t->buffer,
3550 							object_offset,
3551 							fp, sizeof(*fp))) {
3552 				binder_txn_error("%d:%d translate handle failed\n",
3553 					thread->pid, proc->pid);
3554 				return_error = BR_FAILED_REPLY;
3555 				return_error_param = ret;
3556 				return_error_line = __LINE__;
3557 				goto err_translate_failed;
3558 			}
3559 		} break;
3560 
3561 		case BINDER_TYPE_FD: {
3562 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3563 			binder_size_t fd_offset = object_offset +
3564 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3565 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3566 						      thread, in_reply_to);
3567 
3568 			fp->pad_binder = 0;
3569 			if (ret < 0 ||
3570 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3571 							t->buffer,
3572 							object_offset,
3573 							fp, sizeof(*fp))) {
3574 				binder_txn_error("%d:%d translate fd failed\n",
3575 					thread->pid, proc->pid);
3576 				return_error = BR_FAILED_REPLY;
3577 				return_error_param = ret;
3578 				return_error_line = __LINE__;
3579 				goto err_translate_failed;
3580 			}
3581 		} break;
3582 		case BINDER_TYPE_FDA: {
3583 			struct binder_object ptr_object;
3584 			binder_size_t parent_offset;
3585 			struct binder_object user_object;
3586 			size_t user_parent_size;
3587 			struct binder_fd_array_object *fda =
3588 				to_binder_fd_array_object(hdr);
3589 			size_t num_valid = (buffer_offset - off_start_offset) /
3590 						sizeof(binder_size_t);
3591 			struct binder_buffer_object *parent =
3592 				binder_validate_ptr(target_proc, t->buffer,
3593 						    &ptr_object, fda->parent,
3594 						    off_start_offset,
3595 						    &parent_offset,
3596 						    num_valid);
3597 			if (!parent) {
3598 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3599 						  proc->pid, thread->pid);
3600 				return_error = BR_FAILED_REPLY;
3601 				return_error_param = -EINVAL;
3602 				return_error_line = __LINE__;
3603 				goto err_bad_parent;
3604 			}
3605 			if (!binder_validate_fixup(target_proc, t->buffer,
3606 						   off_start_offset,
3607 						   parent_offset,
3608 						   fda->parent_offset,
3609 						   last_fixup_obj_off,
3610 						   last_fixup_min_off)) {
3611 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3612 						  proc->pid, thread->pid);
3613 				return_error = BR_FAILED_REPLY;
3614 				return_error_param = -EINVAL;
3615 				return_error_line = __LINE__;
3616 				goto err_bad_parent;
3617 			}
3618 			/*
3619 			 * We need to read the user version of the parent
3620 			 * object to get the original user offset.
3621 			 */
3622 			user_parent_size =
3623 				binder_get_object(proc, user_buffer, t->buffer,
3624 						  parent_offset, &user_object);
3625 			if (user_parent_size != sizeof(user_object.bbo)) {
3626 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3627 						  proc->pid, thread->pid,
3628 						  user_parent_size,
3629 						  sizeof(user_object.bbo));
3630 				return_error = BR_FAILED_REPLY;
3631 				return_error_param = -EINVAL;
3632 				return_error_line = __LINE__;
3633 				goto err_bad_parent;
3634 			}
3635 			ret = binder_translate_fd_array(&pf_head, fda,
3636 							user_buffer, parent,
3637 							&user_object.bbo, t,
3638 							thread, in_reply_to);
3639 			if (!ret)
3640 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3641 								  t->buffer,
3642 								  object_offset,
3643 								  fda, sizeof(*fda));
3644 			if (ret) {
3645 				binder_txn_error("%d:%d translate fd array failed\n",
3646 					thread->pid, proc->pid);
3647 				return_error = BR_FAILED_REPLY;
3648 				return_error_param = ret > 0 ? -EINVAL : ret;
3649 				return_error_line = __LINE__;
3650 				goto err_translate_failed;
3651 			}
3652 			last_fixup_obj_off = parent_offset;
3653 			last_fixup_min_off =
3654 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3655 		} break;
3656 		case BINDER_TYPE_PTR: {
3657 			struct binder_buffer_object *bp =
3658 				to_binder_buffer_object(hdr);
3659 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3660 			size_t num_valid;
3661 
3662 			if (bp->length > buf_left) {
3663 				binder_user_error("%d:%d got transaction with too large buffer\n",
3664 						  proc->pid, thread->pid);
3665 				return_error = BR_FAILED_REPLY;
3666 				return_error_param = -EINVAL;
3667 				return_error_line = __LINE__;
3668 				goto err_bad_offset;
3669 			}
3670 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3671 				(const void __user *)(uintptr_t)bp->buffer,
3672 				bp->length);
3673 			if (ret) {
3674 				binder_txn_error("%d:%d deferred copy failed\n",
3675 					thread->pid, proc->pid);
3676 				return_error = BR_FAILED_REPLY;
3677 				return_error_param = ret;
3678 				return_error_line = __LINE__;
3679 				goto err_translate_failed;
3680 			}
3681 			/* Fix up buffer pointer to target proc address space */
3682 			bp->buffer = t->buffer->user_data + sg_buf_offset;
3683 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3684 
3685 			num_valid = (buffer_offset - off_start_offset) /
3686 					sizeof(binder_size_t);
3687 			ret = binder_fixup_parent(&pf_head, t,
3688 						  thread, bp,
3689 						  off_start_offset,
3690 						  num_valid,
3691 						  last_fixup_obj_off,
3692 						  last_fixup_min_off);
3693 			if (ret < 0 ||
3694 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3695 							t->buffer,
3696 							object_offset,
3697 							bp, sizeof(*bp))) {
3698 				binder_txn_error("%d:%d failed to fixup parent\n",
3699 					thread->pid, proc->pid);
3700 				return_error = BR_FAILED_REPLY;
3701 				return_error_param = ret;
3702 				return_error_line = __LINE__;
3703 				goto err_translate_failed;
3704 			}
3705 			last_fixup_obj_off = object_offset;
3706 			last_fixup_min_off = 0;
3707 		} break;
3708 		default:
3709 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3710 				proc->pid, thread->pid, hdr->type);
3711 			return_error = BR_FAILED_REPLY;
3712 			return_error_param = -EINVAL;
3713 			return_error_line = __LINE__;
3714 			goto err_bad_object_type;
3715 		}
3716 	}
3717 	/* Done processing objects, copy the rest of the buffer */
3718 	if (binder_alloc_copy_user_to_buffer(
3719 				&target_proc->alloc,
3720 				t->buffer, user_offset,
3721 				user_buffer + user_offset,
3722 				tr->data_size - user_offset)) {
3723 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3724 				proc->pid, thread->pid);
3725 		return_error = BR_FAILED_REPLY;
3726 		return_error_param = -EFAULT;
3727 		return_error_line = __LINE__;
3728 		goto err_copy_data_failed;
3729 	}
3730 
3731 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3732 					    &sgc_head, &pf_head);
3733 	if (ret) {
3734 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3735 				  proc->pid, thread->pid);
3736 		return_error = BR_FAILED_REPLY;
3737 		return_error_param = ret;
3738 		return_error_line = __LINE__;
3739 		goto err_copy_data_failed;
3740 	}
3741 	if (t->buffer->oneway_spam_suspect) {
3742 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3743 		binder_netlink_report(proc, t, tr->data_size,
3744 				      BR_ONEWAY_SPAM_SUSPECT);
3745 	} else {
3746 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3747 	}
3748 
3749 	if (reply) {
3750 		binder_enqueue_thread_work(thread, tcomplete);
3751 		binder_inner_proc_lock(target_proc);
3752 		if (target_thread->is_dead) {
3753 			return_error = BR_DEAD_REPLY;
3754 			binder_inner_proc_unlock(target_proc);
3755 			goto err_dead_proc_or_thread;
3756 		}
3757 		BUG_ON(t->buffer->async_transaction != 0);
3758 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3759 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3760 		target_proc->outstanding_txns++;
3761 		binder_inner_proc_unlock(target_proc);
3762 		wake_up_interruptible_sync(&target_thread->wait);
3763 		binder_free_transaction(in_reply_to);
3764 	} else if (!(t->flags & TF_ONE_WAY)) {
3765 		BUG_ON(t->buffer->async_transaction != 0);
3766 		binder_inner_proc_lock(proc);
3767 		/*
3768 		 * Defer the TRANSACTION_COMPLETE so we don't return to
3769 		 * userspace right away; this lets the target process start
3770 		 * working on the transaction immediately, reducing latency.
3771 		 * The TRANSACTION_COMPLETE is returned when the target
3772 		 * replies (or there is an error).
3773 		 */
3774 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3775 		t->from_parent = thread->transaction_stack;
3776 		thread->transaction_stack = t;
3777 		binder_inner_proc_unlock(proc);
3778 		return_error = binder_proc_transaction(t,
3779 				target_proc, target_thread);
3780 		if (return_error) {
3781 			binder_inner_proc_lock(proc);
3782 			binder_pop_transaction_ilocked(thread, t);
3783 			binder_inner_proc_unlock(proc);
3784 			goto err_dead_proc_or_thread;
3785 		}
3786 	} else {
3787 		/*
3788 		 * Make a transaction copy. It is not safe to access 't' after
3789 		 * Make a transaction copy. It is not safe to access 't' once
3790 		 * binder_proc_transaction() reports the transaction is pending
3791 		 * on a frozen process: the target could thaw and consume it at
3792 		 * any point. Instead, use the safe 't_copy' for binder_netlink_report().
3793 		struct binder_transaction t_copy = *t;
3794 
3795 		BUG_ON(target_node == NULL);
3796 		BUG_ON(t->buffer->async_transaction != 1);
3797 		return_error = binder_proc_transaction(t, target_proc, NULL);
3798 		/*
3799 		 * Let the caller know when an async transaction reaches a
3800 		 * frozen process and is put in a pending queue, waiting for
3801 		 * the target process to be unfrozen.
3802 		 */
3803 		if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
3804 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3805 			binder_netlink_report(proc, &t_copy, tr->data_size,
3806 					      return_error);
3807 		}
3808 		binder_enqueue_thread_work(thread, tcomplete);
3809 		if (return_error &&
3810 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3811 			goto err_dead_proc_or_thread;
3812 	}
3813 	if (target_thread)
3814 		binder_thread_dec_tmpref(target_thread);
3815 	binder_proc_dec_tmpref(target_proc);
3816 	if (target_node)
3817 		binder_dec_node_tmpref(target_node);
3818 	/*
3819 	 * write barrier to synchronize with initialization
3820 	 * of log entry
3821 	 */
3822 	smp_wmb();
3823 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3824 	return;
3825 
3826 err_dead_proc_or_thread:
3827 	binder_txn_error("%d:%d %s process or thread\n",
3828 			 proc->pid, thread->pid,
3829 			 return_error == BR_FROZEN_REPLY ? "frozen" : "dead");
3830 	return_error_line = __LINE__;
3831 	binder_dequeue_work(proc, tcomplete);
3832 err_translate_failed:
3833 err_bad_object_type:
3834 err_bad_offset:
3835 err_bad_parent:
3836 err_copy_data_failed:
3837 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3838 	binder_free_txn_fixups(t);
3839 	trace_binder_transaction_failed_buffer_release(t->buffer);
3840 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3841 					  buffer_offset, true);
3842 	if (target_node)
3843 		binder_dec_node_tmpref(target_node);
3844 	target_node = NULL;
3845 	t->buffer->transaction = NULL;
3846 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3847 err_binder_alloc_buf_failed:
3848 err_bad_extra_size:
3849 	if (lsmctx.context)
3850 		security_release_secctx(&lsmctx);
3851 err_get_secctx_failed:
3852 	kfree(tcomplete);
3853 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3854 err_alloc_tcomplete_failed:
3855 	if (trace_binder_txn_latency_free_enabled())
3856 		binder_txn_latency_free(t);
3857 err_bad_todo_list:
3858 err_bad_call_stack:
3859 err_empty_call_stack:
3860 err_dead_binder:
3861 err_invalid_target_handle:
3862 	if (target_node) {
3863 		binder_dec_node(target_node, 1, 0);
3864 		binder_dec_node_tmpref(target_node);
3865 	}
3866 
3867 	binder_netlink_report(proc, t, tr->data_size, return_error);
3868 	kfree(t);
3869 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3870 err_alloc_t_failed:
3871 
3872 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3873 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3874 		     proc->pid, thread->pid, reply ? "reply" :
3875 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3876 		     target_proc ? target_proc->pid : 0,
3877 		     target_thread ? target_thread->pid : 0,
3878 		     t_debug_id, return_error, return_error_param,
3879 		     tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3880 		     return_error_line);
3881 
3882 	if (target_thread)
3883 		binder_thread_dec_tmpref(target_thread);
3884 	if (target_proc)
3885 		binder_proc_dec_tmpref(target_proc);
3886 
3887 	{
3888 		struct binder_transaction_log_entry *fe;
3889 
3890 		e->return_error = return_error;
3891 		e->return_error_param = return_error_param;
3892 		e->return_error_line = return_error_line;
3893 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3894 		*fe = *e;
3895 		/*
3896 		 * write barrier to synchronize with initialization
3897 		 * of log entry
3898 		 */
3899 		smp_wmb();
3900 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3901 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3902 	}
3903 
3904 	BUG_ON(thread->return_error.cmd != BR_OK);
3905 	if (in_reply_to) {
3906 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3907 				return_error, return_error_param);
3908 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3909 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3910 		binder_send_failed_reply(in_reply_to, return_error);
3911 	} else {
3912 		binder_inner_proc_lock(proc);
3913 		binder_set_extended_error(&thread->ee, t_debug_id,
3914 				return_error, return_error_param);
3915 		binder_inner_proc_unlock(proc);
3916 		thread->return_error.cmd = return_error;
3917 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3918 	}
3919 }
3920 
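/*
 * Freeze notifications let a process watch, via a handle, whether the
 * process owning the corresponding node is frozen. A sketch of the
 * userspace command flow handled by the helpers below (state changes
 * may also be driven elsewhere, e.g. by the freeze ioctls):
 *
 *   BC_REQUEST_FREEZE_NOTIFICATION {handle, cookie}
 *       -> BR_FROZEN_BINDER {is_frozen, cookie} delivered via proc->todo
 *   BC_FREEZE_NOTIFICATION_DONE {cookie}
 *       acks a delivered BR_FROZEN_BINDER so the next one can be sent
 *   BC_CLEAR_FREEZE_NOTIFICATION {handle, cookie}
 *       -> BR_CLEAR_FREEZE_NOTIFICATION_DONE {cookie}
 */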
3921 static int
3922 binder_request_freeze_notification(struct binder_proc *proc,
3923 				   struct binder_thread *thread,
3924 				   struct binder_handle_cookie *handle_cookie)
3925 {
3926 	struct binder_ref_freeze *freeze;
3927 	struct binder_ref *ref;
3928 
3929 	freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3930 	if (!freeze)
3931 		return -ENOMEM;
3932 	binder_proc_lock(proc);
3933 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3934 	if (!ref) {
3935 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3936 				  proc->pid, thread->pid, handle_cookie->handle);
3937 		binder_proc_unlock(proc);
3938 		kfree(freeze);
3939 		return -EINVAL;
3940 	}
3941 
3942 	binder_node_lock(ref->node);
3943 	if (ref->freeze) {
3944 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3945 				  proc->pid, thread->pid);
3946 		binder_node_unlock(ref->node);
3947 		binder_proc_unlock(proc);
3948 		kfree(freeze);
3949 		return -EINVAL;
3950 	}
3951 
3952 	binder_stats_created(BINDER_STAT_FREEZE);
3953 	INIT_LIST_HEAD(&freeze->work.entry);
3954 	freeze->cookie = handle_cookie->cookie;
3955 	freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3956 	ref->freeze = freeze;
3957 
3958 	if (ref->node->proc) {
3959 		binder_inner_proc_lock(ref->node->proc);
3960 		freeze->is_frozen = ref->node->proc->is_frozen;
3961 		binder_inner_proc_unlock(ref->node->proc);
3962 
3963 		binder_inner_proc_lock(proc);
3964 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3965 		binder_wakeup_proc_ilocked(proc);
3966 		binder_inner_proc_unlock(proc);
3967 	}
3968 
3969 	binder_node_unlock(ref->node);
3970 	binder_proc_unlock(proc);
3971 	return 0;
3972 }
3973 
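/**
 * binder_clear_freeze_notification() - handle BC_CLEAR_FREEZE_NOTIFICATION
 * @proc:          binder_proc owning the ref
 * @thread:        binder thread performing the clear
 * @handle_cookie: handle and cookie identifying the notification
 *
 * Detaches the freeze notification from the ref and queues a
 * BINDER_WORK_CLEAR_FREEZE_NOTIFICATION work item (or flags a resend if
 * a notification is still awaiting an ack) so userspace eventually
 * receives BR_CLEAR_FREEZE_NOTIFICATION_DONE.
 *
 * Return: 0 on success, -EINVAL on failure.
 */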
3974 static int
3975 binder_clear_freeze_notification(struct binder_proc *proc,
3976 				 struct binder_thread *thread,
3977 				 struct binder_handle_cookie *handle_cookie)
3978 {
3979 	struct binder_ref_freeze *freeze;
3980 	struct binder_ref *ref;
3981 
3982 	binder_proc_lock(proc);
3983 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3984 	if (!ref) {
3985 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3986 				  proc->pid, thread->pid, handle_cookie->handle);
3987 		binder_proc_unlock(proc);
3988 		return -EINVAL;
3989 	}
3990 
3991 	binder_node_lock(ref->node);
3992 
3993 	if (!ref->freeze) {
3994 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3995 				  proc->pid, thread->pid);
3996 		binder_node_unlock(ref->node);
3997 		binder_proc_unlock(proc);
3998 		return -EINVAL;
3999 	}
4000 	freeze = ref->freeze;
4001 	binder_inner_proc_lock(proc);
4002 	if (freeze->cookie != handle_cookie->cookie) {
4003 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
4004 				  proc->pid, thread->pid, (u64)freeze->cookie,
4005 				  (u64)handle_cookie->cookie);
4006 		binder_inner_proc_unlock(proc);
4007 		binder_node_unlock(ref->node);
4008 		binder_proc_unlock(proc);
4009 		return -EINVAL;
4010 	}
4011 	ref->freeze = NULL;
4012 	/*
4013 	 * Take the existing freeze object and overwrite its work type. There are three cases here:
4014 	 * 1. No pending notification. In this case just add the work to the queue.
4015 	 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
4016 	 *    should resend with the new work type.
4017 	 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
4018 	 *    needs to be done here.
4019 	 */
4020 	freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
4021 	if (list_empty(&freeze->work.entry)) {
4022 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4023 		binder_wakeup_proc_ilocked(proc);
4024 	} else if (freeze->sent) {
4025 		freeze->resend = true;
4026 	}
4027 	binder_inner_proc_unlock(proc);
4028 	binder_node_unlock(ref->node);
4029 	binder_proc_unlock(proc);
4030 	return 0;
4031 }
4032 
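/**
 * binder_freeze_notification_done() - handle BC_FREEZE_NOTIFICATION_DONE
 * @proc:   binder_proc that received the notification
 * @thread: binder thread acking the notification
 * @cookie: cookie of the notification being acked
 *
 * Removes the freeze work from the delivered_freeze list and, if a
 * clear or state change arrived while the ack was outstanding, requeues
 * the work so it is sent again.
 *
 * Return: 0 on success, -EINVAL if no matching cookie is found.
 */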
4033 static int
4034 binder_freeze_notification_done(struct binder_proc *proc,
4035 				struct binder_thread *thread,
4036 				binder_uintptr_t cookie)
4037 {
4038 	struct binder_ref_freeze *freeze = NULL;
4039 	struct binder_work *w;
4040 
4041 	binder_inner_proc_lock(proc);
4042 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
4043 		struct binder_ref_freeze *tmp_freeze =
4044 			container_of(w, struct binder_ref_freeze, work);
4045 
4046 		if (tmp_freeze->cookie == cookie) {
4047 			freeze = tmp_freeze;
4048 			break;
4049 		}
4050 	}
4051 	if (!freeze) {
4052 		binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
4053 				  proc->pid, thread->pid, (u64)cookie);
4054 		binder_inner_proc_unlock(proc);
4055 		return -EINVAL;
4056 	}
4057 	binder_dequeue_work_ilocked(&freeze->work);
4058 	freeze->sent = false;
4059 	if (freeze->resend) {
4060 		freeze->resend = false;
4061 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4062 		binder_wakeup_proc_ilocked(proc);
4063 	}
4064 	binder_inner_proc_unlock(proc);
4065 	return 0;
4066 }
4067 
4068 /**
4069  * binder_free_buf() - free the specified buffer
4070  * @proc:       binder proc that owns buffer
4071  * @thread:     binder thread performing the buffer release
4072  * @buffer:     buffer to be freed
4073  * @is_failure: true if the transaction failed to send
4074  *
4075  * If the buffer is for an async transaction, enqueue the next async
4076  * transaction from the node.
4077  *
4078  * Cleanup the buffer and free it.
4079  * Clean up the buffer and free it.
4080 static void
4081 binder_free_buf(struct binder_proc *proc,
4082 		struct binder_thread *thread,
4083 		struct binder_buffer *buffer, bool is_failure)
4084 {
4085 	binder_inner_proc_lock(proc);
4086 	if (buffer->transaction) {
4087 		buffer->transaction->buffer = NULL;
4088 		buffer->transaction = NULL;
4089 	}
4090 	binder_inner_proc_unlock(proc);
4091 	if (buffer->async_transaction && buffer->target_node) {
4092 		struct binder_node *buf_node;
4093 		struct binder_work *w;
4094 
4095 		buf_node = buffer->target_node;
4096 		binder_node_inner_lock(buf_node);
4097 		BUG_ON(!buf_node->has_async_transaction);
4098 		BUG_ON(buf_node->proc != proc);
4099 		w = binder_dequeue_work_head_ilocked(
4100 				&buf_node->async_todo);
4101 		if (!w) {
4102 			buf_node->has_async_transaction = false;
4103 		} else {
4104 			binder_enqueue_work_ilocked(
4105 					w, &proc->todo);
4106 			binder_wakeup_proc_ilocked(proc);
4107 		}
4108 		binder_node_inner_unlock(buf_node);
4109 	}
4110 	trace_binder_transaction_buffer_release(buffer);
4111 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
4112 	binder_alloc_free_buf(&proc->alloc, buffer);
4113 }
4114 
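/*
 * binder_thread_write() - consume BC_* commands from the write buffer
 *
 * The write buffer is a stream of u32 command codes, each followed
 * immediately by its fixed-size payload, e.g. (a sketch; the exact
 * layout of each command is defined by the BC_* protocol in
 * uapi/linux/android/binder.h):
 *
 *   [BC_ENTER_LOOPER]
 *   [BC_TRANSACTION][struct binder_transaction_data]
 *   [BC_FREE_BUFFER][binder_uintptr_t data_ptr]
 *
 * Processing stops early if a return error becomes pending on the
 * thread; *consumed is updated after each command so the caller knows
 * how much of the buffer was handled.
 */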
4115 static int binder_thread_write(struct binder_proc *proc,
4116 			struct binder_thread *thread,
4117 			binder_uintptr_t binder_buffer, size_t size,
4118 			binder_size_t *consumed)
4119 {
4120 	uint32_t cmd;
4121 	struct binder_context *context = proc->context;
4122 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4123 	void __user *ptr = buffer + *consumed;
4124 	void __user *end = buffer + size;
4125 
4126 	while (ptr < end && thread->return_error.cmd == BR_OK) {
4127 		int ret;
4128 
4129 		if (get_user(cmd, (uint32_t __user *)ptr))
4130 			return -EFAULT;
4131 		ptr += sizeof(uint32_t);
4132 		trace_binder_command(cmd);
4133 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4134 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4135 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4136 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4137 		}
4138 		switch (cmd) {
4139 		case BC_INCREFS:
4140 		case BC_ACQUIRE:
4141 		case BC_RELEASE:
4142 		case BC_DECREFS: {
4143 			uint32_t target;
4144 			const char *debug_string;
4145 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4146 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4147 			struct binder_ref_data rdata;
4148 
4149 			if (get_user(target, (uint32_t __user *)ptr))
4150 				return -EFAULT;
4151 
4152 			ptr += sizeof(uint32_t);
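			/*
			 * -1 is a sentinel: if the context-manager fast
			 * path below does not handle the request, fall
			 * through to the ordinary handle lookup.
			 */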
4153 			ret = -1;
4154 			if (increment && !target) {
4155 				struct binder_node *ctx_mgr_node;
4156 
4157 				mutex_lock(&context->context_mgr_node_lock);
4158 				ctx_mgr_node = context->binder_context_mgr_node;
4159 				if (ctx_mgr_node) {
4160 					if (ctx_mgr_node->proc == proc) {
4161 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4162 								  proc->pid, thread->pid);
4163 						mutex_unlock(&context->context_mgr_node_lock);
4164 						return -EINVAL;
4165 					}
4166 					ret = binder_inc_ref_for_node(
4167 							proc, ctx_mgr_node,
4168 							strong, NULL, &rdata);
4169 				}
4170 				mutex_unlock(&context->context_mgr_node_lock);
4171 			}
4172 			if (ret)
4173 				ret = binder_update_ref_for_handle(
4174 						proc, target, increment, strong,
4175 						&rdata);
4176 			if (!ret && rdata.desc != target) {
4177 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4178 					proc->pid, thread->pid,
4179 					target, rdata.desc);
4180 			}
4181 			switch (cmd) {
4182 			case BC_INCREFS:
4183 				debug_string = "IncRefs";
4184 				break;
4185 			case BC_ACQUIRE:
4186 				debug_string = "Acquire";
4187 				break;
4188 			case BC_RELEASE:
4189 				debug_string = "Release";
4190 				break;
4191 			case BC_DECREFS:
4192 			default:
4193 				debug_string = "DecRefs";
4194 				break;
4195 			}
4196 			if (ret) {
4197 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4198 					proc->pid, thread->pid, debug_string,
4199 					strong, target, ret);
4200 				break;
4201 			}
4202 			binder_debug(BINDER_DEBUG_USER_REFS,
4203 				     "%d:%d %s ref %d desc %d s %d w %d\n",
4204 				     proc->pid, thread->pid, debug_string,
4205 				     rdata.debug_id, rdata.desc, rdata.strong,
4206 				     rdata.weak);
4207 			break;
4208 		}
4209 		case BC_INCREFS_DONE:
4210 		case BC_ACQUIRE_DONE: {
4211 			binder_uintptr_t node_ptr;
4212 			binder_uintptr_t cookie;
4213 			struct binder_node *node;
4214 			bool free_node;
4215 
4216 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4217 				return -EFAULT;
4218 			ptr += sizeof(binder_uintptr_t);
4219 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4220 				return -EFAULT;
4221 			ptr += sizeof(binder_uintptr_t);
4222 			node = binder_get_node(proc, node_ptr);
4223 			if (node == NULL) {
4224 				binder_user_error("%d:%d %s u%016llx no match\n",
4225 					proc->pid, thread->pid,
4226 					cmd == BC_INCREFS_DONE ?
4227 					"BC_INCREFS_DONE" :
4228 					"BC_ACQUIRE_DONE",
4229 					(u64)node_ptr);
4230 				break;
4231 			}
4232 			if (cookie != node->cookie) {
4233 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4234 					proc->pid, thread->pid,
4235 					cmd == BC_INCREFS_DONE ?
4236 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4237 					(u64)node_ptr, node->debug_id,
4238 					(u64)cookie, (u64)node->cookie);
4239 				binder_put_node(node);
4240 				break;
4241 			}
4242 			binder_node_inner_lock(node);
4243 			if (cmd == BC_ACQUIRE_DONE) {
4244 				if (node->pending_strong_ref == 0) {
4245 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4246 						proc->pid, thread->pid,
4247 						node->debug_id);
4248 					binder_node_inner_unlock(node);
4249 					binder_put_node(node);
4250 					break;
4251 				}
4252 				node->pending_strong_ref = 0;
4253 			} else {
4254 				if (node->pending_weak_ref == 0) {
4255 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4256 						proc->pid, thread->pid,
4257 						node->debug_id);
4258 					binder_node_inner_unlock(node);
4259 					binder_put_node(node);
4260 					break;
4261 				}
4262 				node->pending_weak_ref = 0;
4263 			}
4264 			free_node = binder_dec_node_nilocked(node,
4265 					cmd == BC_ACQUIRE_DONE, 0);
4266 			WARN_ON(free_node);
4267 			binder_debug(BINDER_DEBUG_USER_REFS,
4268 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
4269 				     proc->pid, thread->pid,
4270 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4271 				     node->debug_id, node->local_strong_refs,
4272 				     node->local_weak_refs, node->tmp_refs);
4273 			binder_node_inner_unlock(node);
4274 			binder_put_node(node);
4275 			break;
4276 		}
4277 		case BC_ATTEMPT_ACQUIRE:
4278 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4279 			return -EINVAL;
4280 		case BC_ACQUIRE_RESULT:
4281 			pr_err("BC_ACQUIRE_RESULT not supported\n");
4282 			return -EINVAL;
4283 
4284 		case BC_FREE_BUFFER: {
4285 			binder_uintptr_t data_ptr;
4286 			struct binder_buffer *buffer;
4287 
4288 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4289 				return -EFAULT;
4290 			ptr += sizeof(binder_uintptr_t);
4291 
4292 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
4293 							      data_ptr);
4294 			if (IS_ERR_OR_NULL(buffer)) {
4295 				if (PTR_ERR(buffer) == -EPERM) {
4296 					binder_user_error(
4297 						"%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4298 						proc->pid, thread->pid,
4299 						(unsigned long)data_ptr - proc->alloc.vm_start);
4300 				} else {
4301 					binder_user_error(
4302 						"%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4303 						proc->pid, thread->pid,
4304 						(unsigned long)data_ptr - proc->alloc.vm_start);
4305 				}
4306 				break;
4307 			}
4308 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4309 				     "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4310 				     proc->pid, thread->pid,
4311 				     (unsigned long)data_ptr - proc->alloc.vm_start,
4312 				     buffer->debug_id,
4313 				     buffer->transaction ? "active" : "finished");
4314 			binder_free_buf(proc, thread, buffer, false);
4315 			break;
4316 		}
4317 
4318 		case BC_TRANSACTION_SG:
4319 		case BC_REPLY_SG: {
4320 			struct binder_transaction_data_sg tr;
4321 
4322 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4323 				return -EFAULT;
4324 			ptr += sizeof(tr);
4325 			binder_transaction(proc, thread, &tr.transaction_data,
4326 					   cmd == BC_REPLY_SG, tr.buffers_size);
4327 			break;
4328 		}
4329 		case BC_TRANSACTION:
4330 		case BC_REPLY: {
4331 			struct binder_transaction_data tr;
4332 
4333 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4334 				return -EFAULT;
4335 			ptr += sizeof(tr);
4336 			binder_transaction(proc, thread, &tr,
4337 					   cmd == BC_REPLY, 0);
4338 			break;
4339 		}
4340 
4341 		case BC_REGISTER_LOOPER:
4342 			binder_debug(BINDER_DEBUG_THREADS,
4343 				     "%d:%d BC_REGISTER_LOOPER\n",
4344 				     proc->pid, thread->pid);
4345 			binder_inner_proc_lock(proc);
4346 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4347 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4348 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4349 					proc->pid, thread->pid);
4350 			} else if (proc->requested_threads == 0) {
4351 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4352 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4353 					proc->pid, thread->pid);
4354 			} else {
4355 				proc->requested_threads--;
4356 				proc->requested_threads_started++;
4357 			}
4358 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4359 			binder_inner_proc_unlock(proc);
4360 			break;
4361 		case BC_ENTER_LOOPER:
4362 			binder_debug(BINDER_DEBUG_THREADS,
4363 				     "%d:%d BC_ENTER_LOOPER\n",
4364 				     proc->pid, thread->pid);
4365 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4366 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4367 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4368 					proc->pid, thread->pid);
4369 			}
4370 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4371 			break;
4372 		case BC_EXIT_LOOPER:
4373 			binder_debug(BINDER_DEBUG_THREADS,
4374 				     "%d:%d BC_EXIT_LOOPER\n",
4375 				     proc->pid, thread->pid);
4376 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4377 			break;
4378 
4379 		case BC_REQUEST_DEATH_NOTIFICATION:
4380 		case BC_CLEAR_DEATH_NOTIFICATION: {
4381 			uint32_t target;
4382 			binder_uintptr_t cookie;
4383 			struct binder_ref *ref;
4384 			struct binder_ref_death *death = NULL;
4385 
4386 			if (get_user(target, (uint32_t __user *)ptr))
4387 				return -EFAULT;
4388 			ptr += sizeof(uint32_t);
4389 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4390 				return -EFAULT;
4391 			ptr += sizeof(binder_uintptr_t);
4392 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4393 				/*
4394 				 * Allocate memory for death notification
4395 				 * before taking lock
4396 				 */
4397 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4398 				if (death == NULL) {
4399 					WARN_ON(thread->return_error.cmd !=
4400 						BR_OK);
4401 					thread->return_error.cmd = BR_ERROR;
4402 					binder_enqueue_thread_work(
4403 						thread,
4404 						&thread->return_error.work);
4405 					binder_debug(
4406 						BINDER_DEBUG_FAILED_TRANSACTION,
4407 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4408 						proc->pid, thread->pid);
4409 					break;
4410 				}
4411 			}
4412 			binder_proc_lock(proc);
4413 			ref = binder_get_ref_olocked(proc, target, false);
4414 			if (ref == NULL) {
4415 				binder_user_error("%d:%d %s invalid ref %d\n",
4416 					proc->pid, thread->pid,
4417 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4418 					"BC_REQUEST_DEATH_NOTIFICATION" :
4419 					"BC_CLEAR_DEATH_NOTIFICATION",
4420 					target);
4421 				binder_proc_unlock(proc);
4422 				kfree(death);
4423 				break;
4424 			}
4425 
4426 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4427 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4428 				     proc->pid, thread->pid,
4429 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4430 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4431 				     "BC_CLEAR_DEATH_NOTIFICATION",
4432 				     (u64)cookie, ref->data.debug_id,
4433 				     ref->data.desc, ref->data.strong,
4434 				     ref->data.weak, ref->node->debug_id);
4435 
4436 			binder_node_lock(ref->node);
4437 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4438 				if (ref->death) {
4439 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4440 						proc->pid, thread->pid);
4441 					binder_node_unlock(ref->node);
4442 					binder_proc_unlock(proc);
4443 					kfree(death);
4444 					break;
4445 				}
4446 				binder_stats_created(BINDER_STAT_DEATH);
4447 				INIT_LIST_HEAD(&death->work.entry);
4448 				death->cookie = cookie;
4449 				ref->death = death;
4450 				if (ref->node->proc == NULL) {
4451 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4452 
4453 					binder_inner_proc_lock(proc);
4454 					binder_enqueue_work_ilocked(
4455 						&ref->death->work, &proc->todo);
4456 					binder_wakeup_proc_ilocked(proc);
4457 					binder_inner_proc_unlock(proc);
4458 				}
4459 			} else {
4460 				if (ref->death == NULL) {
4461 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4462 						proc->pid, thread->pid);
4463 					binder_node_unlock(ref->node);
4464 					binder_proc_unlock(proc);
4465 					break;
4466 				}
4467 				death = ref->death;
4468 				if (death->cookie != cookie) {
4469 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4470 						proc->pid, thread->pid,
4471 						(u64)death->cookie,
4472 						(u64)cookie);
4473 					binder_node_unlock(ref->node);
4474 					binder_proc_unlock(proc);
4475 					break;
4476 				}
4477 				ref->death = NULL;
4478 				binder_inner_proc_lock(proc);
4479 				if (list_empty(&death->work.entry)) {
4480 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4481 					if (thread->looper &
4482 					    (BINDER_LOOPER_STATE_REGISTERED |
4483 					     BINDER_LOOPER_STATE_ENTERED))
4484 						binder_enqueue_thread_work_ilocked(
4485 								thread,
4486 								&death->work);
4487 					else {
4488 						binder_enqueue_work_ilocked(
4489 								&death->work,
4490 								&proc->todo);
4491 						binder_wakeup_proc_ilocked(
4492 								proc);
4493 					}
4494 				} else {
4495 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4496 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4497 				}
4498 				binder_inner_proc_unlock(proc);
4499 			}
4500 			binder_node_unlock(ref->node);
4501 			binder_proc_unlock(proc);
4502 		} break;
4503 		case BC_DEAD_BINDER_DONE: {
4504 			struct binder_work *w;
4505 			binder_uintptr_t cookie;
4506 			struct binder_ref_death *death = NULL;
4507 
4508 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4509 				return -EFAULT;
4510 
4511 			ptr += sizeof(cookie);
4512 			binder_inner_proc_lock(proc);
4513 			list_for_each_entry(w, &proc->delivered_death,
4514 					    entry) {
4515 				struct binder_ref_death *tmp_death =
4516 					container_of(w,
4517 						     struct binder_ref_death,
4518 						     work);
4519 
4520 				if (tmp_death->cookie == cookie) {
4521 					death = tmp_death;
4522 					break;
4523 				}
4524 			}
4525 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4526 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4527 				     proc->pid, thread->pid, (u64)cookie,
4528 				     death);
4529 			if (death == NULL) {
4530 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4531 					proc->pid, thread->pid, (u64)cookie);
4532 				binder_inner_proc_unlock(proc);
4533 				break;
4534 			}
4535 			binder_dequeue_work_ilocked(&death->work);
4536 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4537 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4538 				if (thread->looper &
4539 					(BINDER_LOOPER_STATE_REGISTERED |
4540 					 BINDER_LOOPER_STATE_ENTERED))
4541 					binder_enqueue_thread_work_ilocked(
4542 						thread, &death->work);
4543 				else {
4544 					binder_enqueue_work_ilocked(
4545 							&death->work,
4546 							&proc->todo);
4547 					binder_wakeup_proc_ilocked(proc);
4548 				}
4549 			}
4550 			binder_inner_proc_unlock(proc);
4551 		} break;
4552 
4553 		case BC_REQUEST_FREEZE_NOTIFICATION: {
4554 			struct binder_handle_cookie handle_cookie;
4555 			int error;
4556 
4557 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4558 				return -EFAULT;
4559 			ptr += sizeof(handle_cookie);
4560 			error = binder_request_freeze_notification(proc, thread,
4561 								   &handle_cookie);
4562 			if (error)
4563 				return error;
4564 		} break;
4565 
4566 		case BC_CLEAR_FREEZE_NOTIFICATION: {
4567 			struct binder_handle_cookie handle_cookie;
4568 			int error;
4569 
4570 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4571 				return -EFAULT;
4572 			ptr += sizeof(handle_cookie);
4573 			error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4574 			if (error)
4575 				return error;
4576 		} break;
4577 
4578 		case BC_FREEZE_NOTIFICATION_DONE: {
4579 			binder_uintptr_t cookie;
4580 			int error;
4581 
4582 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4583 				return -EFAULT;
4584 
4585 			ptr += sizeof(cookie);
4586 			error = binder_freeze_notification_done(proc, thread, cookie);
4587 			if (error)
4588 				return error;
4589 		} break;
4590 
4591 		default:
4592 			pr_err("%d:%d unknown command %u\n",
4593 			       proc->pid, thread->pid, cmd);
4594 			return -EINVAL;
4595 		}
4596 		*consumed = ptr - buffer;
4597 	}
4598 	return 0;
4599 }
4600 
4601 static void binder_stat_br(struct binder_proc *proc,
4602 			   struct binder_thread *thread, uint32_t cmd)
4603 {
4604 	trace_binder_return(cmd);
4605 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4606 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4607 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4608 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4609 	}
4610 }
4611 
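/**
 * binder_put_node_cmd() - write a node ref command to the read buffer
 * @proc:          binder_proc owning @thread
 * @thread:        binder thread reading the command
 * @ptrp:          in/out pointer into the userspace read buffer
 * @node_ptr:      userspace address of the node
 * @node_cookie:   userspace cookie of the node
 * @node_debug_id: debug id used for logging
 * @cmd:           BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
 * @cmd_name:      human-readable name of @cmd for logging
 *
 * Copies the command and the node's ptr/cookie pair to userspace and
 * advances *@ptrp past what was written.
 *
 * Return: 0 on success, -EFAULT if the copy to userspace fails.
 */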
4612 static int binder_put_node_cmd(struct binder_proc *proc,
4613 			       struct binder_thread *thread,
4614 			       void __user **ptrp,
4615 			       binder_uintptr_t node_ptr,
4616 			       binder_uintptr_t node_cookie,
4617 			       int node_debug_id,
4618 			       uint32_t cmd, const char *cmd_name)
4619 {
4620 	void __user *ptr = *ptrp;
4621 
4622 	if (put_user(cmd, (uint32_t __user *)ptr))
4623 		return -EFAULT;
4624 	ptr += sizeof(uint32_t);
4625 
4626 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4627 		return -EFAULT;
4628 	ptr += sizeof(binder_uintptr_t);
4629 
4630 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4631 		return -EFAULT;
4632 	ptr += sizeof(binder_uintptr_t);
4633 
4634 	binder_stat_br(proc, thread, cmd);
4635 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4636 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4637 		     (u64)node_ptr, (u64)node_cookie);
4638 
4639 	*ptrp = ptr;
4640 	return 0;
4641 }
4642 
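/**
 * binder_wait_for_work() - sleep until there is work for the thread
 * @thread:       binder thread waiting for work
 * @do_proc_work: also wait for process-wide work, not just thread work
 *
 * When @do_proc_work is true the thread is added to
 * proc->waiting_threads so binder_select_thread_ilocked() can pick it
 * to service process work. The wait is freezable (TASK_FREEZABLE) and
 * interruptible.
 *
 * Return: 0 when work is available, -EINTR if interrupted by a signal.
 */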
4643 static int binder_wait_for_work(struct binder_thread *thread,
4644 				bool do_proc_work)
4645 {
4646 	DEFINE_WAIT(wait);
4647 	struct binder_proc *proc = thread->proc;
4648 	int ret = 0;
4649 
4650 	binder_inner_proc_lock(proc);
4651 	for (;;) {
4652 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4653 		if (binder_has_work_ilocked(thread, do_proc_work))
4654 			break;
4655 		if (do_proc_work)
4656 			list_add(&thread->waiting_thread_node,
4657 				 &proc->waiting_threads);
4658 		binder_inner_proc_unlock(proc);
4659 		schedule();
4660 		binder_inner_proc_lock(proc);
4661 		list_del_init(&thread->waiting_thread_node);
4662 		if (signal_pending(current)) {
4663 			ret = -EINTR;
4664 			break;
4665 		}
4666 	}
4667 	finish_wait(&thread->wait, &wait);
4668 	binder_inner_proc_unlock(proc);
4669 
4670 	return ret;
4671 }
4672 
4673 /**
4674  * binder_apply_fd_fixups() - finish fd translation
4675  * @proc:         binder_proc associated with @t->buffer
4676  * @t:            binder transaction with a list of fd fixups
4677  *
4678  * Now that we are in the context of the transaction target
4679  * process, we can allocate and install fds. Process the
4680  * list of fds to translate and fix up the buffer with the
4681  * new fds first, and only then install the files.
4682  *
4683  * If we fail to allocate an fd, skip the install and release
4684  * any fds that have already been allocated.
4685  *
4686  * Return: 0 on success, a negative errno code on failure.
4687  */
4688 static int binder_apply_fd_fixups(struct binder_proc *proc,
4689 				  struct binder_transaction *t)
4690 {
4691 	struct binder_txn_fd_fixup *fixup, *tmp;
4692 	int ret = 0;
4693 
4694 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4695 		int fd = get_unused_fd_flags(O_CLOEXEC);
4696 
4697 		if (fd < 0) {
4698 			binder_debug(BINDER_DEBUG_TRANSACTION,
4699 				     "failed fd fixup txn %d fd %d\n",
4700 				     t->debug_id, fd);
4701 			ret = -ENOMEM;
4702 			goto err;
4703 		}
4704 		binder_debug(BINDER_DEBUG_TRANSACTION,
4705 			     "fd fixup txn %d fd %d\n",
4706 			     t->debug_id, fd);
4707 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4708 		fixup->target_fd = fd;
4709 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4710 						fixup->offset, &fd,
4711 						sizeof(u32))) {
4712 			ret = -EINVAL;
4713 			goto err;
4714 		}
4715 	}
4716 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4717 		fd_install(fixup->target_fd, fixup->file);
4718 		list_del(&fixup->fixup_entry);
4719 		kfree(fixup);
4720 	}
4721 
4722 	return ret;
4723 
4724 err:
4725 	binder_free_txn_fixups(t);
4726 	return ret;
4727 }
4728 
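/**
 * binder_thread_read() - fill the read buffer with BR_* commands
 * @proc:          binder proc the thread belongs to
 * @thread:        binder thread doing the read
 * @binder_buffer: userspace buffer to fill with BR_* commands
 * @size:          size of the userspace buffer
 * @consumed:      in/out count of bytes already written to the buffer
 * @non_block:     if true, return -EAGAIN instead of sleeping
 *
 * A fresh read (*consumed == 0) always begins with BR_NOOP so userspace
 * can treat the buffer uniformly. Work items from thread->todo (and
 * proc->todo when the thread may handle process work) are translated
 * into BR_* commands until the buffer fills or the work lists drain.
 *
 * Return: 0 on success or a negative errno code on failure.
 */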
4729 static int binder_thread_read(struct binder_proc *proc,
4730 			      struct binder_thread *thread,
4731 			      binder_uintptr_t binder_buffer, size_t size,
4732 			      binder_size_t *consumed, int non_block)
4733 {
4734 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4735 	void __user *ptr = buffer + *consumed;
4736 	void __user *end = buffer + size;
4737 
4738 	int ret = 0;
4739 	int wait_for_proc_work;
4740 
4741 	if (*consumed == 0) {
4742 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4743 			return -EFAULT;
4744 		ptr += sizeof(uint32_t);
4745 	}
4746 
4747 retry:
4748 	binder_inner_proc_lock(proc);
4749 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4750 	binder_inner_proc_unlock(proc);
4751 
4752 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4753 
4754 	trace_binder_wait_for_work(wait_for_proc_work,
4755 				   !!thread->transaction_stack,
4756 				   !binder_worklist_empty(proc, &thread->todo));
4757 	if (wait_for_proc_work) {
4758 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4759 					BINDER_LOOPER_STATE_ENTERED))) {
4760 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4761 				proc->pid, thread->pid, thread->looper);
4762 			wait_event_interruptible(binder_user_error_wait,
4763 						 binder_stop_on_user_error < 2);
4764 		}
4765 		binder_set_nice(proc->default_priority);
4766 	}
4767 
4768 	if (non_block) {
4769 		if (!binder_has_work(thread, wait_for_proc_work))
4770 			ret = -EAGAIN;
4771 	} else {
4772 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4773 	}
4774 
4775 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4776 
4777 	if (ret)
4778 		return ret;
4779 
4780 	while (1) {
4781 		uint32_t cmd;
4782 		struct binder_transaction_data_secctx tr;
4783 		struct binder_transaction_data *trd = &tr.transaction_data;
4784 		struct binder_work *w = NULL;
4785 		struct list_head *list = NULL;
4786 		struct binder_transaction *t = NULL;
4787 		struct binder_thread *t_from;
4788 		size_t trsize = sizeof(*trd);
4789 
4790 		binder_inner_proc_lock(proc);
4791 		if (!binder_worklist_empty_ilocked(&thread->todo))
4792 			list = &thread->todo;
4793 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4794 			   wait_for_proc_work)
4795 			list = &proc->todo;
4796 		else {
4797 			binder_inner_proc_unlock(proc);
4798 
4799 			/* no data added beyond the initial BR_NOOP */
4800 			if (ptr - buffer == 4 && !thread->looper_need_return)
4801 				goto retry;
4802 			break;
4803 		}
4804 
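		/* ensure room for a command word plus the largest (secctx) payload */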
4805 		if (end - ptr < sizeof(tr) + 4) {
4806 			binder_inner_proc_unlock(proc);
4807 			break;
4808 		}
4809 		w = binder_dequeue_work_head_ilocked(list);
4810 		if (binder_worklist_empty_ilocked(&thread->todo))
4811 			thread->process_todo = false;
4812 
4813 		switch (w->type) {
4814 		case BINDER_WORK_TRANSACTION: {
4815 			binder_inner_proc_unlock(proc);
4816 			t = container_of(w, struct binder_transaction, work);
4817 		} break;
4818 		case BINDER_WORK_RETURN_ERROR: {
4819 			struct binder_error *e = container_of(
4820 					w, struct binder_error, work);
4821 
4822 			WARN_ON(e->cmd == BR_OK);
4823 			binder_inner_proc_unlock(proc);
4824 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4825 				return -EFAULT;
4826 			cmd = e->cmd;
4827 			e->cmd = BR_OK;
4828 			ptr += sizeof(uint32_t);
4829 
4830 			binder_stat_br(proc, thread, cmd);
4831 		} break;
4832 		case BINDER_WORK_TRANSACTION_COMPLETE:
4833 		case BINDER_WORK_TRANSACTION_PENDING:
4834 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4835 			if (proc->oneway_spam_detection_enabled &&
4836 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4837 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4838 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4839 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4840 			else
4841 				cmd = BR_TRANSACTION_COMPLETE;
4842 			binder_inner_proc_unlock(proc);
4843 			kfree(w);
4844 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4845 			if (put_user(cmd, (uint32_t __user *)ptr))
4846 				return -EFAULT;
4847 			ptr += sizeof(uint32_t);
4848 
4849 			binder_stat_br(proc, thread, cmd);
4850 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4851 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4852 				     proc->pid, thread->pid);
4853 		} break;
4854 		case BINDER_WORK_NODE: {
4855 			struct binder_node *node = container_of(w, struct binder_node, work);
4856 			int strong, weak;
4857 			binder_uintptr_t node_ptr = node->ptr;
4858 			binder_uintptr_t node_cookie = node->cookie;
4859 			int node_debug_id = node->debug_id;
4860 			int has_weak_ref;
4861 			int has_strong_ref;
4862 			void __user *orig_ptr = ptr;
4863 
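			/*
			 * Compare the node's desired ref state (strong/weak,
			 * derived from its current refs) with what userspace
			 * was last told (has_strong_ref/has_weak_ref) and
			 * emit BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
			 * for each transition. If neither strong nor weak
			 * refs remain, the node is erased and freed.
			 */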
4864 			BUG_ON(proc != node->proc);
4865 			strong = node->internal_strong_refs ||
4866 					node->local_strong_refs;
4867 			weak = !hlist_empty(&node->refs) ||
4868 					node->local_weak_refs ||
4869 					node->tmp_refs || strong;
4870 			has_strong_ref = node->has_strong_ref;
4871 			has_weak_ref = node->has_weak_ref;
4872 
4873 			if (weak && !has_weak_ref) {
4874 				node->has_weak_ref = 1;
4875 				node->pending_weak_ref = 1;
4876 				node->local_weak_refs++;
4877 			}
4878 			if (strong && !has_strong_ref) {
4879 				node->has_strong_ref = 1;
4880 				node->pending_strong_ref = 1;
4881 				node->local_strong_refs++;
4882 			}
4883 			if (!strong && has_strong_ref)
4884 				node->has_strong_ref = 0;
4885 			if (!weak && has_weak_ref)
4886 				node->has_weak_ref = 0;
4887 			if (!weak && !strong) {
4888 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4889 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4890 					     proc->pid, thread->pid,
4891 					     node_debug_id,
4892 					     (u64)node_ptr,
4893 					     (u64)node_cookie);
4894 				rb_erase(&node->rb_node, &proc->nodes);
4895 				binder_inner_proc_unlock(proc);
4896 				binder_node_lock(node);
4897 				/*
4898 				 * Acquire the node lock before freeing the
4899 				 * node to serialize with other threads that
4900 				 * may have been holding the node lock while
4901 				 * decrementing this node (avoids race where
4902 				 * this thread frees while the other thread
4903 				 * is unlocking the node after the final
4904 				 * decrement)
4905 				 */
4906 				binder_node_unlock(node);
4907 				binder_free_node(node);
4908 			} else
4909 				binder_inner_proc_unlock(proc);
4910 
4911 			if (weak && !has_weak_ref)
4912 				ret = binder_put_node_cmd(
4913 						proc, thread, &ptr, node_ptr,
4914 						node_cookie, node_debug_id,
4915 						BR_INCREFS, "BR_INCREFS");
4916 			if (!ret && strong && !has_strong_ref)
4917 				ret = binder_put_node_cmd(
4918 						proc, thread, &ptr, node_ptr,
4919 						node_cookie, node_debug_id,
4920 						BR_ACQUIRE, "BR_ACQUIRE");
4921 			if (!ret && !strong && has_strong_ref)
4922 				ret = binder_put_node_cmd(
4923 						proc, thread, &ptr, node_ptr,
4924 						node_cookie, node_debug_id,
4925 						BR_RELEASE, "BR_RELEASE");
4926 			if (!ret && !weak && has_weak_ref)
4927 				ret = binder_put_node_cmd(
4928 						proc, thread, &ptr, node_ptr,
4929 						node_cookie, node_debug_id,
4930 						BR_DECREFS, "BR_DECREFS");
4931 			if (orig_ptr == ptr)
4932 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4933 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4934 					     proc->pid, thread->pid,
4935 					     node_debug_id,
4936 					     (u64)node_ptr,
4937 					     (u64)node_cookie);
4938 			if (ret)
4939 				return ret;
4940 		} break;
4941 		case BINDER_WORK_DEAD_BINDER:
4942 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4943 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4944 			struct binder_ref_death *death;
4945 			uint32_t cmd;
4946 			binder_uintptr_t cookie;
4947 
4948 			death = container_of(w, struct binder_ref_death, work);
4949 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4950 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4951 			else
4952 				cmd = BR_DEAD_BINDER;
4953 			cookie = death->cookie;
4954 
4955 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4956 				     "%d:%d %s %016llx\n",
4957 				      proc->pid, thread->pid,
4958 				      cmd == BR_DEAD_BINDER ?
4959 				      "BR_DEAD_BINDER" :
4960 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4961 				      (u64)cookie);
4962 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4963 				binder_inner_proc_unlock(proc);
4964 				kfree(death);
4965 				binder_stats_deleted(BINDER_STAT_DEATH);
4966 			} else {
4967 				binder_enqueue_work_ilocked(
4968 						w, &proc->delivered_death);
4969 				binder_inner_proc_unlock(proc);
4970 			}
4971 			if (put_user(cmd, (uint32_t __user *)ptr))
4972 				return -EFAULT;
4973 			ptr += sizeof(uint32_t);
4974 			if (put_user(cookie,
4975 				     (binder_uintptr_t __user *)ptr))
4976 				return -EFAULT;
4977 			ptr += sizeof(binder_uintptr_t);
4978 			binder_stat_br(proc, thread, cmd);
4979 			if (cmd == BR_DEAD_BINDER)
4980 				goto done; /* DEAD_BINDER notifications can cause transactions */
4981 		} break;
4982 
4983 		case BINDER_WORK_FROZEN_BINDER: {
4984 			struct binder_ref_freeze *freeze;
4985 			struct binder_frozen_state_info info;
4986 
4987 			memset(&info, 0, sizeof(info));
4988 			freeze = container_of(w, struct binder_ref_freeze, work);
4989 			info.is_frozen = freeze->is_frozen;
4990 			info.cookie = freeze->cookie;
4991 			freeze->sent = true;
4992 			binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4993 			binder_inner_proc_unlock(proc);
4994 
4995 			if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4996 				return -EFAULT;
4997 			ptr += sizeof(uint32_t);
4998 			if (copy_to_user(ptr, &info, sizeof(info)))
4999 				return -EFAULT;
5000 			ptr += sizeof(info);
5001 			binder_stat_br(proc, thread, BR_FROZEN_BINDER);
5002 			goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
5003 		} break;
5004 
5005 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5006 			struct binder_ref_freeze *freeze =
5007 			    container_of(w, struct binder_ref_freeze, work);
5008 			binder_uintptr_t cookie = freeze->cookie;
5009 
5010 			binder_inner_proc_unlock(proc);
5011 			kfree(freeze);
5012 			binder_stats_deleted(BINDER_STAT_FREEZE);
5013 			if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
5014 				return -EFAULT;
5015 			ptr += sizeof(uint32_t);
5016 			if (put_user(cookie, (binder_uintptr_t __user *)ptr))
5017 				return -EFAULT;
5018 			ptr += sizeof(binder_uintptr_t);
5019 			binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
5020 		} break;
5021 
5022 		default:
5023 			binder_inner_proc_unlock(proc);
5024 			pr_err("%d:%d: bad work type %d\n",
5025 			       proc->pid, thread->pid, w->type);
5026 			break;
5027 		}
5028 
5029 		if (!t)
5030 			continue;
5031 
5032 		BUG_ON(t->buffer == NULL);
5033 		if (t->buffer->target_node) {
5034 			struct binder_node *target_node = t->buffer->target_node;
5035 
5036 			trd->target.ptr = target_node->ptr;
5037 			trd->cookie = target_node->cookie;
5038 			t->saved_priority = task_nice(current);
5039 			if (t->priority < target_node->min_priority &&
5040 			    !(t->flags & TF_ONE_WAY))
5041 				binder_set_nice(t->priority);
5042 			else if (!(t->flags & TF_ONE_WAY) ||
5043 				 t->saved_priority > target_node->min_priority)
5044 				binder_set_nice(target_node->min_priority);
5045 			cmd = BR_TRANSACTION;
5046 		} else {
5047 			trd->target.ptr = 0;
5048 			trd->cookie = 0;
5049 			cmd = BR_REPLY;
5050 		}
5051 		trd->code = t->code;
5052 		trd->flags = t->flags;
5053 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
5054 
5055 		t_from = binder_get_txn_from(t);
5056 		if (t_from) {
5057 			struct task_struct *sender = t_from->proc->tsk;
5058 
5059 			trd->sender_pid =
5060 				task_tgid_nr_ns(sender,
5061 						task_active_pid_ns(current));
5062 		} else {
5063 			trd->sender_pid = 0;
5064 		}
5065 
5066 		ret = binder_apply_fd_fixups(proc, t);
5067 		if (ret) {
5068 			struct binder_buffer *buffer = t->buffer;
5069 			bool oneway = !!(t->flags & TF_ONE_WAY);
5070 			int tid = t->debug_id;
5071 
5072 			if (t_from)
5073 				binder_thread_dec_tmpref(t_from);
5074 			buffer->transaction = NULL;
5075 			binder_cleanup_transaction(t, "fd fixups failed",
5076 						   BR_FAILED_REPLY);
5077 			binder_free_buf(proc, thread, buffer, true);
5078 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5079 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5080 				     proc->pid, thread->pid,
5081 				     oneway ? "async " :
5082 					(cmd == BR_REPLY ? "reply " : ""),
5083 				     tid, BR_FAILED_REPLY, ret, __LINE__);
5084 			if (cmd == BR_REPLY) {
5085 				cmd = BR_FAILED_REPLY;
5086 				if (put_user(cmd, (uint32_t __user *)ptr))
5087 					return -EFAULT;
5088 				ptr += sizeof(uint32_t);
5089 				binder_stat_br(proc, thread, cmd);
5090 				break;
5091 			}
5092 			continue;
5093 		}
5094 		trd->data_size = t->buffer->data_size;
5095 		trd->offsets_size = t->buffer->offsets_size;
5096 		trd->data.ptr.buffer = t->buffer->user_data;
5097 		trd->data.ptr.offsets = trd->data.ptr.buffer +
5098 					ALIGN(t->buffer->data_size,
5099 					    sizeof(void *));
5100 
5101 		tr.secctx = t->security_ctx;
5102 		if (t->security_ctx) {
5103 			cmd = BR_TRANSACTION_SEC_CTX;
5104 			trsize = sizeof(tr);
5105 		}
5106 		if (put_user(cmd, (uint32_t __user *)ptr)) {
5107 			if (t_from)
5108 				binder_thread_dec_tmpref(t_from);
5109 
5110 			binder_cleanup_transaction(t, "put_user failed",
5111 						   BR_FAILED_REPLY);
5112 
5113 			return -EFAULT;
5114 		}
5115 		ptr += sizeof(uint32_t);
5116 		if (copy_to_user(ptr, &tr, trsize)) {
5117 			if (t_from)
5118 				binder_thread_dec_tmpref(t_from);
5119 
5120 			binder_cleanup_transaction(t, "copy_to_user failed",
5121 						   BR_FAILED_REPLY);
5122 
5123 			return -EFAULT;
5124 		}
5125 		ptr += trsize;
5126 
5127 		trace_binder_transaction_received(t);
5128 		binder_stat_br(proc, thread, cmd);
5129 		binder_debug(BINDER_DEBUG_TRANSACTION,
5130 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5131 			     proc->pid, thread->pid,
5132 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5133 				(cmd == BR_TRANSACTION_SEC_CTX) ?
5134 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5135 			     t->debug_id, t_from ? t_from->proc->pid : 0,
5136 			     t_from ? t_from->pid : 0, cmd,
5137 			     t->buffer->data_size, t->buffer->offsets_size);
5138 
5139 		if (t_from)
5140 			binder_thread_dec_tmpref(t_from);
5141 		t->buffer->allow_user_free = 1;
5142 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5143 			binder_inner_proc_lock(thread->proc);
5144 			t->to_parent = thread->transaction_stack;
5145 			t->to_thread = thread;
5146 			thread->transaction_stack = t;
5147 			binder_inner_proc_unlock(thread->proc);
5148 		} else {
5149 			binder_free_transaction(t);
5150 		}
5151 		break;
5152 	}
5153 
5154 done:
5155 
5156 	*consumed = ptr - buffer;
5157 	binder_inner_proc_lock(proc);
5158 	if (proc->requested_threads == 0 &&
5159 	    list_empty(&thread->proc->waiting_threads) &&
5160 	    proc->requested_threads_started < proc->max_threads &&
5161 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5162 	     BINDER_LOOPER_STATE_ENTERED))
5163 	     /* user-space fails to spawn a new thread if we leave this out */) {
5164 		proc->requested_threads++;
5165 		binder_inner_proc_unlock(proc);
5166 		binder_debug(BINDER_DEBUG_THREADS,
5167 			     "%d:%d BR_SPAWN_LOOPER\n",
5168 			     proc->pid, thread->pid);
5169 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5170 			return -EFAULT;
5171 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5172 	} else
5173 		binder_inner_proc_unlock(proc);
5174 	return 0;
5175 }
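
/*
 * Userspace side of the BR_SPAWN_LOOPER handshake above, as a minimal,
 * hypothetical sketch (not part of this driver; binder_fd is an assumed
 * open fd on a binder device). On reading BR_SPAWN_LOOPER, the process
 * starts a thread which announces itself with BC_REGISTER_LOOPER:
 *
 *	uint32_t cmd = BC_REGISTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *	};
 *
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * Handling BC_REGISTER_LOOPER is what advances
 * proc->requested_threads_started, so without this handshake the spawn
 * logic above would stop requesting threads while a request is pending.
 */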
5176 
5177 static void binder_release_work(struct binder_proc *proc,
5178 				struct list_head *list)
5179 {
5180 	struct binder_work *w;
5181 	enum binder_work_type wtype;
5182 
5183 	while (1) {
5184 		binder_inner_proc_lock(proc);
5185 		w = binder_dequeue_work_head_ilocked(list);
5186 		wtype = w ? w->type : 0;
5187 		binder_inner_proc_unlock(proc);
5188 		if (!w)
5189 			return;
5190 
5191 		switch (wtype) {
5192 		case BINDER_WORK_TRANSACTION: {
5193 			struct binder_transaction *t;
5194 
5195 			t = container_of(w, struct binder_transaction, work);
5196 
5197 			binder_cleanup_transaction(t, "process died.",
5198 						   BR_DEAD_REPLY);
5199 		} break;
5200 		case BINDER_WORK_RETURN_ERROR: {
5201 			struct binder_error *e = container_of(
5202 					w, struct binder_error, work);
5203 
5204 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5205 				"undelivered TRANSACTION_ERROR: %u\n",
5206 				e->cmd);
5207 		} break;
5208 		case BINDER_WORK_TRANSACTION_PENDING:
5209 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5210 		case BINDER_WORK_TRANSACTION_COMPLETE: {
5211 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5212 				"undelivered TRANSACTION_COMPLETE\n");
5213 			kfree(w);
5214 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5215 		} break;
5216 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5217 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5218 			struct binder_ref_death *death;
5219 
5220 			death = container_of(w, struct binder_ref_death, work);
5221 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5222 				"undelivered death notification, %016llx\n",
5223 				(u64)death->cookie);
5224 			kfree(death);
5225 			binder_stats_deleted(BINDER_STAT_DEATH);
5226 		} break;
5227 		case BINDER_WORK_NODE:
5228 			break;
5229 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5230 			struct binder_ref_freeze *freeze;
5231 
5232 			freeze = container_of(w, struct binder_ref_freeze, work);
5233 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5234 				     "undelivered freeze notification, %016llx\n",
5235 				     (u64)freeze->cookie);
5236 			kfree(freeze);
5237 			binder_stats_deleted(BINDER_STAT_FREEZE);
5238 		} break;
5239 		default:
5240 			pr_err("unexpected work type, %d, not freed\n",
5241 			       wtype);
5242 			break;
5243 		}
5244 	}
5245 
5246 }
5247 
5248 static struct binder_thread *binder_get_thread_ilocked(
5249 		struct binder_proc *proc, struct binder_thread *new_thread)
5250 {
5251 	struct binder_thread *thread = NULL;
5252 	struct rb_node *parent = NULL;
5253 	struct rb_node **p = &proc->threads.rb_node;
5254 
5255 	while (*p) {
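	/*
	 * Standard rbtree lookup keyed by the calling task's pid; if no
	 * match is found, *p is left at the link where a new thread
	 * node must be inserted.
	 */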
5256 		parent = *p;
5257 		thread = rb_entry(parent, struct binder_thread, rb_node);
5258 
5259 		if (current->pid < thread->pid)
5260 			p = &(*p)->rb_left;
5261 		else if (current->pid > thread->pid)
5262 			p = &(*p)->rb_right;
5263 		else
5264 			return thread;
5265 	}
5266 	if (!new_thread)
5267 		return NULL;
5268 	thread = new_thread;
5269 	binder_stats_created(BINDER_STAT_THREAD);
5270 	thread->proc = proc;
5271 	thread->pid = current->pid;
5272 	atomic_set(&thread->tmp_ref, 0);
5273 	init_waitqueue_head(&thread->wait);
5274 	INIT_LIST_HEAD(&thread->todo);
5275 	rb_link_node(&thread->rb_node, parent, p);
5276 	rb_insert_color(&thread->rb_node, &proc->threads);
5277 	thread->looper_need_return = true;
5278 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5279 	thread->return_error.cmd = BR_OK;
5280 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5281 	thread->reply_error.cmd = BR_OK;
5282 	thread->ee.command = BR_OK;
5283 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5284 	return thread;
5285 }
5286 
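/*
 * Look up the calling task's binder_thread, creating it on first use.
 * The allocation is done optimistically outside the inner lock and is
 * discarded if a racing caller inserted the thread first.
 */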
5287 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5288 {
5289 	struct binder_thread *thread;
5290 	struct binder_thread *new_thread;
5291 
5292 	binder_inner_proc_lock(proc);
5293 	thread = binder_get_thread_ilocked(proc, NULL);
5294 	binder_inner_proc_unlock(proc);
5295 	if (!thread) {
5296 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5297 		if (new_thread == NULL)
5298 			return NULL;
5299 		binder_inner_proc_lock(proc);
5300 		thread = binder_get_thread_ilocked(proc, new_thread);
5301 		binder_inner_proc_unlock(proc);
5302 		if (thread != new_thread)
5303 			kfree(new_thread);
5304 	}
5305 	return thread;
5306 }
5307 
5308 static void binder_free_proc(struct binder_proc *proc)
5309 {
5310 	struct binder_device *device;
5311 
5312 	BUG_ON(!list_empty(&proc->todo));
5313 	BUG_ON(!list_empty(&proc->delivered_death));
5314 	if (proc->outstanding_txns)
5315 		pr_warn("%s: Unexpected outstanding_txns %d\n",
5316 			__func__, proc->outstanding_txns);
5317 	device = container_of(proc->context, struct binder_device, context);
5318 	if (refcount_dec_and_test(&device->ref)) {
5319 		binder_remove_device(device);
5320 		kfree(proc->context->name);
5321 		kfree(device);
5322 	}
5323 	binder_alloc_deferred_release(&proc->alloc);
5324 	put_task_struct(proc->tsk);
5325 	put_cred(proc->cred);
5326 	binder_stats_deleted(BINDER_STAT_PROC);
5327 	dbitmap_free(&proc->dmap);
5328 	kfree(proc);
5329 }
5330 
5331 static void binder_free_thread(struct binder_thread *thread)
5332 {
5333 	BUG_ON(!list_empty(&thread->todo));
5334 	binder_stats_deleted(BINDER_STAT_THREAD);
5335 	binder_proc_dec_tmpref(thread->proc);
5336 	kfree(thread);
5337 }
5338 
5339 static int binder_thread_release(struct binder_proc *proc,
5340 				 struct binder_thread *thread)
5341 {
5342 	struct binder_transaction *t;
5343 	struct binder_transaction *send_reply = NULL;
5344 	int active_transactions = 0;
5345 	struct binder_transaction *last_t = NULL;
5346 
5347 	binder_inner_proc_lock(thread->proc);
5348 	/*
5349 	 * Take a ref on the proc so it survives after we
5350 	 * remove this thread from proc->threads. The matching
5351 	 * decrement happens when the thread is actually freed
5352 	 * in binder_free_thread().
5353 	 */
5354 	proc->tmp_ref++;
5355 	/*
5356 	 * take a ref on this thread to ensure it
5357 	 * survives while we are releasing it
5358 	 */
5359 	atomic_inc(&thread->tmp_ref);
5360 	rb_erase(&thread->rb_node, &proc->threads);
5361 	t = thread->transaction_stack;
5362 	if (t) {
5363 		spin_lock(&t->lock);
5364 		if (t->to_thread == thread)
5365 			send_reply = t;
5366 	} else {
5367 		__acquire(&t->lock);
5368 	}
5369 	thread->is_dead = true;
5370 
5371 	while (t) {
5372 		last_t = t;
5373 		active_transactions++;
5374 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5375 			     "release %d:%d transaction %d %s, still active\n",
5376 			      proc->pid, thread->pid,
5377 			     t->debug_id,
5378 			     (t->to_thread == thread) ? "in" : "out");
5379 
5380 		if (t->to_thread == thread) {
5381 			thread->proc->outstanding_txns--;
5382 			t->to_proc = NULL;
5383 			t->to_thread = NULL;
5384 			if (t->buffer) {
5385 				t->buffer->transaction = NULL;
5386 				t->buffer = NULL;
5387 			}
5388 			t = t->to_parent;
5389 		} else if (t->from == thread) {
5390 			t->from = NULL;
5391 			t = t->from_parent;
5392 		} else
5393 			BUG();
5394 		spin_unlock(&last_t->lock);
5395 		if (t)
5396 			spin_lock(&t->lock);
5397 		else
5398 			__acquire(&t->lock);
5399 	}
5400 	/* annotation for sparse, lock not acquired in last iteration above */
5401 	__release(&t->lock);
5402 
5403 	/*
5404 	 * If this thread used poll, make sure we remove the waitqueue from any
5405 	 * poll data structures holding it.
5406 	 */
5407 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5408 		wake_up_pollfree(&thread->wait);
5409 
5410 	binder_inner_proc_unlock(thread->proc);
5411 
5412 	/*
5413 	 * This is needed to avoid races between wake_up_pollfree() above and
5414 	 * someone else removing the last entry from the queue for other reasons
5415 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5416 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5417 	 * we can be sure they're done after we call synchronize_rcu().
5418 	 */
5419 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5420 		synchronize_rcu();
5421 
5422 	if (send_reply)
5423 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5424 	binder_release_work(proc, &thread->todo);
5425 	binder_thread_dec_tmpref(thread);
5426 	return active_transactions;
5427 }
5428 
5429 static __poll_t binder_poll(struct file *filp,
5430 				struct poll_table_struct *wait)
5431 {
5432 	struct binder_proc *proc = filp->private_data;
5433 	struct binder_thread *thread = NULL;
5434 	bool wait_for_proc_work;
5435 
5436 	thread = binder_get_thread(proc);
5437 	if (!thread)
5438 		return EPOLLERR;
5439 
5440 	binder_inner_proc_lock(thread->proc);
5441 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5442 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5443 
5444 	binder_inner_proc_unlock(thread->proc);
5445 
5446 	poll_wait(filp, &thread->wait, wait);
5447 
5448 	if (binder_has_work(thread, wait_for_proc_work))
5449 		return EPOLLIN;
5450 
5451 	return 0;
5452 }
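
/*
 * A minimal, hypothetical userspace sketch (not part of this driver;
 * binder_fd is an assumed open fd) of consuming the EPOLLIN result
 * produced above:
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
 *	epoll_wait(epfd, &ev, 1, -1);
 *	// work is pending; drain it via BINDER_WRITE_READ
 */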
5453 
5454 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5455 				struct binder_thread *thread)
5456 {
5457 	int ret = 0;
5458 	struct binder_proc *proc = filp->private_data;
5459 	void __user *ubuf = (void __user *)arg;
5460 	struct binder_write_read bwr;
5461 
5462 	if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
5463 		return -EFAULT;
5464 
5465 	binder_debug(BINDER_DEBUG_READ_WRITE,
5466 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5467 		     proc->pid, thread->pid,
5468 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5469 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5470 
5471 	if (bwr.write_size > 0) {
5472 		ret = binder_thread_write(proc, thread,
5473 					  bwr.write_buffer,
5474 					  bwr.write_size,
5475 					  &bwr.write_consumed);
5476 		trace_binder_write_done(ret);
5477 		if (ret < 0) {
5478 			bwr.read_consumed = 0;
5479 			goto out;
5480 		}
5481 	}
5482 	if (bwr.read_size > 0) {
5483 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5484 					 bwr.read_size,
5485 					 &bwr.read_consumed,
5486 					 filp->f_flags & O_NONBLOCK);
5487 		trace_binder_read_done(ret);
5488 		binder_inner_proc_lock(proc);
5489 		if (!binder_worklist_empty_ilocked(&proc->todo))
5490 			binder_wakeup_proc_ilocked(proc);
5491 		binder_inner_proc_unlock(proc);
5492 		if (ret < 0)
5493 			goto out;
5494 	}
5495 	binder_debug(BINDER_DEBUG_READ_WRITE,
5496 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5497 		     proc->pid, thread->pid,
5498 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5499 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5500 out:
5501 	if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5502 		ret = -EFAULT;
5503 	return ret;
5504 }
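
/*
 * A minimal, hypothetical userspace sketch (not part of this driver;
 * binder_fd, write_buf and read_buf are assumed) of driving the two
 * halves handled above; consumed counts come back in the same struct:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size = write_len,
 *		.read_buffer = (binder_uintptr_t)read_buf,
 *		.read_size = sizeof(read_buf),
 *	};
 *
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed and bwr.read_consumed report progress
 */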
5505 
5506 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5507 				    struct flat_binder_object *fbo)
5508 {
5509 	int ret = 0;
5510 	struct binder_proc *proc = filp->private_data;
5511 	struct binder_context *context = proc->context;
5512 	struct binder_node *new_node;
5513 	kuid_t curr_euid = current_euid();
5514 
5515 	guard(mutex)(&context->context_mgr_node_lock);
5516 	if (context->binder_context_mgr_node) {
5517 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5518 		return -EBUSY;
5519 	}
5520 	ret = security_binder_set_context_mgr(proc->cred);
5521 	if (ret < 0)
5522 		return ret;
5523 	if (uid_valid(context->binder_context_mgr_uid)) {
5524 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5525 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5526 			       from_kuid(&init_user_ns, curr_euid),
5527 			       from_kuid(&init_user_ns,
5528 					 context->binder_context_mgr_uid));
5529 			return -EPERM;
5530 		}
5531 	} else {
5532 		context->binder_context_mgr_uid = curr_euid;
5533 	}
5534 	new_node = binder_new_node(proc, fbo);
5535 	if (!new_node)
5536 		return -ENOMEM;
5537 	binder_node_lock(new_node);
5538 	new_node->local_weak_refs++;
5539 	new_node->local_strong_refs++;
5540 	new_node->has_strong_ref = 1;
5541 	new_node->has_weak_ref = 1;
5542 	context->binder_context_mgr_node = new_node;
5543 	binder_node_unlock(new_node);
5544 	binder_put_node(new_node);
5545 	return ret;
5546 }
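
/*
 * A minimal, hypothetical sketch (not part of this driver; binder_fd
 * is an assumed open fd) of how a servicemanager-like process
 * registers itself through the path above:
 *
 *	struct flat_binder_object fbo = {
 *		.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
 *	};
 *
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
 *	// or, without the extended object:
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 */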
5547 
5548 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5549 		struct binder_node_info_for_ref *info)
5550 {
5551 	struct binder_node *node;
5552 	struct binder_context *context = proc->context;
5553 	__u32 handle = info->handle;
5554 
5555 	if (info->strong_count || info->weak_count || info->reserved1 ||
5556 	    info->reserved2 || info->reserved3) {
5557 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
5558 				  proc->pid);
5559 		return -EINVAL;
5560 	}
5561 
5562 	/* This ioctl may only be used by the context manager */
5563 	mutex_lock(&context->context_mgr_node_lock);
5564 	if (!context->binder_context_mgr_node ||
5565 		context->binder_context_mgr_node->proc != proc) {
5566 		mutex_unlock(&context->context_mgr_node_lock);
5567 		return -EPERM;
5568 	}
5569 	mutex_unlock(&context->context_mgr_node_lock);
5570 
5571 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5572 	if (!node)
5573 		return -EINVAL;
5574 
5575 	info->strong_count = node->local_strong_refs +
5576 		node->internal_strong_refs;
5577 	info->weak_count = node->local_weak_refs;
5578 
5579 	binder_put_node(node);
5580 
5581 	return 0;
5582 }
5583 
5584 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5585 				struct binder_node_debug_info *info)
5586 {
5587 	struct rb_node *n;
5588 	binder_uintptr_t ptr = info->ptr;
5589 
5590 	memset(info, 0, sizeof(*info));
5591 
5592 	binder_inner_proc_lock(proc);
5593 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5594 		struct binder_node *node = rb_entry(n, struct binder_node,
5595 						    rb_node);
5596 		if (node->ptr > ptr) {
5597 			info->ptr = node->ptr;
5598 			info->cookie = node->cookie;
5599 			info->has_strong_ref = node->has_strong_ref;
5600 			info->has_weak_ref = node->has_weak_ref;
5601 			break;
5602 		}
5603 	}
5604 	binder_inner_proc_unlock(proc);
5605 
5606 	return 0;
5607 }
5608 
5609 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5610 {
5611 	struct rb_node *n;
5612 	struct binder_thread *thread;
5613 
5614 	if (proc->outstanding_txns > 0)
5615 		return true;
5616 
5617 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5618 		thread = rb_entry(n, struct binder_thread, rb_node);
5619 		if (thread->transaction_stack)
5620 			return true;
5621 	}
5622 	return false;
5623 }
5624 
5625 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5626 {
5627 	struct binder_node *prev = NULL;
5628 	struct rb_node *n;
5629 	struct binder_ref *ref;
5630 
5631 	binder_inner_proc_lock(proc);
5632 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5633 		struct binder_node *node;
5634 
5635 		node = rb_entry(n, struct binder_node, rb_node);
5636 		binder_inc_node_tmpref_ilocked(node);
5637 		binder_inner_proc_unlock(proc);
5638 		if (prev)
5639 			binder_put_node(prev);
5640 		binder_node_lock(node);
5641 		hlist_for_each_entry(ref, &node->refs, node_entry) {
5642 			/*
5643 			 * Need the node lock to synchronize
5644 			 * with new notification requests and the
5645 			 * inner lock to synchronize with queued
5646 			 * freeze notifications.
5647 			 */
5648 			binder_inner_proc_lock(ref->proc);
5649 			if (!ref->freeze) {
5650 				binder_inner_proc_unlock(ref->proc);
5651 				continue;
5652 			}
5653 			ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5654 			if (list_empty(&ref->freeze->work.entry)) {
5655 				ref->freeze->is_frozen = is_frozen;
5656 				binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5657 				binder_wakeup_proc_ilocked(ref->proc);
5658 			} else {
5659 				if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5660 					ref->freeze->resend = true;
5661 				ref->freeze->is_frozen = is_frozen;
5662 			}
5663 			binder_inner_proc_unlock(ref->proc);
5664 		}
5665 		prev = node;
5666 		binder_node_unlock(node);
5667 		binder_inner_proc_lock(proc);
5668 		if (proc->is_dead)
5669 			break;
5670 	}
5671 	binder_inner_proc_unlock(proc);
5672 	if (prev)
5673 		binder_put_node(prev);
5674 }
5675 
5676 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5677 			       struct binder_proc *target_proc)
5678 {
5679 	int ret = 0;
5680 
5681 	if (!info->enable) {
5682 		binder_inner_proc_lock(target_proc);
5683 		target_proc->sync_recv = false;
5684 		target_proc->async_recv = false;
5685 		target_proc->is_frozen = false;
5686 		binder_inner_proc_unlock(target_proc);
5687 		binder_add_freeze_work(target_proc, false);
5688 		return 0;
5689 	}
5690 
5691 	/*
5692 	 * Freezing the target. Prevent new transactions by
5693 	 * setting the frozen state. If a timeout is specified,
5694 	 * wait for transactions to drain.
5695 	 */
5696 	binder_inner_proc_lock(target_proc);
5697 	target_proc->sync_recv = false;
5698 	target_proc->async_recv = false;
5699 	target_proc->is_frozen = true;
5700 	binder_inner_proc_unlock(target_proc);
5701 
5702 	if (info->timeout_ms > 0)
5703 		ret = wait_event_interruptible_timeout(
5704 			target_proc->freeze_wait,
5705 			(!target_proc->outstanding_txns),
5706 			msecs_to_jiffies(info->timeout_ms));
5707 
5708 	/* Check pending transactions that wait for reply */
5709 	if (ret >= 0) {
5710 		binder_inner_proc_lock(target_proc);
5711 		if (binder_txns_pending_ilocked(target_proc))
5712 			ret = -EAGAIN;
5713 		binder_inner_proc_unlock(target_proc);
5714 	}
5715 
5716 	if (ret < 0) {
5717 		binder_inner_proc_lock(target_proc);
5718 		target_proc->is_frozen = false;
5719 		binder_inner_proc_unlock(target_proc);
5720 	} else {
5721 		binder_add_freeze_work(target_proc, true);
5722 	}
5723 
5724 	return ret;
5725 }
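
/*
 * A minimal, hypothetical userspace sketch (not part of this driver;
 * binder_fd and target_pid are assumed) of the freeze sequence handled
 * above; EAGAIN means transactions were still pending at the timeout:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	while (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 &&
 *	       errno == EAGAIN)
 *		;	// retry (or back off) until transactions drain
 */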
5726 
5727 static int binder_ioctl_get_freezer_info(
5728 				struct binder_frozen_status_info *info)
5729 {
5730 	struct binder_proc *target_proc;
5731 	bool found = false;
5732 	__u32 txns_pending;
5733 
5734 	info->sync_recv = 0;
5735 	info->async_recv = 0;
5736 
5737 	mutex_lock(&binder_procs_lock);
5738 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5739 		if (target_proc->pid == info->pid) {
5740 			found = true;
5741 			binder_inner_proc_lock(target_proc);
5742 			txns_pending = binder_txns_pending_ilocked(target_proc);
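			/*
			 * Bit 0 of sync_recv reports a sync transaction
			 * received while frozen; bit 1 reports transactions
			 * still pending on this proc.
			 */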
5743 			info->sync_recv |= target_proc->sync_recv |
5744 					(txns_pending << 1);
5745 			info->async_recv |= target_proc->async_recv;
5746 			binder_inner_proc_unlock(target_proc);
5747 		}
5748 	}
5749 	mutex_unlock(&binder_procs_lock);
5750 
5751 	if (!found)
5752 		return -EINVAL;
5753 
5754 	return 0;
5755 }
5756 
5757 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5758 					   void __user *ubuf)
5759 {
5760 	struct binder_extended_error ee;
5761 
5762 	binder_inner_proc_lock(thread->proc);
5763 	ee = thread->ee;
5764 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5765 	binder_inner_proc_unlock(thread->proc);
5766 
5767 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5768 		return -EFAULT;
5769 
5770 	return 0;
5771 }
5772 
5773 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5774 {
5775 	int ret;
5776 	struct binder_proc *proc = filp->private_data;
5777 	struct binder_thread *thread;
5778 	void __user *ubuf = (void __user *)arg;
5779 
5780 	trace_binder_ioctl(cmd, arg);
5781 
5782 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5783 	if (ret)
5784 		goto err_unlocked;
5785 
5786 	thread = binder_get_thread(proc);
5787 	if (thread == NULL) {
5788 		ret = -ENOMEM;
5789 		goto err;
5790 	}
5791 
5792 	switch (cmd) {
5793 	case BINDER_WRITE_READ:
5794 		ret = binder_ioctl_write_read(filp, arg, thread);
5795 		if (ret)
5796 			goto err;
5797 		break;
5798 	case BINDER_SET_MAX_THREADS: {
5799 		u32 max_threads;
5800 
5801 		if (copy_from_user(&max_threads, ubuf,
5802 				   sizeof(max_threads))) {
5803 			ret = -EINVAL;
5804 			goto err;
5805 		}
5806 		binder_inner_proc_lock(proc);
5807 		proc->max_threads = max_threads;
5808 		binder_inner_proc_unlock(proc);
5809 		break;
5810 	}
5811 	case BINDER_SET_CONTEXT_MGR_EXT: {
5812 		struct flat_binder_object fbo;
5813 
5814 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5815 			ret = -EINVAL;
5816 			goto err;
5817 		}
5818 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5819 		if (ret)
5820 			goto err;
5821 		break;
5822 	}
5823 	case BINDER_SET_CONTEXT_MGR:
5824 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5825 		if (ret)
5826 			goto err;
5827 		break;
5828 	case BINDER_THREAD_EXIT:
5829 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5830 			     proc->pid, thread->pid);
5831 		binder_thread_release(proc, thread);
5832 		thread = NULL;
5833 		break;
5834 	case BINDER_VERSION: {
5835 		struct binder_version __user *ver = ubuf;
5836 
5837 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5838 			     &ver->protocol_version)) {
5839 			ret = -EINVAL;
5840 			goto err;
5841 		}
5842 		break;
5843 	}
5844 	case BINDER_GET_NODE_INFO_FOR_REF: {
5845 		struct binder_node_info_for_ref info;
5846 
5847 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5848 			ret = -EFAULT;
5849 			goto err;
5850 		}
5851 
5852 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5853 		if (ret < 0)
5854 			goto err;
5855 
5856 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5857 			ret = -EFAULT;
5858 			goto err;
5859 		}
5860 
5861 		break;
5862 	}
5863 	case BINDER_GET_NODE_DEBUG_INFO: {
5864 		struct binder_node_debug_info info;
5865 
5866 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5867 			ret = -EFAULT;
5868 			goto err;
5869 		}
5870 
5871 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5872 		if (ret < 0)
5873 			goto err;
5874 
5875 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5876 			ret = -EFAULT;
5877 			goto err;
5878 		}
5879 		break;
5880 	}
5881 	case BINDER_FREEZE: {
5882 		struct binder_freeze_info info;
5883 		struct binder_proc **target_procs = NULL, *target_proc;
5884 		int target_procs_count = 0, i = 0;
5885 
5886 		ret = 0;
5887 
5888 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5889 			ret = -EFAULT;
5890 			goto err;
5891 		}
5892 
5893 		mutex_lock(&binder_procs_lock);
5894 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5895 			if (target_proc->pid == info.pid)
5896 				target_procs_count++;
5897 		}
5898 
5899 		if (target_procs_count == 0) {
5900 			mutex_unlock(&binder_procs_lock);
5901 			ret = -EINVAL;
5902 			goto err;
5903 		}
5904 
5905 		target_procs = kcalloc(target_procs_count,
5906 				       sizeof(struct binder_proc *),
5907 				       GFP_KERNEL);
5908 
5909 		if (!target_procs) {
5910 			mutex_unlock(&binder_procs_lock);
5911 			ret = -ENOMEM;
5912 			goto err;
5913 		}
5914 
5915 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5916 			if (target_proc->pid != info.pid)
5917 				continue;
5918 
5919 			binder_inner_proc_lock(target_proc);
5920 			target_proc->tmp_ref++;
5921 			binder_inner_proc_unlock(target_proc);
5922 
5923 			target_procs[i++] = target_proc;
5924 		}
5925 		mutex_unlock(&binder_procs_lock);
5926 
5927 		for (i = 0; i < target_procs_count; i++) {
5928 			if (ret >= 0)
5929 				ret = binder_ioctl_freeze(&info,
5930 							  target_procs[i]);
5931 
5932 			binder_proc_dec_tmpref(target_procs[i]);
5933 		}
5934 
5935 		kfree(target_procs);
5936 
5937 		if (ret < 0)
5938 			goto err;
5939 		break;
5940 	}
5941 	case BINDER_GET_FROZEN_INFO: {
5942 		struct binder_frozen_status_info info;
5943 
5944 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5945 			ret = -EFAULT;
5946 			goto err;
5947 		}
5948 
5949 		ret = binder_ioctl_get_freezer_info(&info);
5950 		if (ret < 0)
5951 			goto err;
5952 
5953 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5954 			ret = -EFAULT;
5955 			goto err;
5956 		}
5957 		break;
5958 	}
5959 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5960 		uint32_t enable;
5961 
5962 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5963 			ret = -EFAULT;
5964 			goto err;
5965 		}
5966 		binder_inner_proc_lock(proc);
5967 		proc->oneway_spam_detection_enabled = (bool)enable;
5968 		binder_inner_proc_unlock(proc);
5969 		break;
5970 	}
5971 	case BINDER_GET_EXTENDED_ERROR:
5972 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5973 		if (ret < 0)
5974 			goto err;
5975 		break;
5976 	default:
5977 		ret = -EINVAL;
5978 		goto err;
5979 	}
5980 	ret = 0;
5981 err:
5982 	if (thread)
5983 		thread->looper_need_return = false;
5984 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5985 	if (ret && ret != -EINTR)
5986 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5987 err_unlocked:
5988 	trace_binder_ioctl_done(ret);
5989 	return ret;
5990 }
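
/*
 * A minimal, hypothetical sketch (not part of this driver; binder_fd
 * is an assumed open fd) of the version probe matching the
 * BINDER_VERSION case above, typically the first ioctl a client issues:
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		abort();	// protocol mismatch, reject the device
 */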
5991 
5992 static void binder_vma_open(struct vm_area_struct *vma)
5993 {
5994 	struct binder_proc *proc = vma->vm_private_data;
5995 
5996 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5997 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5998 		     proc->pid, vma->vm_start, vma->vm_end,
5999 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6000 		     (unsigned long)pgprot_val(vma->vm_page_prot));
6001 }
6002 
6003 static void binder_vma_close(struct vm_area_struct *vma)
6004 {
6005 	struct binder_proc *proc = vma->vm_private_data;
6006 
6007 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6008 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
6009 		     proc->pid, vma->vm_start, vma->vm_end,
6010 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6011 		     (unsigned long)pgprot_val(vma->vm_page_prot));
6012 	binder_alloc_vma_close(&proc->alloc);
6013 }
6014 
6015 VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
6016 {
6017 	return VM_FAULT_SIGBUS;
6018 }
6019 EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
6020 
6021 static const struct vm_operations_struct binder_vm_ops = {
6022 	.open = binder_vma_open,
6023 	.close = binder_vma_close,
6024 	.fault = binder_vm_fault,
6025 };
6026 
6027 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
6028 {
6029 	struct binder_proc *proc = filp->private_data;
6030 
6031 	if (proc->tsk != current->group_leader)
6032 		return -EINVAL;
6033 
6034 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6035 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
6036 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
6037 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6038 		     (unsigned long)pgprot_val(vma->vm_page_prot));
6039 
6040 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
6041 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
6042 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
6043 		return -EPERM;
6044 	}
6045 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
6046 
6047 	vma->vm_ops = &binder_vm_ops;
6048 	vma->vm_private_data = proc;
6049 
6050 	return binder_alloc_mmap_handler(&proc->alloc, vma);
6051 }
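
/*
 * A minimal, hypothetical userspace sketch (not part of this driver;
 * binder_fd and map_size are assumed) of mapping the buffer space;
 * PROT_WRITE is rejected above via FORBIDDEN_MMAP_FLAGS, so the region
 * is mapped read-only and filled only by the kernel:
 *
 *	void *map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 */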
6052 
6053 static int binder_open(struct inode *nodp, struct file *filp)
6054 {
6055 	struct binder_proc *proc, *itr;
6056 	struct binder_device *binder_dev;
6057 	struct binderfs_info *info;
6058 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
6059 	bool existing_pid = false;
6060 
6061 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6062 		     current->group_leader->pid, current->pid);
6063 
6064 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6065 	if (proc == NULL)
6066 		return -ENOMEM;
6067 
6068 	dbitmap_init(&proc->dmap);
6069 	spin_lock_init(&proc->inner_lock);
6070 	spin_lock_init(&proc->outer_lock);
6071 	get_task_struct(current->group_leader);
6072 	proc->tsk = current->group_leader;
6073 	proc->cred = get_cred(filp->f_cred);
6074 	INIT_LIST_HEAD(&proc->todo);
6075 	init_waitqueue_head(&proc->freeze_wait);
6076 	proc->default_priority = task_nice(current);
6077 	/* binderfs stashes devices in i_private */
6078 	if (is_binderfs_device(nodp)) {
6079 		binder_dev = nodp->i_private;
6080 		info = nodp->i_sb->s_fs_info;
6081 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
6082 	} else {
6083 		binder_dev = container_of(filp->private_data,
6084 					  struct binder_device, miscdev);
6085 	}
6086 	refcount_inc(&binder_dev->ref);
6087 	proc->context = &binder_dev->context;
6088 	binder_alloc_init(&proc->alloc);
6089 
6090 	binder_stats_created(BINDER_STAT_PROC);
6091 	proc->pid = current->group_leader->pid;
6092 	INIT_LIST_HEAD(&proc->delivered_death);
6093 	INIT_LIST_HEAD(&proc->delivered_freeze);
6094 	INIT_LIST_HEAD(&proc->waiting_threads);
6095 	filp->private_data = proc;
6096 
6097 	mutex_lock(&binder_procs_lock);
6098 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6099 		if (itr->pid == proc->pid) {
6100 			existing_pid = true;
6101 			break;
6102 		}
6103 	}
6104 	hlist_add_head(&proc->proc_node, &binder_procs);
6105 	mutex_unlock(&binder_procs_lock);
6106 
6107 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
6108 		char strbuf[11];
6109 
6110 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6111 		/*
6112 		 * proc debug entries are shared between contexts.
6113 		 * Only create for the first PID to avoid debugfs log spamming.
6114 		 * The printing code will print all contexts for a given PID
6115 		 * anyway, so this is not a problem.
6116 		 */
6117 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6118 			binder_debugfs_dir_entry_proc,
6119 			(void *)(unsigned long)proc->pid,
6120 			&proc_fops);
6121 	}
6122 
6123 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
6124 		char strbuf[11];
6125 		struct dentry *binderfs_entry;
6126 
6127 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6128 		/*
6129 		 * Similar to debugfs, the process specific log file is shared
6130 		 * between contexts. Only create for the first PID.
6131 		 * This is OK since, as with debugfs, the log file will
6132 		 * contain information on all contexts of a given PID.
6133 		 */
6134 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6135 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6136 		if (!IS_ERR(binderfs_entry)) {
6137 			proc->binderfs_entry = binderfs_entry;
6138 		} else {
6139 			int error;
6140 
6141 			error = PTR_ERR(binderfs_entry);
6142 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
6143 				strbuf, error);
6144 		}
6145 	}
6146 
6147 	return 0;
6148 }
6149 
6150 static int binder_flush(struct file *filp, fl_owner_t id)
6151 {
6152 	struct binder_proc *proc = filp->private_data;
6153 
6154 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6155 
6156 	return 0;
6157 }
6158 
6159 static void binder_deferred_flush(struct binder_proc *proc)
6160 {
6161 	struct rb_node *n;
6162 	int wake_count = 0;
6163 
6164 	binder_inner_proc_lock(proc);
6165 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6166 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6167 
6168 		thread->looper_need_return = true;
6169 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6170 			wake_up_interruptible(&thread->wait);
6171 			wake_count++;
6172 		}
6173 	}
6174 	binder_inner_proc_unlock(proc);
6175 
6176 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6177 		     "binder_flush: %d woke %d threads\n", proc->pid,
6178 		     wake_count);
6179 }
6180 
6181 static int binder_release(struct inode *nodp, struct file *filp)
6182 {
6183 	struct binder_proc *proc = filp->private_data;
6184 
6185 	debugfs_remove(proc->debugfs_entry);
6186 
6187 	if (proc->binderfs_entry) {
6188 		simple_recursive_removal(proc->binderfs_entry, NULL);
6189 		proc->binderfs_entry = NULL;
6190 	}
6191 
6192 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6193 
6194 	return 0;
6195 }
6196 
6197 static int binder_node_release(struct binder_node *node, int refs)
6198 {
6199 	struct binder_ref *ref;
6200 	int death = 0;
6201 	struct binder_proc *proc = node->proc;
6202 
6203 	binder_release_work(proc, &node->async_todo);
6204 
6205 	binder_node_lock(node);
6206 	binder_inner_proc_lock(proc);
6207 	binder_dequeue_work_ilocked(&node->work);
6208 	/*
6209 	 * The caller must have taken a temporary ref on the node.
6210 	 */
6211 	BUG_ON(!node->tmp_refs);
6212 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6213 		binder_inner_proc_unlock(proc);
6214 		binder_node_unlock(node);
6215 		binder_free_node(node);
6216 
6217 		return refs;
6218 	}
6219 
6220 	node->proc = NULL;
6221 	node->local_strong_refs = 0;
6222 	node->local_weak_refs = 0;
6223 	binder_inner_proc_unlock(proc);
6224 
6225 	spin_lock(&binder_dead_nodes_lock);
6226 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
6227 	spin_unlock(&binder_dead_nodes_lock);
6228 
6229 	hlist_for_each_entry(ref, &node->refs, node_entry) {
6230 		refs++;
6231 		/*
6232 		 * Need the node lock to synchronize
6233 		 * with new notification requests and the
6234 		 * inner lock to synchronize with queued
6235 		 * death notifications.
6236 		 */
6237 		binder_inner_proc_lock(ref->proc);
6238 		if (!ref->death) {
6239 			binder_inner_proc_unlock(ref->proc);
6240 			continue;
6241 		}
6242 
6243 		death++;
6244 
6245 		BUG_ON(!list_empty(&ref->death->work.entry));
6246 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6247 		binder_enqueue_work_ilocked(&ref->death->work,
6248 					    &ref->proc->todo);
6249 		binder_wakeup_proc_ilocked(ref->proc);
6250 		binder_inner_proc_unlock(ref->proc);
6251 	}
6252 
6253 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
6254 		     "node %d now dead, refs %d, death %d\n",
6255 		     node->debug_id, refs, death);
6256 	binder_node_unlock(node);
6257 	binder_put_node(node);
6258 
6259 	return refs;
6260 }
6261 
6262 static void binder_deferred_release(struct binder_proc *proc)
6263 {
6264 	struct binder_context *context = proc->context;
6265 	struct rb_node *n;
6266 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6267 
6268 	mutex_lock(&binder_procs_lock);
6269 	hlist_del(&proc->proc_node);
6270 	mutex_unlock(&binder_procs_lock);
6271 
6272 	mutex_lock(&context->context_mgr_node_lock);
6273 	if (context->binder_context_mgr_node &&
6274 	    context->binder_context_mgr_node->proc == proc) {
6275 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
6276 			     "%s: %d context_mgr_node gone\n",
6277 			     __func__, proc->pid);
6278 		context->binder_context_mgr_node = NULL;
6279 	}
6280 	mutex_unlock(&context->context_mgr_node_lock);
6281 	binder_inner_proc_lock(proc);
6282 	/*
6283 	 * Make sure proc stays alive after we
6284 	 * remove all the threads
6285 	 */
6286 	proc->tmp_ref++;
6287 
6288 	proc->is_dead = true;
6289 	proc->is_frozen = false;
6290 	proc->sync_recv = false;
6291 	proc->async_recv = false;
6292 	threads = 0;
6293 	active_transactions = 0;
6294 	while ((n = rb_first(&proc->threads))) {
6295 		struct binder_thread *thread;
6296 
6297 		thread = rb_entry(n, struct binder_thread, rb_node);
6298 		binder_inner_proc_unlock(proc);
6299 		threads++;
6300 		active_transactions += binder_thread_release(proc, thread);
6301 		binder_inner_proc_lock(proc);
6302 	}
6303 
6304 	nodes = 0;
6305 	incoming_refs = 0;
6306 	while ((n = rb_first(&proc->nodes))) {
6307 		struct binder_node *node;
6308 
6309 		node = rb_entry(n, struct binder_node, rb_node);
6310 		nodes++;
6311 		/*
6312 		 * take a temporary ref on the node before
6313 		 * calling binder_node_release() which will either
6314 		 * kfree() the node or call binder_put_node()
6315 		 */
6316 		binder_inc_node_tmpref_ilocked(node);
6317 		rb_erase(&node->rb_node, &proc->nodes);
6318 		binder_inner_proc_unlock(proc);
6319 		incoming_refs = binder_node_release(node, incoming_refs);
6320 		binder_inner_proc_lock(proc);
6321 	}
6322 	binder_inner_proc_unlock(proc);
6323 
6324 	outgoing_refs = 0;
6325 	binder_proc_lock(proc);
6326 	while ((n = rb_first(&proc->refs_by_desc))) {
6327 		struct binder_ref *ref;
6328 
6329 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
6330 		outgoing_refs++;
6331 		binder_cleanup_ref_olocked(ref);
6332 		binder_proc_unlock(proc);
6333 		binder_free_ref(ref);
6334 		binder_proc_lock(proc);
6335 	}
6336 	binder_proc_unlock(proc);
6337 
6338 	binder_release_work(proc, &proc->todo);
6339 	binder_release_work(proc, &proc->delivered_death);
6340 	binder_release_work(proc, &proc->delivered_freeze);
6341 
6342 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6343 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6344 		     __func__, proc->pid, threads, nodes, incoming_refs,
6345 		     outgoing_refs, active_transactions);
6346 
6347 	binder_proc_dec_tmpref(proc);
6348 }
6349 
6350 static void binder_deferred_func(struct work_struct *work)
6351 {
6352 	struct binder_proc *proc;
6353 
6354 	int defer;
6355 
6356 	do {
6357 		mutex_lock(&binder_deferred_lock);
6358 		if (!hlist_empty(&binder_deferred_list)) {
6359 			proc = hlist_entry(binder_deferred_list.first,
6360 					struct binder_proc, deferred_work_node);
6361 			hlist_del_init(&proc->deferred_work_node);
6362 			defer = proc->deferred_work;
6363 			proc->deferred_work = 0;
6364 		} else {
6365 			proc = NULL;
6366 			defer = 0;
6367 		}
6368 		mutex_unlock(&binder_deferred_lock);
6369 
6370 		if (defer & BINDER_DEFERRED_FLUSH)
6371 			binder_deferred_flush(proc);
6372 
6373 		if (defer & BINDER_DEFERRED_RELEASE)
6374 			binder_deferred_release(proc); /* frees proc */
6375 	} while (proc);
6376 }
6377 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6378 
6379 static void
6380 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6381 {
6382 	guard(mutex)(&binder_deferred_lock);
6383 	proc->deferred_work |= defer;
6384 	if (hlist_unhashed(&proc->deferred_work_node)) {
6385 		hlist_add_head(&proc->deferred_work_node,
6386 				&binder_deferred_list);
6387 		schedule_work(&binder_deferred_work);
6388 	}
6389 }
6390 
6391 static void print_binder_transaction_ilocked(struct seq_file *m,
6392 					     struct binder_proc *proc,
6393 					     const char *prefix,
6394 					     struct binder_transaction *t)
6395 {
6396 	struct binder_proc *to_proc;
6397 	struct binder_buffer *buffer = t->buffer;
6398 	ktime_t current_time = ktime_get();
6399 
6400 	spin_lock(&t->lock);
6401 	to_proc = t->to_proc;
6402 	seq_printf(m,
6403 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
6404 		   prefix, t->debug_id, t,
6405 		   t->from_pid,
6406 		   t->from_tid,
6407 		   to_proc ? to_proc->pid : 0,
6408 		   t->to_thread ? t->to_thread->pid : 0,
6409 		   t->code, t->flags, t->priority, t->is_async, t->is_reply,
6410 		   ktime_ms_delta(current_time, t->start_time));
6411 	spin_unlock(&t->lock);
6412 
6413 	if (proc != to_proc) {
6414 		/*
6415 		 * Can only safely deref buffer if we are holding the
6416 		 * correct proc inner lock for this node
6417 		 */
6418 		seq_puts(m, "\n");
6419 		return;
6420 	}
6421 
6422 	if (buffer == NULL) {
6423 		seq_puts(m, " buffer free\n");
6424 		return;
6425 	}
6426 	if (buffer->target_node)
6427 		seq_printf(m, " node %d", buffer->target_node->debug_id);
6428 	seq_printf(m, " size %zd:%zd offset %lx\n",
6429 		   buffer->data_size, buffer->offsets_size,
6430 		   buffer->user_data - proc->alloc.vm_start);
6431 }
6432 
6433 static void print_binder_work_ilocked(struct seq_file *m,
6434 				      struct binder_proc *proc,
6435 				      const char *prefix,
6436 				      const char *transaction_prefix,
6437 				      struct binder_work *w, bool hash_ptrs)
6438 {
6439 	struct binder_node *node;
6440 	struct binder_transaction *t;
6441 
6442 	switch (w->type) {
6443 	case BINDER_WORK_TRANSACTION:
6444 		t = container_of(w, struct binder_transaction, work);
6445 		print_binder_transaction_ilocked(
6446 				m, proc, transaction_prefix, t);
6447 		break;
6448 	case BINDER_WORK_RETURN_ERROR: {
6449 		struct binder_error *e = container_of(
6450 				w, struct binder_error, work);
6451 
6452 		seq_printf(m, "%stransaction error: %u\n",
6453 			   prefix, e->cmd);
6454 	} break;
6455 	case BINDER_WORK_TRANSACTION_COMPLETE:
6456 		seq_printf(m, "%stransaction complete\n", prefix);
6457 		break;
6458 	case BINDER_WORK_NODE:
6459 		node = container_of(w, struct binder_node, work);
6460 		if (hash_ptrs)
6461 			seq_printf(m, "%snode work %d: u%p c%p\n",
6462 				   prefix, node->debug_id,
6463 				   (void *)(long)node->ptr,
6464 				   (void *)(long)node->cookie);
6465 		else
6466 			seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6467 				   prefix, node->debug_id,
6468 				   (u64)node->ptr, (u64)node->cookie);
6469 		break;
6470 	case BINDER_WORK_DEAD_BINDER:
6471 		seq_printf(m, "%shas dead binder\n", prefix);
6472 		break;
6473 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6474 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6475 		break;
6476 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6477 		seq_printf(m, "%shas cleared death notification\n", prefix);
6478 		break;
6479 	case BINDER_WORK_FROZEN_BINDER:
6480 		seq_printf(m, "%shas frozen binder\n", prefix);
6481 		break;
6482 	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6483 		seq_printf(m, "%shas cleared freeze notification\n", prefix);
6484 		break;
6485 	default:
6486 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6487 		break;
6488 	}
6489 }
6490 
6491 static void print_binder_thread_ilocked(struct seq_file *m,
6492 					struct binder_thread *thread,
6493 					bool print_always, bool hash_ptrs)
6494 {
6495 	struct binder_transaction *t;
6496 	struct binder_work *w;
6497 	size_t start_pos = m->count;
6498 	size_t header_pos;
6499 
6500 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6501 			thread->pid, thread->looper,
6502 			thread->looper_need_return,
6503 			atomic_read(&thread->tmp_ref));
6504 	header_pos = m->count;
6505 	t = thread->transaction_stack;
6506 	while (t) {
6507 		if (t->from == thread) {
6508 			print_binder_transaction_ilocked(m, thread->proc,
6509 					"    outgoing transaction", t);
6510 			t = t->from_parent;
6511 		} else if (t->to_thread == thread) {
6512 			print_binder_transaction_ilocked(m, thread->proc,
6513 						 "    incoming transaction", t);
6514 			t = t->to_parent;
6515 		} else {
6516 			print_binder_transaction_ilocked(m, thread->proc,
6517 					"    bad transaction", t);
6518 			t = NULL;
6519 		}
6520 	}
6521 	list_for_each_entry(w, &thread->todo, entry) {
6522 		print_binder_work_ilocked(m, thread->proc, "    ",
6523 					  "    pending transaction",
6524 					  w, hash_ptrs);
6525 	}
6526 	if (!print_always && m->count == header_pos)
6527 		m->count = start_pos;
6528 }
6529 
6530 static void print_binder_node_nilocked(struct seq_file *m,
6531 				       struct binder_node *node,
6532 				       bool hash_ptrs)
6533 {
6534 	struct binder_ref *ref;
6535 	struct binder_work *w;
6536 	int count;
6537 
6538 	count = hlist_count_nodes(&node->refs);
6539 
6540 	if (hash_ptrs)
6541 		seq_printf(m, "  node %d: u%p c%p", node->debug_id,
6542 			   (void *)(long)node->ptr, (void *)(long)node->cookie);
6543 	else
6544 		seq_printf(m, "  node %d: u%016llx c%016llx", node->debug_id,
6545 			   (u64)node->ptr, (u64)node->cookie);
6546 	seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6547 		   node->has_strong_ref, node->has_weak_ref,
6548 		   node->local_strong_refs, node->local_weak_refs,
6549 		   node->internal_strong_refs, count, node->tmp_refs);
6550 	if (count) {
6551 		seq_puts(m, " proc");
6552 		hlist_for_each_entry(ref, &node->refs, node_entry)
6553 			seq_printf(m, " %d", ref->proc->pid);
6554 	}
6555 	seq_puts(m, "\n");
6556 	if (node->proc) {
6557 		list_for_each_entry(w, &node->async_todo, entry)
6558 			print_binder_work_ilocked(m, node->proc, "    ",
6559 					  "    pending async transaction",
6560 					  w, hash_ptrs);
6561 	}
6562 }
6563 
6564 static void print_binder_ref_olocked(struct seq_file *m,
6565 				     struct binder_ref *ref)
6566 {
6567 	binder_node_lock(ref->node);
6568 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6569 		   ref->data.debug_id, ref->data.desc,
6570 		   ref->node->proc ? "" : "dead ",
6571 		   ref->node->debug_id, ref->data.strong,
6572 		   ref->data.weak, ref->death);
6573 	binder_node_unlock(ref->node);
6574 }
6575 
6576 /**
6577  * print_next_binder_node_ilocked() - Print binder_node from a locked list
6578  * @m:          struct seq_file for output via seq_printf()
6579  * @proc:       struct binder_proc we hold the inner_proc_lock to (if any)
6580  * @node:       struct binder_node to print fields of
6581  * @prev_node:	struct binder_node we hold a temporary reference to (if any)
6582  * @hash_ptrs:  whether to hash @node's binder_uintptr_t fields
6583  *
6584  * Helper function to handle synchronization around printing a struct
6585  * binder_node while iterating through @proc->nodes or the dead nodes list.
6586  * Caller must hold either @proc->inner_lock (for live nodes) or
6587  * binder_dead_nodes_lock. This lock will be released during the body of this
6588  * function, but it will be reacquired before returning to the caller.
6589  *
6590  * Return:	pointer to the struct binder_node we hold a tmpref on
6591  */
6592 static struct binder_node *
6593 print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6594 			       struct binder_node *node,
6595 			       struct binder_node *prev_node, bool hash_ptrs)
6596 {
6597 	/*
6598 	 * Take a temporary reference on the node so that it isn't freed while
6599 	 * we print it.
6600 	 */
6601 	binder_inc_node_tmpref_ilocked(node);
6602 	/*
6603 	 * Live nodes need to drop the inner proc lock and dead nodes need to
6604 	 * drop the binder_dead_nodes_lock before trying to take the node lock.
6605 	 */
6606 	if (proc)
6607 		binder_inner_proc_unlock(proc);
6608 	else
6609 		spin_unlock(&binder_dead_nodes_lock);
6610 	if (prev_node)
6611 		binder_put_node(prev_node);
6612 	binder_node_inner_lock(node);
6613 	print_binder_node_nilocked(m, node, hash_ptrs);
6614 	binder_node_inner_unlock(node);
6615 	if (proc)
6616 		binder_inner_proc_lock(proc);
6617 	else
6618 		spin_lock(&binder_dead_nodes_lock);
6619 	return node;
6620 }
6621 
6622 static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6623 			      bool print_all, bool hash_ptrs)
6624 {
6625 	struct binder_work *w;
6626 	struct rb_node *n;
6627 	size_t start_pos = m->count;
6628 	size_t header_pos;
6629 	struct binder_node *last_node = NULL;
6630 
6631 	seq_printf(m, "proc %d\n", proc->pid);
6632 	seq_printf(m, "context %s\n", proc->context->name);
6633 	header_pos = m->count;
6634 
6635 	binder_inner_proc_lock(proc);
6636 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6637 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6638 						rb_node), print_all, hash_ptrs);
6639 
6640 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6641 		struct binder_node *node = rb_entry(n, struct binder_node,
6642 						    rb_node);
6643 		if (!print_all && !node->has_async_transaction)
6644 			continue;
6645 
6646 		last_node = print_next_binder_node_ilocked(m, proc, node,
6647 							   last_node,
6648 							   hash_ptrs);
6649 	}
6650 	binder_inner_proc_unlock(proc);
6651 	if (last_node)
6652 		binder_put_node(last_node);
6653 
6654 	if (print_all) {
6655 		binder_proc_lock(proc);
6656 		for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6657 			print_binder_ref_olocked(m, rb_entry(n,
6658 							     struct binder_ref,
6659 							     rb_node_desc));
6660 		binder_proc_unlock(proc);
6661 	}
6662 	binder_alloc_print_allocated(m, &proc->alloc);
6663 	binder_inner_proc_lock(proc);
6664 	list_for_each_entry(w, &proc->todo, entry)
6665 		print_binder_work_ilocked(m, proc, "  ",
6666 					  "  pending transaction", w,
6667 					  hash_ptrs);
6668 	list_for_each_entry(w, &proc->delivered_death, entry) {
6669 		seq_puts(m, "  has delivered dead binder\n");
6670 		break;
6671 	}
6672 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
6673 		seq_puts(m, "  has delivered freeze binder\n");
6674 		break;
6675 	}
6676 	binder_inner_proc_unlock(proc);
6677 	if (!print_all && m->count == header_pos)
6678 		m->count = start_pos;
6679 }
6680 
6681 static const char * const binder_return_strings[] = {
6682 	"BR_ERROR",
6683 	"BR_OK",
6684 	"BR_TRANSACTION",
6685 	"BR_REPLY",
6686 	"BR_ACQUIRE_RESULT",
6687 	"BR_DEAD_REPLY",
6688 	"BR_TRANSACTION_COMPLETE",
6689 	"BR_INCREFS",
6690 	"BR_ACQUIRE",
6691 	"BR_RELEASE",
6692 	"BR_DECREFS",
6693 	"BR_ATTEMPT_ACQUIRE",
6694 	"BR_NOOP",
6695 	"BR_SPAWN_LOOPER",
6696 	"BR_FINISHED",
6697 	"BR_DEAD_BINDER",
6698 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6699 	"BR_FAILED_REPLY",
6700 	"BR_FROZEN_REPLY",
6701 	"BR_ONEWAY_SPAM_SUSPECT",
6702 	"BR_TRANSACTION_PENDING_FROZEN",
6703 	"BR_FROZEN_BINDER",
6704 	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6705 };
6706 
6707 static const char * const binder_command_strings[] = {
6708 	"BC_TRANSACTION",
6709 	"BC_REPLY",
6710 	"BC_ACQUIRE_RESULT",
6711 	"BC_FREE_BUFFER",
6712 	"BC_INCREFS",
6713 	"BC_ACQUIRE",
6714 	"BC_RELEASE",
6715 	"BC_DECREFS",
6716 	"BC_INCREFS_DONE",
6717 	"BC_ACQUIRE_DONE",
6718 	"BC_ATTEMPT_ACQUIRE",
6719 	"BC_REGISTER_LOOPER",
6720 	"BC_ENTER_LOOPER",
6721 	"BC_EXIT_LOOPER",
6722 	"BC_REQUEST_DEATH_NOTIFICATION",
6723 	"BC_CLEAR_DEATH_NOTIFICATION",
6724 	"BC_DEAD_BINDER_DONE",
6725 	"BC_TRANSACTION_SG",
6726 	"BC_REPLY_SG",
6727 	"BC_REQUEST_FREEZE_NOTIFICATION",
6728 	"BC_CLEAR_FREEZE_NOTIFICATION",
6729 	"BC_FREEZE_NOTIFICATION_DONE",
6730 };
6731 
6732 static const char * const binder_objstat_strings[] = {
6733 	"proc",
6734 	"thread",
6735 	"node",
6736 	"ref",
6737 	"death",
6738 	"transaction",
6739 	"transaction_complete",
6740 	"freeze",
6741 };
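
/*
 * The three tables above must stay index-for-index in sync with the
 * stats->bc, stats->br and stats->obj_created/obj_deleted arrays in
 * struct binder_stats; the BUILD_BUG_ON()s in print_binder_stats()
 * below turn any size mismatch into a compile-time error.
 */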
6742 
6743 static void print_binder_stats(struct seq_file *m, const char *prefix,
6744 			       struct binder_stats *stats)
6745 {
6746 	int i;
6747 
6748 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6749 		     ARRAY_SIZE(binder_command_strings));
6750 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6751 		int temp = atomic_read(&stats->bc[i]);
6752 
6753 		if (temp)
6754 			seq_printf(m, "%s%s: %d\n", prefix,
6755 				   binder_command_strings[i], temp);
6756 	}
6757 
6758 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6759 		     ARRAY_SIZE(binder_return_strings));
6760 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6761 		int temp = atomic_read(&stats->br[i]);
6762 
6763 		if (temp)
6764 			seq_printf(m, "%s%s: %d\n", prefix,
6765 				   binder_return_strings[i], temp);
6766 	}
6767 
6768 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6769 		     ARRAY_SIZE(binder_objstat_strings));
6770 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6771 		     ARRAY_SIZE(stats->obj_deleted));
6772 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6773 		int created = atomic_read(&stats->obj_created[i]);
6774 		int deleted = atomic_read(&stats->obj_deleted[i]);
6775 
6776 		if (created || deleted)
6777 			seq_printf(m, "%s%s: active %d total %d\n",
6778 				prefix,
6779 				binder_objstat_strings[i],
6780 				created - deleted,
6781 				created);
6782 	}
6783 }
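
/*
 * Example output fragment, with made-up counts, as it would appear in
 * the global stats file (prefix "", i.e. binder/stats assuming debugfs
 * is mounted at /sys/kernel/debug) or indented under a per-proc entry
 * (prefix "  "):
 *
 *	BC_TRANSACTION: 120
 *	BC_REPLY: 53
 *	BR_TRANSACTION_COMPLETE: 173
 *	node: active 4 total 9
 *
 * For the object stats, "active" is created - deleted (objects still
 * alive) and "total" is the cumulative created count.
 */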
6784 
6785 static void print_binder_proc_stats(struct seq_file *m,
6786 				    struct binder_proc *proc)
6787 {
6788 	struct binder_work *w;
6789 	struct binder_thread *thread;
6790 	struct rb_node *n;
6791 	int count, strong, weak, ready_threads;
6792 	size_t free_async_space =
6793 		binder_alloc_get_free_async_space(&proc->alloc);
6794 
6795 	seq_printf(m, "proc %d\n", proc->pid);
6796 	seq_printf(m, "context %s\n", proc->context->name);
6797 	count = 0;
6798 	ready_threads = 0;
6799 	binder_inner_proc_lock(proc);
6800 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6801 		count++;
6802 
6803 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6804 		ready_threads++;
6805 
6806 	seq_printf(m, "  threads: %d\n", count);
6807 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6808 			"  ready threads: %d\n"
6809 			"  free async space: %zd\n", proc->requested_threads,
6810 			proc->requested_threads_started, proc->max_threads,
6811 			ready_threads,
6812 			free_async_space);
6813 	count = 0;
6814 	for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6815 		count++;
6816 	binder_inner_proc_unlock(proc);
6817 	seq_printf(m, "  nodes: %d\n", count);
6818 	count = 0;
6819 	strong = 0;
6820 	weak = 0;
6821 	binder_proc_lock(proc);
6822 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6823 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6824 						  rb_node_desc);
6825 		count++;
6826 		strong += ref->data.strong;
6827 		weak += ref->data.weak;
6828 	}
6829 	binder_proc_unlock(proc);
6830 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6831 
6832 	count = binder_alloc_get_allocated_count(&proc->alloc);
6833 	seq_printf(m, "  buffers: %d\n", count);
6834 
6835 	binder_alloc_print_pages(m, &proc->alloc);
6836 
6837 	count = 0;
6838 	binder_inner_proc_lock(proc);
6839 	list_for_each_entry(w, &proc->todo, entry) {
6840 		if (w->type == BINDER_WORK_TRANSACTION)
6841 			count++;
6842 	}
6843 	binder_inner_proc_unlock(proc);
6844 	seq_printf(m, "  pending transactions: %d\n", count);
6845 
6846 	print_binder_stats(m, "  ", &proc->stats);
6847 }
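
/*
 * In the "requested threads: %d+%d/%d" line above, the fields are,
 * roughly: spawn requests currently outstanding (BR_SPAWN_LOOPER sent
 * but no looper registered yet), the loopers started so far in response
 * to such requests, and the max_threads limit configured through the
 * BINDER_SET_MAX_THREADS ioctl.
 */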
6848 
6849 static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6850 {
6851 	struct binder_proc *proc;
6852 	struct binder_node *node;
6853 	struct binder_node *last_node = NULL;
6854 
6855 	seq_puts(m, "binder state:\n");
6856 
6857 	spin_lock(&binder_dead_nodes_lock);
6858 	if (!hlist_empty(&binder_dead_nodes))
6859 		seq_puts(m, "dead nodes:\n");
6860 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6861 		last_node = print_next_binder_node_ilocked(m, NULL, node,
6862 							   last_node,
6863 							   hash_ptrs);
6864 	spin_unlock(&binder_dead_nodes_lock);
6865 	if (last_node)
6866 		binder_put_node(last_node);
6867 
6868 	mutex_lock(&binder_procs_lock);
6869 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6870 		print_binder_proc(m, proc, true, hash_ptrs);
6871 	mutex_unlock(&binder_procs_lock);
6872 }
6873 
6874 static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6875 {
6876 	struct binder_proc *proc;
6877 
6878 	seq_puts(m, "binder transactions:\n");
6879 	mutex_lock(&binder_procs_lock);
6880 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6881 		print_binder_proc(m, proc, false, hash_ptrs);
6882 	mutex_unlock(&binder_procs_lock);
6883 }
6884 
6885 static int state_show(struct seq_file *m, void *unused)
6886 {
6887 	print_binder_state(m, false);
6888 	return 0;
6889 }
6890 
6891 static int state_hashed_show(struct seq_file *m, void *unused)
6892 {
6893 	print_binder_state(m, true);
6894 	return 0;
6895 }
6896 
6897 static int stats_show(struct seq_file *m, void *unused)
6898 {
6899 	struct binder_proc *proc;
6900 
6901 	seq_puts(m, "binder stats:\n");
6902 
6903 	print_binder_stats(m, "", &binder_stats);
6904 
6905 	mutex_lock(&binder_procs_lock);
6906 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6907 		print_binder_proc_stats(m, proc);
6908 	mutex_unlock(&binder_procs_lock);
6909 
6910 	return 0;
6911 }
6912 
6913 static int transactions_show(struct seq_file *m, void *unused)
6914 {
6915 	print_binder_transactions(m, false);
6916 	return 0;
6917 }
6918 
6919 static int transactions_hashed_show(struct seq_file *m, void *unused)
6920 {
6921 	print_binder_transactions(m, true);
6922 	return 0;
6923 }
6924 
6925 static int proc_show(struct seq_file *m, void *unused)
6926 {
6927 	struct binder_proc *itr;
6928 	int pid = (unsigned long)m->private;
6929 
6930 	guard(mutex)(&binder_procs_lock);
6931 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6932 		if (itr->pid == pid) {
6933 			seq_puts(m, "binder proc state:\n");
6934 			print_binder_proc(m, itr, true, false);
6935 		}
6936 	}
6937 
6938 	return 0;
6939 }
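
/*
 * m->private holds a pid, not a binder_proc pointer: the per-process
 * state files are created with the pid stashed as the file's private
 * data, so the lookup above rescans binder_procs and may print more
 * than one entry when the same pid has procs in several binder
 * contexts.
 */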
6940 
6941 static void print_binder_transaction_log_entry(struct seq_file *m,
6942 					struct binder_transaction_log_entry *e)
6943 {
6944 	int debug_id = READ_ONCE(e->debug_id_done);
6945 	/*
6946 	 * read barrier to guarantee debug_id_done read before
6947 	 * we print the log values
6948 	 */
6949 	smp_rmb();
6950 	seq_printf(m,
6951 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6952 		   e->debug_id, (e->call_type == 2) ? "reply" :
6953 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6954 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6955 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6956 		   e->return_error, e->return_error_param,
6957 		   e->return_error_line);
6958 	/*
6959 	 * read barrier to guarantee the read of debug_id_done happens
6960 	 * after we are done printing the fields of the entry
6961 	 */
6962 	smp_rmb();
6963 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6964 			"\n" : " (incomplete)\n");
6965 }
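
/*
 * The two smp_rmb()s above pair with the writer side of the lockless
 * log (see binder_transaction_log_add() and the transaction code):
 * debug_id_done is cleared before an entry slot is reused and set,
 * after an smp_wmb(), only once all fields are filled in. A sketch of
 * the protocol, under that assumption:
 *
 *	writer				reader
 *	------				------
 *	e->debug_id_done = 0;		id = READ_ONCE(e->debug_id_done);
 *	fill e->fields;			smp_rmb();
 *	smp_wmb();			print e->fields;
 *	e->debug_id_done = debug_id;	smp_rmb();
 *					ok = id && id == READ_ONCE(...);
 *
 * A zero or changed debug_id_done means the slot was being rewritten
 * while we printed it, hence the " (incomplete)" tag.
 */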
6966 
6967 static int transaction_log_show(struct seq_file *m, void *unused)
6968 {
6969 	struct binder_transaction_log *log = m->private;
6970 	unsigned int log_cur = atomic_read(&log->cur);
6971 	unsigned int count;
6972 	unsigned int cur;
6973 	int i;
6974 
6975 	count = log_cur + 1;
6976 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6977 		0 : count % ARRAY_SIZE(log->entry);
6978 	if (count > ARRAY_SIZE(log->entry) || log->full)
6979 		count = ARRAY_SIZE(log->entry);
6980 	for (i = 0; i < count; i++) {
6981 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6982 
6983 		print_binder_transaction_log_entry(m, &log->entry[index]);
6984 	}
6985 	return 0;
6986 }
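
/*
 * Worked example of the ring walk above, assuming an illustrative
 * 32-entry ring: log->cur is initialized to ~0U (see binder_init()
 * below), so the first add lands at index 0. With log_cur == 35 after
 * a wrap, count becomes 36, cur = 36 % 32 = 4, count is clamped to 32,
 * and the loop prints indices 4..35 (mod 32), i.e. oldest entry first.
 * Before the first wrap (log->full unset), cur stays 0 and only the
 * count entries written so far are printed.
 */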
6987 
6988 const struct file_operations binder_fops = {
6989 	.owner = THIS_MODULE,
6990 	.poll = binder_poll,
6991 	.unlocked_ioctl = binder_ioctl,
6992 	.compat_ioctl = compat_ptr_ioctl,
6993 	.mmap = binder_mmap,
6994 	.open = binder_open,
6995 	.flush = binder_flush,
6996 	.release = binder_release,
6997 };
6998 
6999 DEFINE_SHOW_ATTRIBUTE(state);
7000 DEFINE_SHOW_ATTRIBUTE(state_hashed);
7001 DEFINE_SHOW_ATTRIBUTE(stats);
7002 DEFINE_SHOW_ATTRIBUTE(transactions);
7003 DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
7004 DEFINE_SHOW_ATTRIBUTE(transaction_log);
7005 
7006 const struct binder_debugfs_entry binder_debugfs_entries[] = {
7007 	{
7008 		.name = "state",
7009 		.mode = 0444,
7010 		.fops = &state_fops,
7011 		.data = NULL,
7012 	},
7013 	{
7014 		.name = "state_hashed",
7015 		.mode = 0444,
7016 		.fops = &state_hashed_fops,
7017 		.data = NULL,
7018 	},
7019 	{
7020 		.name = "stats",
7021 		.mode = 0444,
7022 		.fops = &stats_fops,
7023 		.data = NULL,
7024 	},
7025 	{
7026 		.name = "transactions",
7027 		.mode = 0444,
7028 		.fops = &transactions_fops,
7029 		.data = NULL,
7030 	},
7031 	{
7032 		.name = "transactions_hashed",
7033 		.mode = 0444,
7034 		.fops = &transactions_hashed_fops,
7035 		.data = NULL,
7036 	},
7037 	{
7038 		.name = "transaction_log",
7039 		.mode = 0444,
7040 		.fops = &transaction_log_fops,
7041 		.data = &binder_transaction_log,
7042 	},
7043 	{
7044 		.name = "failed_transaction_log",
7045 		.mode = 0444,
7046 		.fops = &transaction_log_fops,
7047 		.data = &binder_transaction_log_failed,
7048 	},
7049 	{} /* terminator */
7050 };
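
/*
 * binder_init() below walks this table to populate the debugfs tree,
 * so with debugfs mounted in the usual place the files show up as
 * /sys/kernel/debug/binder/{state,state_hashed,stats,transactions,
 * transactions_hashed,transaction_log,failed_transaction_log}, plus a
 * proc/ subdirectory holding one file per binder pid. Note that both
 * log files share transaction_log_fops and differ only in the .data
 * ring they are bound to.
 */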
7051 
7052 void binder_add_device(struct binder_device *device)
7053 {
7054 	guard(spinlock)(&binder_devices_lock);
7055 	hlist_add_head(&device->hlist, &binder_devices);
7056 }
7057 
7058 void binder_remove_device(struct binder_device *device)
7059 {
7060 	guard(spinlock)(&binder_devices_lock);
7061 	hlist_del_init(&device->hlist);
7062 }
7063 
7064 static int __init init_binder_device(const char *name)
7065 {
7066 	int ret;
7067 	struct binder_device *binder_device;
7068 
7069 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
7070 	if (!binder_device)
7071 		return -ENOMEM;
7072 
7073 	binder_device->miscdev.fops = &binder_fops;
7074 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
7075 	binder_device->miscdev.name = name;
7076 
7077 	refcount_set(&binder_device->ref, 1);
7078 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
7079 	binder_device->context.name = name;
7080 	mutex_init(&binder_device->context.context_mgr_node_lock);
7081 
7082 	ret = misc_register(&binder_device->miscdev);
7083 	if (ret < 0) {
7084 		kfree(binder_device);
7085 		return ret;
7086 	}
7087 
7088 	binder_add_device(binder_device);
7089 
7090 	return ret;
7091 }
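
/*
 * Each name taken from the binder_devices module parameter becomes its
 * own misc character device (MISC_DYNAMIC_MINOR picks the minor), so
 * e.g. a "binder,hwbinder" parameter yields /dev/binder and
 * /dev/hwbinder, each with an independent context and context manager.
 * This path is only taken when binderfs is disabled and the parameter
 * is non-empty (see binder_init() below).
 */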
7092 
7093 static int __init binder_init(void)
7094 {
7095 	int ret;
7096 	char *device_name, *device_tmp;
7097 	struct binder_device *device;
7098 	struct hlist_node *tmp;
7099 	char *device_names = NULL;
7100 	const struct binder_debugfs_entry *db_entry;
7101 
7102 	ret = binder_alloc_shrinker_init();
7103 	if (ret)
7104 		return ret;
7105 
7106 	atomic_set(&binder_transaction_log.cur, ~0U);
7107 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
7108 
7109 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
7110 
7111 	binder_for_each_debugfs_entry(db_entry)
7112 		debugfs_create_file(db_entry->name,
7113 					db_entry->mode,
7114 					binder_debugfs_dir_entry_root,
7115 					db_entry->data,
7116 					db_entry->fops);
7117 
7118 	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
7119 						binder_debugfs_dir_entry_root);
7120 
7121 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
7122 	    strcmp(binder_devices_param, "") != 0) {
7123 		/*
7124 		 * Copy the module parameter string, because we don't want to
7125 		 * tokenize it in-place.
7126 		 */
7127 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
7128 		if (!device_names) {
7129 			ret = -ENOMEM;
7130 			goto err_alloc_device_names_failed;
7131 		}
7132 
7133 		device_tmp = device_names;
7134 		while ((device_name = strsep(&device_tmp, ","))) {
7135 			ret = init_binder_device(device_name);
7136 			if (ret)
7137 				goto err_init_binder_device_failed;
7138 		}
7139 	}
7140 
7141 	ret = genl_register_family(&binder_nl_family);
7142 	if (ret)
7143 		goto err_init_binder_device_failed;
7144 
7145 	ret = init_binderfs();
7146 	if (ret)
7147 		goto err_init_binderfs_failed;
7148 
7149 	return ret;
7150 
7151 err_init_binderfs_failed:
7152 	genl_unregister_family(&binder_nl_family);
7153 
7154 err_init_binder_device_failed:
7155 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7156 		misc_deregister(&device->miscdev);
7157 		binder_remove_device(device);
7158 		kfree(device);
7159 	}
7160 
7161 	kfree(device_names);
7162 
7163 err_alloc_device_names_failed:
7164 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7165 	binder_alloc_shrinker_exit();
7166 
7167 	return ret;
7168 }
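
/*
 * The unwind labels above release resources in reverse order of
 * acquisition. Failures from debugfs_create_dir()/debugfs_create_file()
 * are deliberately not checked: the debugfs API accepts and ignores
 * error pointers, so the driver still works (minus introspection) when
 * debugfs is unavailable.
 */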
7169 
7170 device_initcall(binder_init);
7171 
7172 #define CREATE_TRACE_POINTS
7173 #include "binder_trace.h"
7174