xref: /linux/drivers/android/binder.c (revision 63740349eba78f242bcbf60d5244d7f2b2600853)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate the
33  * required lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70 
71 #include <kunit/visibility.h>
72 
73 #include <uapi/linux/android/binder.h>
74 
75 #include <linux/cacheflush.h>
76 
77 #include "binder_netlink.h"
78 #include "binder_internal.h"
79 #include "binder_trace.h"
80 
81 static HLIST_HEAD(binder_deferred_list);
82 static DEFINE_MUTEX(binder_deferred_lock);
83 
84 static HLIST_HEAD(binder_devices);
85 static DEFINE_SPINLOCK(binder_devices_lock);
86 
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89 
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92 
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96 
97 static int proc_show(struct seq_file *m, void *unused);
98 DEFINE_SHOW_ATTRIBUTE(proc);
99 
100 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
101 
102 enum {
103 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
104 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
105 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
106 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
107 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
108 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
109 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
110 	BINDER_DEBUG_USER_REFS              = 1U << 7,
111 	BINDER_DEBUG_THREADS                = 1U << 8,
112 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
113 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
114 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
115 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
116 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
117 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
118 };
119 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
120 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
121 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
122 
123 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
124 module_param_named(devices, binder_devices_param, charp, 0444);
125 
126 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
127 static int binder_stop_on_user_error;
128 
129 static int binder_set_stop_on_user_error(const char *val,
130 					 const struct kernel_param *kp)
131 {
132 	int ret;
133 
134 	ret = param_set_int(val, kp);
135 	if (binder_stop_on_user_error < 2)
136 		wake_up(&binder_user_error_wait);
137 	return ret;
138 }
139 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
140 	param_get_int, &binder_stop_on_user_error, 0644);
141 
142 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
143 {
144 	struct va_format vaf;
145 	va_list args;
146 
147 	if (binder_debug_mask & mask) {
148 		va_start(args, format);
149 		vaf.va = &args;
150 		vaf.fmt = format;
151 		pr_info_ratelimited("%pV", &vaf);
152 		va_end(args);
153 	}
154 }
155 
156 #define binder_txn_error(x...) \
157 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
158 
159 static __printf(1, 2) void binder_user_error(const char *format, ...)
160 {
161 	struct va_format vaf;
162 	va_list args;
163 
164 	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
165 		va_start(args, format);
166 		vaf.va = &args;
167 		vaf.fmt = format;
168 		pr_info_ratelimited("%pV", &vaf);
169 		va_end(args);
170 	}
171 
172 	if (binder_stop_on_user_error)
173 		binder_stop_on_user_error = 2;
174 }
175 
176 #define binder_set_extended_error(ee, _id, _command, _param) \
177 	do { \
178 		(ee)->id = _id; \
179 		(ee)->command = _command; \
180 		(ee)->param = _param; \
181 	} while (0)
182 
183 #define to_flat_binder_object(hdr) \
184 	container_of(hdr, struct flat_binder_object, hdr)
185 
186 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
187 
188 #define to_binder_buffer_object(hdr) \
189 	container_of(hdr, struct binder_buffer_object, hdr)
190 
191 #define to_binder_fd_array_object(hdr) \
192 	container_of(hdr, struct binder_fd_array_object, hdr)
193 
194 static struct binder_stats binder_stats;
195 
196 static inline void binder_stats_deleted(enum binder_stat_types type)
197 {
198 	atomic_inc(&binder_stats.obj_deleted[type]);
199 }
200 
201 static inline void binder_stats_created(enum binder_stat_types type)
202 {
203 	atomic_inc(&binder_stats.obj_created[type]);
204 }
205 
206 struct binder_transaction_log_entry {
207 	int debug_id;
208 	int debug_id_done;
209 	int call_type;
210 	int from_proc;
211 	int from_thread;
212 	int target_handle;
213 	int to_proc;
214 	int to_thread;
215 	int to_node;
216 	int data_size;
217 	int offsets_size;
218 	int return_error_line;
219 	uint32_t return_error;
220 	uint32_t return_error_param;
221 	char context_name[BINDERFS_MAX_NAME + 1];
222 };
223 
224 struct binder_transaction_log {
225 	atomic_t cur;
226 	bool full;
227 	struct binder_transaction_log_entry entry[32];
228 };
229 
230 static struct binder_transaction_log binder_transaction_log;
231 static struct binder_transaction_log binder_transaction_log_failed;
232 
233 static struct binder_transaction_log_entry *binder_transaction_log_add(
234 	struct binder_transaction_log *log)
235 {
236 	struct binder_transaction_log_entry *e;
237 	unsigned int cur = atomic_inc_return(&log->cur);
238 
239 	if (cur >= ARRAY_SIZE(log->entry))
240 		log->full = true;
241 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
242 	WRITE_ONCE(e->debug_id_done, 0);
243 	/*
244 	 * write-barrier to synchronize access to e->debug_id_done.
245 	 * We make sure the initialized 0 value is seen before
246 	 * the other fields are zeroed by the memset() below.
247 	 */
248 	smp_wmb();
249 	memset(e, 0, sizeof(*e));
250 	return e;
251 }
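
/*
 * Illustrative read-side sketch (a hypothetical helper, not part of
 * the driver): pairs smp_rmb() with the smp_wmb() in
 * binder_transaction_log_add() so a reader can detect an entry that
 * is still being rewritten and treat it as incomplete.
 */
static bool __maybe_unused binder_transaction_log_entry_stable(
	const struct binder_transaction_log_entry *e,
	struct binder_transaction_log_entry *copy)
{
	int debug_id = READ_ONCE(e->debug_id_done);

	/* pairs with smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	*copy = *e;
	/* make sure the snapshot completes before the re-check below */
	smp_rmb();
	return debug_id && debug_id == READ_ONCE(e->debug_id_done);
}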
252 
253 enum binder_deferred_state {
254 	BINDER_DEFERRED_FLUSH        = 0x01,
255 	BINDER_DEFERRED_RELEASE      = 0x02,
256 };
257 
258 enum {
259 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
260 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
261 	BINDER_LOOPER_STATE_EXITED      = 0x04,
262 	BINDER_LOOPER_STATE_INVALID     = 0x08,
263 	BINDER_LOOPER_STATE_WAITING     = 0x10,
264 	BINDER_LOOPER_STATE_POLL        = 0x20,
265 };
266 
267 /**
268  * binder_proc_lock() - Acquire outer lock for given binder_proc
269  * @proc:         struct binder_proc to acquire
270  *
271  * Acquires proc->outer_lock. Used to protect binder_ref
272  * structures associated with the given proc.
273  */
274 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
275 static void
276 _binder_proc_lock(struct binder_proc *proc, int line)
277 	__acquires(&proc->outer_lock)
278 {
279 	binder_debug(BINDER_DEBUG_SPINLOCKS,
280 		     "%s: line=%d\n", __func__, line);
281 	spin_lock(&proc->outer_lock);
282 }
283 
284 /**
285  * binder_proc_unlock() - Release outer lock for given binder_proc
286  * @proc:         struct binder_proc to release
287  *
288  * Release lock acquired via binder_proc_lock()
289  */
290 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
291 static void
292 _binder_proc_unlock(struct binder_proc *proc, int line)
293 	__releases(&proc->outer_lock)
294 {
295 	binder_debug(BINDER_DEBUG_SPINLOCKS,
296 		     "%s: line=%d\n", __func__, line);
297 	spin_unlock(&proc->outer_lock);
298 }
299 
300 /**
301  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
302  * @proc:         struct binder_proc to acquire
303  *
304  * Acquires proc->inner_lock. Used to protect todo lists
305  */
306 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
307 static void
308 _binder_inner_proc_lock(struct binder_proc *proc, int line)
309 	__acquires(&proc->inner_lock)
310 {
311 	binder_debug(BINDER_DEBUG_SPINLOCKS,
312 		     "%s: line=%d\n", __func__, line);
313 	spin_lock(&proc->inner_lock);
314 }
315 
316 /**
317  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
318  * @proc:         struct binder_proc to release
319  *
320  * Release lock acquired via binder_inner_proc_lock()
321  */
322 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
323 static void
324 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
325 	__releases(&proc->inner_lock)
326 {
327 	binder_debug(BINDER_DEBUG_SPINLOCKS,
328 		     "%s: line=%d\n", __func__, line);
329 	spin_unlock(&proc->inner_lock);
330 }
331 
332 /**
333  * binder_node_lock() - Acquire spinlock for given binder_node
334  * @node:         struct binder_node to acquire
335  *
336  * Acquires node->lock. Used to protect binder_node fields
337  */
338 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
339 static void
340 _binder_node_lock(struct binder_node *node, int line)
341 	__acquires(&node->lock)
342 {
343 	binder_debug(BINDER_DEBUG_SPINLOCKS,
344 		     "%s: line=%d\n", __func__, line);
345 	spin_lock(&node->lock);
346 }
347 
348 /**
349  * binder_node_unlock() - Release spinlock for given binder_node
350  * @node:         struct binder_node to release
351  *
352  * Release lock acquired via binder_node_lock()
353  */
354 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
355 static void
356 _binder_node_unlock(struct binder_node *node, int line)
357 	__releases(&node->lock)
358 {
359 	binder_debug(BINDER_DEBUG_SPINLOCKS,
360 		     "%s: line=%d\n", __func__, line);
361 	spin_unlock(&node->lock);
362 }
363 
364 /**
365  * binder_node_inner_lock() - Acquire node and inner locks
366  * @node:         struct binder_node to acquire
367  *
368  * Acquires node->lock. If node->proc is non-NULL, also acquires
369  * proc->inner_lock. Used to protect binder_node fields
370  */
371 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
372 static void
373 _binder_node_inner_lock(struct binder_node *node, int line)
374 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
375 {
376 	binder_debug(BINDER_DEBUG_SPINLOCKS,
377 		     "%s: line=%d\n", __func__, line);
378 	spin_lock(&node->lock);
379 	if (node->proc)
380 		binder_inner_proc_lock(node->proc);
381 	else
382 		/* annotation for sparse */
383 		__acquire(&node->proc->inner_lock);
384 }
385 
386 /**
387  * binder_node_inner_unlock() - Release node and inner locks
388  * @node:         struct binder_node to release
389  *
390  * Release locks acquired via binder_node_inner_lock()
391  */
392 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
393 static void
394 _binder_node_inner_unlock(struct binder_node *node, int line)
395 	__releases(&node->lock) __releases(&node->proc->inner_lock)
396 {
397 	struct binder_proc *proc = node->proc;
398 
399 	binder_debug(BINDER_DEBUG_SPINLOCKS,
400 		     "%s: line=%d\n", __func__, line);
401 	if (proc)
402 		binder_inner_proc_unlock(proc);
403 	else
404 		/* annotation for sparse */
405 		__release(&node->proc->inner_lock);
406 	spin_unlock(&node->lock);
407 }
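
/*
 * A minimal, hypothetical sketch (never called) of the lock ordering
 * documented at the top of this file, expressed with the helpers
 * defined above. Assumes @node belongs to @proc.
 */
static void __maybe_unused binder_lock_order_sketch(struct binder_proc *proc,
						    struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ...a foo_nilocked()/foo_oilocked() style section goes here... */

	/* release in reverse order of acquisition */
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}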
408 
409 static bool binder_worklist_empty_ilocked(struct list_head *list)
410 {
411 	return list_empty(list);
412 }
413 
414 /**
415  * binder_worklist_empty() - Check if no items on the work list
416  * @proc:       binder_proc associated with list
417  * @list:	list to check
418  *
419  * Return: true if there are no items on list, else false
420  */
421 static bool binder_worklist_empty(struct binder_proc *proc,
422 				  struct list_head *list)
423 {
424 	bool ret;
425 
426 	binder_inner_proc_lock(proc);
427 	ret = binder_worklist_empty_ilocked(list);
428 	binder_inner_proc_unlock(proc);
429 	return ret;
430 }
431 
432 /**
433  * binder_enqueue_work_ilocked() - Add an item to the work list
434  * @work:         struct binder_work to add to list
435  * @target_list:  list to add work to
436  *
437  * Adds the work to the specified list. Asserts that work
438  * is not already on a list.
439  *
440  * Requires the proc->inner_lock to be held.
441  */
442 static void
443 binder_enqueue_work_ilocked(struct binder_work *work,
444 			   struct list_head *target_list)
445 {
446 	BUG_ON(target_list == NULL);
447 	BUG_ON(work->entry.next && !list_empty(&work->entry));
448 	list_add_tail(&work->entry, target_list);
449 }
450 
451 /**
452  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
453  * @thread:       thread to queue work to
454  * @work:         struct binder_work to add to list
455  *
456  * Adds the work to the todo list of the thread. Doesn't set the process_todo
457  * flag, which means that (if it wasn't already set) the thread will go to
458  * sleep without handling this work when it calls read.
459  *
460  * Requires the proc->inner_lock to be held.
461  */
462 static void
463 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
464 					    struct binder_work *work)
465 {
466 	WARN_ON(!list_empty(&thread->waiting_thread_node));
467 	binder_enqueue_work_ilocked(work, &thread->todo);
468 }
469 
470 /**
471  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
472  * @thread:       thread to queue work to
473  * @work:         struct binder_work to add to list
474  *
475  * Adds the work to the todo list of the thread, and enables processing
476  * of the todo queue.
477  *
478  * Requires the proc->inner_lock to be held.
479  */
480 static void
481 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
482 				   struct binder_work *work)
483 {
484 	WARN_ON(!list_empty(&thread->waiting_thread_node));
485 	binder_enqueue_work_ilocked(work, &thread->todo);
486 
487 	/* (e)poll-based threads require an explicit wakeup signal when
488 	 * queuing their own work; they rely on these events to consume
489 	 * messages without blocking on I/O. Without the wakeup, threads
490 	 * risk waiting indefinitely without handling the work.
491 	 */
492 	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
493 	    thread->pid == current->pid && !thread->process_todo)
494 		wake_up_interruptible_sync(&thread->wait);
495 
496 	thread->process_todo = true;
497 }
498 
499 /**
500  * binder_enqueue_thread_work() - Add an item to the thread work list
501  * @thread:       thread to queue work to
502  * @work:         struct binder_work to add to list
503  *
504  * Adds the work to the todo list of the thread, and enables processing
505  * of the todo queue.
506  */
507 static void
508 binder_enqueue_thread_work(struct binder_thread *thread,
509 			   struct binder_work *work)
510 {
511 	binder_inner_proc_lock(thread->proc);
512 	binder_enqueue_thread_work_ilocked(thread, work);
513 	binder_inner_proc_unlock(thread->proc);
514 }
515 
516 static void
517 binder_dequeue_work_ilocked(struct binder_work *work)
518 {
519 	list_del_init(&work->entry);
520 }
521 
522 /**
523  * binder_dequeue_work() - Removes an item from the work list
524  * @proc:         binder_proc associated with list
525  * @work:         struct binder_work to remove from list
526  *
527  * Removes the specified work item from whatever list it is on.
528  * Can safely be called if work is not on any list.
529  */
530 static void
531 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
532 {
533 	binder_inner_proc_lock(proc);
534 	binder_dequeue_work_ilocked(work);
535 	binder_inner_proc_unlock(proc);
536 }
537 
538 static struct binder_work *binder_dequeue_work_head_ilocked(
539 					struct list_head *list)
540 {
541 	struct binder_work *w;
542 
543 	w = list_first_entry_or_null(list, struct binder_work, entry);
544 	if (w)
545 		list_del_init(&w->entry);
546 	return w;
547 }
548 
549 static void
550 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
551 static void binder_free_thread(struct binder_thread *thread);
552 static void binder_free_proc(struct binder_proc *proc);
553 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
554 
555 static bool binder_has_work_ilocked(struct binder_thread *thread,
556 				    bool do_proc_work)
557 {
558 	return thread->process_todo ||
559 		thread->looper_need_return ||
560 		(do_proc_work &&
561 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
562 }
563 
564 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
565 {
566 	bool has_work;
567 
568 	binder_inner_proc_lock(thread->proc);
569 	has_work = binder_has_work_ilocked(thread, do_proc_work);
570 	binder_inner_proc_unlock(thread->proc);
571 
572 	return has_work;
573 }
574 
575 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
576 {
577 	return !thread->transaction_stack &&
578 		binder_worklist_empty_ilocked(&thread->todo);
579 }
580 
581 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
582 					       bool sync)
583 {
584 	struct rb_node *n;
585 	struct binder_thread *thread;
586 
587 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
588 		thread = rb_entry(n, struct binder_thread, rb_node);
589 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
590 		    binder_available_for_proc_work_ilocked(thread)) {
591 			if (sync)
592 				wake_up_interruptible_sync(&thread->wait);
593 			else
594 				wake_up_interruptible(&thread->wait);
595 		}
596 	}
597 }
598 
599 /**
600  * binder_select_thread_ilocked() - selects a thread for doing proc work.
601  * @proc:	process to select a thread from
602  *
603  * Note that calling this function moves the thread off the waiting_threads
604  * list, so it can only be woken up by the caller of this function, or a
605  * signal. Therefore, callers *should* always wake up the thread this function
606  * returns.
607  *
608  * Return:	If there's a thread currently waiting for process work,
609  *		returns that thread. Otherwise returns NULL.
610  */
611 static struct binder_thread *
612 binder_select_thread_ilocked(struct binder_proc *proc)
613 {
614 	struct binder_thread *thread;
615 
616 	assert_spin_locked(&proc->inner_lock);
617 	thread = list_first_entry_or_null(&proc->waiting_threads,
618 					  struct binder_thread,
619 					  waiting_thread_node);
620 
621 	if (thread)
622 		list_del_init(&thread->waiting_thread_node);
623 
624 	return thread;
625 }
626 
627 /**
628  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
629  * @proc:	process to wake up a thread in
630  * @thread:	specific thread to wake-up (may be NULL)
631  * @sync:	whether to do a synchronous wake-up
632  *
633  * This function wakes up a thread in the @proc process.
634  * The caller may provide a specific thread to wake-up in
635  * the @thread parameter. If @thread is NULL, this function
636  * will wake up threads that have called poll().
637  *
638  * Note that for this function to work as expected, callers
639  * should first call binder_select_thread() to find a thread
640  * to handle the work (if they don't have a thread already),
641  * and pass the result into the @thread parameter.
642  */
643 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
644 					 struct binder_thread *thread,
645 					 bool sync)
646 {
647 	assert_spin_locked(&proc->inner_lock);
648 
649 	if (thread) {
650 		if (sync)
651 			wake_up_interruptible_sync(&thread->wait);
652 		else
653 			wake_up_interruptible(&thread->wait);
654 		return;
655 	}
656 
657 	/* Didn't find a thread waiting for proc work; this can happen
658 	 * in two scenarios:
659 	 * 1. All threads are busy handling transactions
660 	 *    In that case, one of those threads should call back into
661 	 *    the kernel driver soon and pick up this work.
662 	 * 2. Threads are using the (e)poll interface, in which case
663 	 *    they may be blocked on the waitqueue without having been
664 	 *    added to waiting_threads. For this case, we just iterate
665 	 *    over all threads not handling transaction work, and
666 	 *    wake them all up. We wake all because we don't know whether
667 	 *    a thread that called into (e)poll is handling non-binder
668 	 *    work currently.
669 	 */
670 	binder_wakeup_poll_threads_ilocked(proc, sync);
671 }
672 
673 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
674 {
675 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
676 
677 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
678 }
679 
680 static void binder_set_nice(long nice)
681 {
682 	long min_nice;
683 
684 	if (can_nice(current, nice)) {
685 		set_user_nice(current, nice);
686 		return;
687 	}
688 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
689 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
690 		     "%d: nice value %ld not allowed use %ld instead\n",
691 		      current->pid, nice, min_nice);
692 	set_user_nice(current, min_nice);
693 	if (min_nice <= MAX_NICE)
694 		return;
695 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
696 }
697 
698 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
699 						   binder_uintptr_t ptr)
700 {
701 	struct rb_node *n = proc->nodes.rb_node;
702 	struct binder_node *node;
703 
704 	assert_spin_locked(&proc->inner_lock);
705 
706 	while (n) {
707 		node = rb_entry(n, struct binder_node, rb_node);
708 
709 		if (ptr < node->ptr)
710 			n = n->rb_left;
711 		else if (ptr > node->ptr)
712 			n = n->rb_right;
713 		else {
714 			/*
715 			 * take an implicit weak reference
716 			 * to ensure node stays alive until
717 			 * call to binder_put_node()
718 			 */
719 			binder_inc_node_tmpref_ilocked(node);
720 			return node;
721 		}
722 	}
723 	return NULL;
724 }
725 
726 static struct binder_node *binder_get_node(struct binder_proc *proc,
727 					   binder_uintptr_t ptr)
728 {
729 	struct binder_node *node;
730 
731 	binder_inner_proc_lock(proc);
732 	node = binder_get_node_ilocked(proc, ptr);
733 	binder_inner_proc_unlock(proc);
734 	return node;
735 }
736 
737 static struct binder_node *binder_init_node_ilocked(
738 						struct binder_proc *proc,
739 						struct binder_node *new_node,
740 						struct flat_binder_object *fp)
741 {
742 	struct rb_node **p = &proc->nodes.rb_node;
743 	struct rb_node *parent = NULL;
744 	struct binder_node *node;
745 	binder_uintptr_t ptr = fp ? fp->binder : 0;
746 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
747 	__u32 flags = fp ? fp->flags : 0;
748 
749 	assert_spin_locked(&proc->inner_lock);
750 
751 	while (*p) {
752 
753 		parent = *p;
754 		node = rb_entry(parent, struct binder_node, rb_node);
755 
756 		if (ptr < node->ptr)
757 			p = &(*p)->rb_left;
758 		else if (ptr > node->ptr)
759 			p = &(*p)->rb_right;
760 		else {
761 			/*
762 			 * A matching node is already in
763 			 * the rb tree. Abandon the init
764 			 * and return it.
765 			 */
766 			binder_inc_node_tmpref_ilocked(node);
767 			return node;
768 		}
769 	}
770 	node = new_node;
771 	binder_stats_created(BINDER_STAT_NODE);
772 	node->tmp_refs++;
773 	rb_link_node(&node->rb_node, parent, p);
774 	rb_insert_color(&node->rb_node, &proc->nodes);
775 	node->debug_id = atomic_inc_return(&binder_last_id);
776 	node->proc = proc;
777 	node->ptr = ptr;
778 	node->cookie = cookie;
779 	node->work.type = BINDER_WORK_NODE;
780 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
781 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
782 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
783 	spin_lock_init(&node->lock);
784 	INIT_LIST_HEAD(&node->work.entry);
785 	INIT_LIST_HEAD(&node->async_todo);
786 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
787 		     "%d:%d node %d u%016llx c%016llx created\n",
788 		     proc->pid, current->pid, node->debug_id,
789 		     (u64)node->ptr, (u64)node->cookie);
790 
791 	return node;
792 }
793 
794 static struct binder_node *binder_new_node(struct binder_proc *proc,
795 					   struct flat_binder_object *fp)
796 {
797 	struct binder_node *node;
798 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
799 
800 	if (!new_node)
801 		return NULL;
802 	binder_inner_proc_lock(proc);
803 	node = binder_init_node_ilocked(proc, new_node, fp);
804 	binder_inner_proc_unlock(proc);
805 	if (node != new_node)
806 		/*
807 		 * The node was already added by another thread
808 		 */
809 		kfree(new_node);
810 
811 	return node;
812 }
813 
814 static void binder_free_node(struct binder_node *node)
815 {
816 	kfree(node);
817 	binder_stats_deleted(BINDER_STAT_NODE);
818 }
819 
820 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
821 				    int internal,
822 				    struct list_head *target_list)
823 {
824 	struct binder_proc *proc = node->proc;
825 
826 	assert_spin_locked(&node->lock);
827 	if (proc)
828 		assert_spin_locked(&proc->inner_lock);
829 	if (strong) {
830 		if (internal) {
831 			if (target_list == NULL &&
832 			    node->internal_strong_refs == 0 &&
833 			    !(node->proc &&
834 			      node == node->proc->context->binder_context_mgr_node &&
835 			      node->has_strong_ref)) {
836 				pr_err("invalid inc strong node for %d\n",
837 					node->debug_id);
838 				return -EINVAL;
839 			}
840 			node->internal_strong_refs++;
841 		} else
842 			node->local_strong_refs++;
843 		if (!node->has_strong_ref && target_list) {
844 			struct binder_thread *thread = container_of(target_list,
845 						    struct binder_thread, todo);
846 			binder_dequeue_work_ilocked(&node->work);
847 			BUG_ON(&thread->todo != target_list);
848 			binder_enqueue_deferred_thread_work_ilocked(thread,
849 								   &node->work);
850 		}
851 	} else {
852 		if (!internal)
853 			node->local_weak_refs++;
854 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
855 			if (target_list == NULL) {
856 				pr_err("invalid inc weak node for %d\n",
857 					node->debug_id);
858 				return -EINVAL;
859 			}
860 			/*
861 			 * Queue node work so userspace learns of the new weak ref
862 			 */
863 			binder_enqueue_work_ilocked(&node->work, target_list);
864 		}
865 	}
866 	return 0;
867 }
868 
869 static int binder_inc_node(struct binder_node *node, int strong, int internal,
870 			   struct list_head *target_list)
871 {
872 	int ret;
873 
874 	binder_node_inner_lock(node);
875 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
876 	binder_node_inner_unlock(node);
877 
878 	return ret;
879 }
880 
881 static bool binder_dec_node_nilocked(struct binder_node *node,
882 				     int strong, int internal)
883 {
884 	struct binder_proc *proc = node->proc;
885 
886 	assert_spin_locked(&node->lock);
887 	if (proc)
888 		assert_spin_locked(&proc->inner_lock);
889 	if (strong) {
890 		if (internal)
891 			node->internal_strong_refs--;
892 		else
893 			node->local_strong_refs--;
894 		if (node->local_strong_refs || node->internal_strong_refs)
895 			return false;
896 	} else {
897 		if (!internal)
898 			node->local_weak_refs--;
899 		if (node->local_weak_refs || node->tmp_refs ||
900 				!hlist_empty(&node->refs))
901 			return false;
902 	}
903 
904 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
905 		if (list_empty(&node->work.entry)) {
906 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
907 			binder_wakeup_proc_ilocked(proc);
908 		}
909 	} else {
910 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
911 		    !node->local_weak_refs && !node->tmp_refs) {
912 			if (proc) {
913 				binder_dequeue_work_ilocked(&node->work);
914 				rb_erase(&node->rb_node, &proc->nodes);
915 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
916 					     "refless node %d deleted\n",
917 					     node->debug_id);
918 			} else {
919 				BUG_ON(!list_empty(&node->work.entry));
920 				spin_lock(&binder_dead_nodes_lock);
921 				/*
922 				 * tmp_refs could have changed so
923 				 * check it again
924 				 */
925 				if (node->tmp_refs) {
926 					spin_unlock(&binder_dead_nodes_lock);
927 					return false;
928 				}
929 				hlist_del(&node->dead_node);
930 				spin_unlock(&binder_dead_nodes_lock);
931 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
932 					     "dead node %d deleted\n",
933 					     node->debug_id);
934 			}
935 			return true;
936 		}
937 	}
938 	return false;
939 }
940 
941 static void binder_dec_node(struct binder_node *node, int strong, int internal)
942 {
943 	bool free_node;
944 
945 	binder_node_inner_lock(node);
946 	free_node = binder_dec_node_nilocked(node, strong, internal);
947 	binder_node_inner_unlock(node);
948 	if (free_node)
949 		binder_free_node(node);
950 }
951 
952 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
953 {
954 	/*
955 	 * No call to binder_inc_node() is needed since we
956 	 * don't need to inform userspace of any changes to
957 	 * tmp_refs
958 	 */
959 	node->tmp_refs++;
960 }
961 
962 /**
963  * binder_inc_node_tmpref() - take a temporary reference on node
964  * @node:	node to reference
965  *
966  * Take reference on node to prevent the node from being freed
967  * while referenced only by a local variable. The inner lock is
968  * needed to serialize with the node work on the queue (which
969  * isn't needed after the node is dead). If the node is dead
970  * (node->proc is NULL), use binder_dead_nodes_lock to protect
971  * node->tmp_refs against dead-node-only cases where the node
972  * lock cannot be acquired (e.g. traversing the dead node list to
973  * print nodes)
974  */
975 static void binder_inc_node_tmpref(struct binder_node *node)
976 {
977 	binder_node_lock(node);
978 	if (node->proc)
979 		binder_inner_proc_lock(node->proc);
980 	else
981 		spin_lock(&binder_dead_nodes_lock);
982 	binder_inc_node_tmpref_ilocked(node);
983 	if (node->proc)
984 		binder_inner_proc_unlock(node->proc);
985 	else
986 		spin_unlock(&binder_dead_nodes_lock);
987 	binder_node_unlock(node);
988 }
989 
990 /**
991  * binder_dec_node_tmpref() - remove a temporary reference on node
992  * @node:	node to reference
993  *
994  * Release temporary reference on node taken via binder_inc_node_tmpref()
995  */
996 static void binder_dec_node_tmpref(struct binder_node *node)
997 {
998 	bool free_node;
999 
1000 	binder_node_inner_lock(node);
1001 	if (!node->proc)
1002 		spin_lock(&binder_dead_nodes_lock);
1003 	else
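		/* annotation for sparse */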
1004 		__acquire(&binder_dead_nodes_lock);
1005 	node->tmp_refs--;
1006 	BUG_ON(node->tmp_refs < 0);
1007 	if (!node->proc)
1008 		spin_unlock(&binder_dead_nodes_lock);
1009 	else
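		/* annotation for sparse */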
1010 		__release(&binder_dead_nodes_lock);
1011 	/*
1012 	 * Call binder_dec_node() to check if all refcounts are 0
1013 	 * and cleanup is needed. Calling with strong=0 and internal=1
1014 	 * causes no actual reference to be released in binder_dec_node().
1015 	 * If that changes, a change is needed here too.
1016 	 */
1017 	free_node = binder_dec_node_nilocked(node, 0, 1);
1018 	binder_node_inner_unlock(node);
1019 	if (free_node)
1020 		binder_free_node(node);
1021 }
1022 
1023 static void binder_put_node(struct binder_node *node)
1024 {
1025 	binder_dec_node_tmpref(node);
1026 }
1027 
1028 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1029 						 u32 desc, bool need_strong_ref)
1030 {
1031 	struct rb_node *n = proc->refs_by_desc.rb_node;
1032 	struct binder_ref *ref;
1033 
1034 	while (n) {
1035 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1036 
1037 		if (desc < ref->data.desc) {
1038 			n = n->rb_left;
1039 		} else if (desc > ref->data.desc) {
1040 			n = n->rb_right;
1041 		} else if (need_strong_ref && !ref->data.strong) {
1042 			binder_user_error("tried to use weak ref as strong ref\n");
1043 			return NULL;
1044 		} else {
1045 			return ref;
1046 		}
1047 	}
1048 	return NULL;
1049 }
1050 
1051 /* Find the smallest unused descriptor the "slow way" */
1052 static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
1053 {
1054 	struct binder_ref *ref;
1055 	struct rb_node *n;
1056 	u32 desc;
1057 
1058 	desc = offset;
1059 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1060 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1061 		if (ref->data.desc > desc)
1062 			break;
1063 		desc = ref->data.desc + 1;
1064 	}
1065 
1066 	return desc;
1067 }
1068 
1069 /*
1070  * Find an available reference descriptor ID. The proc->outer_lock might
1071  * be released in the process, in which case -EAGAIN is returned and
1072  * *desc should be considered invalid.
1073  */
1074 static int get_ref_desc_olocked(struct binder_proc *proc,
1075 				struct binder_node *node,
1076 				u32 *desc)
1077 {
1078 	struct dbitmap *dmap = &proc->dmap;
1079 	unsigned int nbits, offset;
1080 	unsigned long *new, bit;
1081 
1082 	/* 0 is reserved for the context manager */
1083 	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
1084 
1085 	if (!dbitmap_enabled(dmap)) {
1086 		*desc = slow_desc_lookup_olocked(proc, offset);
1087 		return 0;
1088 	}
1089 
1090 	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
1091 		*desc = bit;
1092 		return 0;
1093 	}
1094 
1095 	/*
1096 	 * The dbitmap is full and needs to grow. The proc->outer_lock
1097 	 * is briefly released to allocate the new bitmap safely.
1098 	 */
1099 	nbits = dbitmap_grow_nbits(dmap);
1100 	binder_proc_unlock(proc);
1101 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1102 	binder_proc_lock(proc);
1103 	dbitmap_grow(dmap, new, nbits);
1104 
1105 	return -EAGAIN;
1106 }
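
/*
 * Hypothetical caller sketch for get_ref_desc_olocked(): on -EAGAIN
 * the proc->outer_lock was dropped and reacquired, so any state
 * observed under the lock must be re-validated before retrying. The
 * real caller, binder_get_ref_for_node_olocked() below, redoes its
 * whole rb-tree lookup.
 */
static int __maybe_unused get_ref_desc_retry_sketch(struct binder_proc *proc,
						    struct binder_node *node,
						    u32 *desc)
{
	int ret;

	binder_proc_lock(proc);
	do {
		/* on -EAGAIN, redo any lock-protected lookups here */
		ret = get_ref_desc_olocked(proc, node, desc);
	} while (ret == -EAGAIN);
	binder_proc_unlock(proc);

	return ret;
}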
1107 
1108 /**
1109  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1110  * @proc:	binder_proc that owns the ref
1111  * @node:	binder_node of target
1112  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1113  *
1114  * Look up the ref for the given node and return it if it exists
1115  *
1116  * If it doesn't exist and the caller provides a newly allocated
1117  * ref, initialize the fields of the newly allocated ref and insert
1118  * into the given proc rb_trees and node refs list.
1119  *
1120  * Return:	the ref for node. It is possible that another thread
1121  *		allocated/initialized the ref first in which case the
1122  *		returned ref would be different from the passed-in
1123  *		new_ref. new_ref must be kfree'd by the caller in
1124  *		this case.
1125  */
1126 static struct binder_ref *binder_get_ref_for_node_olocked(
1127 					struct binder_proc *proc,
1128 					struct binder_node *node,
1129 					struct binder_ref *new_ref)
1130 {
1131 	struct binder_ref *ref;
1132 	struct rb_node *parent;
1133 	struct rb_node **p;
1134 	u32 desc;
1135 
1136 retry:
1137 	p = &proc->refs_by_node.rb_node;
1138 	parent = NULL;
1139 	while (*p) {
1140 		parent = *p;
1141 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1142 
1143 		if (node < ref->node)
1144 			p = &(*p)->rb_left;
1145 		else if (node > ref->node)
1146 			p = &(*p)->rb_right;
1147 		else
1148 			return ref;
1149 	}
1150 	if (!new_ref)
1151 		return NULL;
1152 
1153 	/* might release the proc->outer_lock */
1154 	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1155 		goto retry;
1156 
1157 	binder_stats_created(BINDER_STAT_REF);
1158 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1159 	new_ref->proc = proc;
1160 	new_ref->node = node;
1161 	rb_link_node(&new_ref->rb_node_node, parent, p);
1162 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1163 
1164 	new_ref->data.desc = desc;
1165 	p = &proc->refs_by_desc.rb_node;
1166 	while (*p) {
1167 		parent = *p;
1168 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1169 
1170 		if (new_ref->data.desc < ref->data.desc)
1171 			p = &(*p)->rb_left;
1172 		else if (new_ref->data.desc > ref->data.desc)
1173 			p = &(*p)->rb_right;
1174 		else
1175 			BUG();
1176 	}
1177 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1178 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1179 
1180 	binder_node_lock(node);
1181 	hlist_add_head(&new_ref->node_entry, &node->refs);
1182 
1183 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1184 		     "%d new ref %d desc %d for node %d\n",
1185 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1186 		      node->debug_id);
1187 	binder_node_unlock(node);
1188 	return new_ref;
1189 }
1190 
1191 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1192 {
1193 	struct dbitmap *dmap = &ref->proc->dmap;
1194 	bool delete_node = false;
1195 
1196 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1197 		     "%d delete ref %d desc %d for node %d\n",
1198 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1199 		      ref->node->debug_id);
1200 
1201 	if (dbitmap_enabled(dmap))
1202 		dbitmap_clear_bit(dmap, ref->data.desc);
1203 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1204 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1205 
1206 	binder_node_inner_lock(ref->node);
1207 	if (ref->data.strong)
1208 		binder_dec_node_nilocked(ref->node, 1, 1);
1209 
1210 	hlist_del(&ref->node_entry);
1211 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1212 	binder_node_inner_unlock(ref->node);
1213 	/*
1214 	 * Clear ref->node unless we want the caller to free the node
1215 	 */
1216 	if (!delete_node) {
1217 		/*
1218 		 * The caller uses ref->node to determine
1219 		 * whether the node needs to be freed. Clear
1220 		 * it since the node is still alive.
1221 		 */
1222 		ref->node = NULL;
1223 	}
1224 
1225 	if (ref->death) {
1226 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1227 			     "%d delete ref %d desc %d has death notification\n",
1228 			      ref->proc->pid, ref->data.debug_id,
1229 			      ref->data.desc);
1230 		binder_dequeue_work(ref->proc, &ref->death->work);
1231 		binder_stats_deleted(BINDER_STAT_DEATH);
1232 	}
1233 
1234 	if (ref->freeze) {
1235 		binder_dequeue_work(ref->proc, &ref->freeze->work);
1236 		binder_stats_deleted(BINDER_STAT_FREEZE);
1237 	}
1238 
1239 	binder_stats_deleted(BINDER_STAT_REF);
1240 }
1241 
1242 /**
1243  * binder_inc_ref_olocked() - increment the ref for given handle
1244  * @ref:         ref to be incremented
1245  * @strong:      if true, strong increment, else weak
1246  * @target_list: list to queue node work on
1247  *
1248  * Increment the ref. @ref->proc->outer_lock must be held on entry
1249  *
1250  * Return: 0, if successful, else errno
1251  */
1252 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1253 				  struct list_head *target_list)
1254 {
1255 	int ret;
1256 
1257 	if (strong) {
1258 		if (ref->data.strong == 0) {
1259 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1260 			if (ret)
1261 				return ret;
1262 		}
1263 		ref->data.strong++;
1264 	} else {
1265 		if (ref->data.weak == 0) {
1266 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1267 			if (ret)
1268 				return ret;
1269 		}
1270 		ref->data.weak++;
1271 	}
1272 	return 0;
1273 }
1274 
1275 /**
1276  * binder_dec_ref_olocked() - dec the ref for given handle
1277  * @ref:	ref to be decremented
1278  * @strong:	if true, strong decrement, else weak
1279  *
1280  * Decrement the ref.
1281  *
1282  * Return: %true if ref is cleaned up and ready to be freed.
1283  */
1284 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1285 {
1286 	if (strong) {
1287 		if (ref->data.strong == 0) {
1288 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1289 					  ref->proc->pid, ref->data.debug_id,
1290 					  ref->data.desc, ref->data.strong,
1291 					  ref->data.weak);
1292 			return false;
1293 		}
1294 		ref->data.strong--;
1295 		if (ref->data.strong == 0)
1296 			binder_dec_node(ref->node, strong, 1);
1297 	} else {
1298 		if (ref->data.weak == 0) {
1299 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1300 					  ref->proc->pid, ref->data.debug_id,
1301 					  ref->data.desc, ref->data.strong,
1302 					  ref->data.weak);
1303 			return false;
1304 		}
1305 		ref->data.weak--;
1306 	}
1307 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1308 		binder_cleanup_ref_olocked(ref);
1309 		return true;
1310 	}
1311 	return false;
1312 }
1313 
1314 /**
1315  * binder_get_node_from_ref() - get the node from the given proc/desc
1316  * @proc:	proc containing the ref
1317  * @desc:	the handle associated with the ref
1318  * @need_strong_ref: if true, only return node if ref is strong
1319  * @rdata:	the id/refcount data for the ref
1320  *
1321  * Given a proc and ref handle, return the associated binder_node
1322  *
1323  * Return: a binder_node or NULL if not found or not strong when strong required
1324  */
1325 static struct binder_node *binder_get_node_from_ref(
1326 		struct binder_proc *proc,
1327 		u32 desc, bool need_strong_ref,
1328 		struct binder_ref_data *rdata)
1329 {
1330 	struct binder_node *node;
1331 	struct binder_ref *ref;
1332 
1333 	binder_proc_lock(proc);
1334 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1335 	if (!ref)
1336 		goto err_no_ref;
1337 	node = ref->node;
1338 	/*
1339 	 * Take an implicit reference on the node to ensure
1340 	 * it stays alive until the call to binder_put_node()
1341 	 */
1342 	binder_inc_node_tmpref(node);
1343 	if (rdata)
1344 		*rdata = ref->data;
1345 	binder_proc_unlock(proc);
1346 
1347 	return node;
1348 
1349 err_no_ref:
1350 	binder_proc_unlock(proc);
1351 	return NULL;
1352 }
1353 
1354 /**
1355  * binder_free_ref() - free the binder_ref
1356  * @ref:	ref to free
1357  *
1358  * Free the binder_ref. Free the binder_node indicated by ref->node (if
1359  * non-NULL) plus the ref->death and ref->freeze objects, if present.
1360  */
1361 static void binder_free_ref(struct binder_ref *ref)
1362 {
1363 	if (ref->node)
1364 		binder_free_node(ref->node);
1365 	kfree(ref->death);
1366 	kfree(ref->freeze);
1367 	kfree(ref);
1368 }
1369 
1370 /* shrink descriptor bitmap if needed */
1371 static void try_shrink_dmap(struct binder_proc *proc)
1372 {
1373 	unsigned long *new;
1374 	int nbits;
1375 
1376 	binder_proc_lock(proc);
1377 	nbits = dbitmap_shrink_nbits(&proc->dmap);
1378 	binder_proc_unlock(proc);
1379 
1380 	if (!nbits)
1381 		return;
1382 
1383 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1384 	binder_proc_lock(proc);
1385 	dbitmap_shrink(&proc->dmap, new, nbits);
1386 	binder_proc_unlock(proc);
1387 }
1388 
1389 /**
1390  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1391  * @proc:	proc containing the ref
1392  * @desc:	the handle associated with the ref
1393  * @increment:	true=inc reference, false=dec reference
1394  * @strong:	true=strong reference, false=weak reference
1395  * @rdata:	the id/refcount data for the ref
1396  *
1397  * Given a proc and ref handle, increment or decrement the ref
1398  * according to "increment" arg.
1399  *
1400  * Return: 0 if successful, else errno
1401  */
1402 static int binder_update_ref_for_handle(struct binder_proc *proc,
1403 		uint32_t desc, bool increment, bool strong,
1404 		struct binder_ref_data *rdata)
1405 {
1406 	int ret = 0;
1407 	struct binder_ref *ref;
1408 	bool delete_ref = false;
1409 
1410 	binder_proc_lock(proc);
1411 	ref = binder_get_ref_olocked(proc, desc, strong);
1412 	if (!ref) {
1413 		ret = -EINVAL;
1414 		goto err_no_ref;
1415 	}
1416 	if (increment)
1417 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1418 	else
1419 		delete_ref = binder_dec_ref_olocked(ref, strong);
1420 
1421 	if (rdata)
1422 		*rdata = ref->data;
1423 	binder_proc_unlock(proc);
1424 
1425 	if (delete_ref) {
1426 		binder_free_ref(ref);
1427 		try_shrink_dmap(proc);
1428 	}
1429 	return ret;
1430 
1431 err_no_ref:
1432 	binder_proc_unlock(proc);
1433 	return ret;
1434 }
1435 
1436 /**
1437  * binder_dec_ref_for_handle() - dec the ref for given handle
1438  * @proc:	proc containing the ref
1439  * @desc:	the handle associated with the ref
1440  * @strong:	true=strong reference, false=weak reference
1441  * @rdata:	the id/refcount data for the ref
1442  *
1443  * Just calls binder_update_ref_for_handle() to decrement the ref.
1444  *
1445  * Return: 0 if successful, else errno
1446  */
1447 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1448 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1449 {
1450 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1451 }
1452 
1453 
1454 /**
1455  * binder_inc_ref_for_node() - increment the ref for given proc/node
1456  * @proc:	 proc containing the ref
1457  * @node:	 target node
1458  * @strong:	 true=strong reference, false=weak reference
1459  * @target_list: worklist to use if node is incremented
1460  * @rdata:	 the id/refcount data for the ref
1461  *
1462  * Given a proc and node, increment the ref. Create the ref if it
1463  * doesn't already exist
1464  *
1465  * Return: 0 if successful, else errno
1466  */
1467 static int binder_inc_ref_for_node(struct binder_proc *proc,
1468 			struct binder_node *node,
1469 			bool strong,
1470 			struct list_head *target_list,
1471 			struct binder_ref_data *rdata)
1472 {
1473 	struct binder_ref *ref;
1474 	struct binder_ref *new_ref = NULL;
1475 	int ret = 0;
1476 
1477 	binder_proc_lock(proc);
1478 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1479 	if (!ref) {
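		/*
		 * Allocate outside the spinlock: kzalloc(GFP_KERNEL) may
		 * sleep, and the lookup must be redone after reacquiring
		 * the lock since another thread may have raced ahead.
		 */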
1480 		binder_proc_unlock(proc);
1481 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1482 		if (!new_ref)
1483 			return -ENOMEM;
1484 		binder_proc_lock(proc);
1485 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1486 	}
1487 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1488 	*rdata = ref->data;
1489 	if (ret && ref == new_ref) {
1490 		/*
1491 		 * Clean up the failed reference here as the target
1492 		 * could be dead and may have already released its
1493 		 * references. Cleaning up the new reference while
1494 		 * its data.strong is 0 and a node tmp_ref is held
1495 		 * will not drop the node. The new_ref gets kfree'd below.
1496 		 */
1497 		binder_cleanup_ref_olocked(new_ref);
1498 		ref = NULL;
1499 	}
1500 
1501 	binder_proc_unlock(proc);
1502 	if (new_ref && ref != new_ref)
1503 		/*
1504 		 * Another thread created the ref first so
1505 		 * free the one we allocated
1506 		 */
1507 		kfree(new_ref);
1508 	return ret;
1509 }
1510 
1511 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1512 					   struct binder_transaction *t)
1513 {
1514 	BUG_ON(!target_thread);
1515 	assert_spin_locked(&target_thread->proc->inner_lock);
1516 	BUG_ON(target_thread->transaction_stack != t);
1517 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1518 	target_thread->transaction_stack =
1519 		target_thread->transaction_stack->from_parent;
1520 	t->from = NULL;
1521 }
1522 
1523 /**
1524  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1525  * @thread:	thread to decrement
1526  *
1527  * A thread needs to be kept alive while being used to create or
1528  * handle a transaction. binder_get_txn_from() is used to safely
1529  * extract t->from from a binder_transaction and keep the thread
1530  * indicated by t->from from being freed. When done with that
1531  * binder_thread, this function is called to decrement the
1532  * tmp_ref and free if appropriate (thread has been released
1533  * and no transaction being processed by the driver)
1534  */
1535 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1536 {
1537 	/*
1538 	 * The atomic protects the counter; the inner lock ensures the
1539 	 * is_dead check and free decision don't race with thread release.
1540 	 */
1541 	binder_inner_proc_lock(thread->proc);
1542 	atomic_dec(&thread->tmp_ref);
1543 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1544 		binder_inner_proc_unlock(thread->proc);
1545 		binder_free_thread(thread);
1546 		return;
1547 	}
1548 	binder_inner_proc_unlock(thread->proc);
1549 }
1550 
1551 /**
1552  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1553  * @proc:	proc to decrement
1554  *
1555  * A binder_proc needs to be kept alive while being used to create or
1556  * handle a transaction. proc->tmp_ref is incremented when
1557  * creating a new transaction or the binder_proc is currently in-use
1558  * by threads that are being released. When done with the binder_proc,
1559  * this function is called to decrement the counter and free the
1560  * proc if appropriate (proc has been released, all threads have
1561  * been released and not currently in-use to process a transaction).
1562  */
1563 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1564 {
1565 	binder_inner_proc_lock(proc);
1566 	proc->tmp_ref--;
1567 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1568 			!proc->tmp_ref) {
1569 		binder_inner_proc_unlock(proc);
1570 		binder_free_proc(proc);
1571 		return;
1572 	}
1573 	binder_inner_proc_unlock(proc);
1574 }
1575 
1576 /**
1577  * binder_get_txn_from() - safely extract the "from" thread in transaction
1578  * @t:	binder transaction for t->from
1579  *
1580  * Atomically return the "from" thread and increment the tmp_ref
1581  * count for the thread to ensure it stays alive until
1582  * binder_thread_dec_tmpref() is called.
1583  *
1584  * Return: the value of t->from
1585  */
1586 static struct binder_thread *binder_get_txn_from(
1587 		struct binder_transaction *t)
1588 {
1589 	struct binder_thread *from;
1590 
1591 	guard(spinlock)(&t->lock);
1592 	from = t->from;
1593 	if (from)
1594 		atomic_inc(&from->tmp_ref);
1595 	return from;
1596 }
1597 
1598 /**
1599  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1600  * @t:	binder transaction for t->from
1601  *
1602  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1603  * to guarantee that the thread cannot be released while operating on it.
1604  * The caller must call binder_inner_proc_unlock() to release the inner lock
1605  * as well as call binder_thread_dec_tmpref() to release the reference.
1606  *
1607  * Return: the value of t->from
1608  */
1609 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1610 		struct binder_transaction *t)
1611 	__acquires(&t->from->proc->inner_lock)
1612 {
1613 	struct binder_thread *from;
1614 
1615 	from = binder_get_txn_from(t);
1616 	if (!from) {
1617 		__acquire(&from->proc->inner_lock);
1618 		return NULL;
1619 	}
1620 	binder_inner_proc_lock(from->proc);
1621 	if (t->from) {
1622 		BUG_ON(from != t->from);
1623 		return from;
1624 	}
1625 	binder_inner_proc_unlock(from->proc);
1626 	__acquire(&from->proc->inner_lock);
1627 	binder_thread_dec_tmpref(from);
1628 	return NULL;
1629 }
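
/*
 * Hypothetical usage sketch of the tmp_ref pairing described above:
 * a successful binder_get_txn_from() pins the sender thread and must
 * be matched by a binder_thread_dec_tmpref().
 */
static void __maybe_unused binder_txn_from_usage_sketch(
		struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from(t);

	if (!from)
		return;	/* sender thread already gone */

	/* ...'from' may be used here; it cannot be freed until the dec... */

	binder_thread_dec_tmpref(from);
}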
1630 
1631 /**
1632  * binder_free_txn_fixups() - free unprocessed fd fixups
1633  * @t:	binder transaction whose fd fixups are to be freed
1634  *
1635  * If the transaction is being torn down prior to being
1636  * processed by the target process, free all of the
1637  * fd fixups and fput the file structs. It is safe to
1638  * call this function after the fixups have been
1639  * processed -- in that case, the list will be empty.
1640  */
1641 static void binder_free_txn_fixups(struct binder_transaction *t)
1642 {
1643 	struct binder_txn_fd_fixup *fixup, *tmp;
1644 
1645 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1646 		fput(fixup->file);
1647 		if (fixup->target_fd >= 0)
1648 			put_unused_fd(fixup->target_fd);
1649 		list_del(&fixup->fixup_entry);
1650 		kfree(fixup);
1651 	}
1652 }
1653 
1654 static void binder_txn_latency_free(struct binder_transaction *t)
1655 {
1656 	int from_proc, from_thread, to_proc, to_thread;
1657 
1658 	spin_lock(&t->lock);
1659 	from_proc = t->from ? t->from->proc->pid : 0;
1660 	from_thread = t->from ? t->from->pid : 0;
1661 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1662 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1663 	spin_unlock(&t->lock);
1664 
1665 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1666 }
1667 
1668 static void binder_free_transaction(struct binder_transaction *t)
1669 {
1670 	struct binder_proc *target_proc = t->to_proc;
1671 
1672 	if (target_proc) {
1673 		binder_inner_proc_lock(target_proc);
1674 		target_proc->outstanding_txns--;
1675 		if (target_proc->outstanding_txns < 0)
1676 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1677 				__func__, target_proc->outstanding_txns);
1678 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1679 			wake_up_interruptible_all(&target_proc->freeze_wait);
1680 		if (t->buffer)
1681 			t->buffer->transaction = NULL;
1682 		binder_inner_proc_unlock(target_proc);
1683 	}
1684 	if (trace_binder_txn_latency_free_enabled())
1685 		binder_txn_latency_free(t);
1686 	/*
1687 	 * If the transaction has no target_proc, then
1688 	 * t->buffer->transaction has already been cleared.
1689 	 */
1690 	binder_free_txn_fixups(t);
1691 	kfree(t);
1692 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1693 }
1694 
1695 static void binder_send_failed_reply(struct binder_transaction *t,
1696 				     uint32_t error_code)
1697 {
1698 	struct binder_thread *target_thread;
1699 	struct binder_transaction *next;
1700 
1701 	BUG_ON(t->flags & TF_ONE_WAY);
1702 	while (1) {
1703 		target_thread = binder_get_txn_from_and_acq_inner(t);
1704 		if (target_thread) {
1705 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1706 				     "send failed reply for transaction %d to %d:%d\n",
1707 				      t->debug_id,
1708 				      target_thread->proc->pid,
1709 				      target_thread->pid);
1710 
1711 			binder_pop_transaction_ilocked(target_thread, t);
1712 			if (target_thread->reply_error.cmd == BR_OK) {
1713 				target_thread->reply_error.cmd = error_code;
1714 				binder_enqueue_thread_work_ilocked(
1715 					target_thread,
1716 					&target_thread->reply_error.work);
1717 				wake_up_interruptible(&target_thread->wait);
1718 			} else {
1719 				/*
1720 				 * Cannot get here for normal operation, but
1721 				 * we can if multiple synchronous transactions
1722 				 * are sent without blocking for responses.
1723 				 * Just ignore the 2nd error in this case.
1724 				 */
1725 				pr_warn("Unexpected reply error: %u\n",
1726 					target_thread->reply_error.cmd);
1727 			}
1728 			binder_inner_proc_unlock(target_thread->proc);
1729 			binder_thread_dec_tmpref(target_thread);
1730 			binder_free_transaction(t);
1731 			return;
1732 		}
1733 		__release(&target_thread->proc->inner_lock);
1734 		next = t->from_parent;
1735 
1736 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1737 			     "send failed reply for transaction %d, target dead\n",
1738 			     t->debug_id);
1739 
1740 		binder_free_transaction(t);
1741 		if (next == NULL) {
1742 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1743 				     "reply failed, no target thread at root\n");
1744 			return;
1745 		}
1746 		t = next;
1747 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1748 			     "reply failed, no target thread -- retry %d\n",
1749 			      t->debug_id);
1750 	}
1751 }
1752 
1753 /**
1754  * binder_cleanup_transaction() - cleans up undelivered transaction
1755  * @t:		transaction that needs to be cleaned up
1756  * @reason:	reason the transaction wasn't delivered
1757  * @error_code:	error to return to caller (if synchronous call)
1758  */
1759 static void binder_cleanup_transaction(struct binder_transaction *t,
1760 				       const char *reason,
1761 				       uint32_t error_code)
1762 {
1763 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1764 		binder_send_failed_reply(t, error_code);
1765 	} else {
1766 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1767 			"undelivered transaction %d, %s\n",
1768 			t->debug_id, reason);
1769 		binder_free_transaction(t);
1770 	}
1771 }
1772 
1773 /**
1774  * binder_get_object() - gets object and checks for valid metadata
1775  * @proc:	binder_proc owning the buffer
1776  * @u:		sender's user pointer to base of buffer
1777  * @buffer:	binder_buffer that we're parsing.
1778  * @offset:	offset in the @buffer at which to validate an object.
1779  * @object:	struct binder_object to read into
1780  *
1781  * Copy the binder object at the given offset into @object. If @u is
1782  * provided then the copy is from the sender's buffer. If not, then
1783  * it is copied from the target's @buffer.
1784  *
1785  * Return:	If there's a valid metadata object at @offset, the
1786  *		size of that object. Otherwise, it returns zero. The object
1787  *		is read into the struct binder_object pointed to by @object.
1788  */
1789 static size_t binder_get_object(struct binder_proc *proc,
1790 				const void __user *u,
1791 				struct binder_buffer *buffer,
1792 				unsigned long offset,
1793 				struct binder_object *object)
1794 {
1795 	size_t read_size;
1796 	struct binder_object_header *hdr;
1797 	size_t object_size = 0;
1798 
1799 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1800 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1801 	    !IS_ALIGNED(offset, sizeof(u32)))
1802 		return 0;
1803 
1804 	if (u) {
1805 		if (copy_from_user(object, u + offset, read_size))
1806 			return 0;
1807 	} else {
1808 		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1809 						  offset, read_size))
1810 			return 0;
1811 	}
1812 
1813 	/* Ok, now see if we read a complete object. */
1814 	hdr = &object->hdr;
1815 	switch (hdr->type) {
1816 	case BINDER_TYPE_BINDER:
1817 	case BINDER_TYPE_WEAK_BINDER:
1818 	case BINDER_TYPE_HANDLE:
1819 	case BINDER_TYPE_WEAK_HANDLE:
1820 		object_size = sizeof(struct flat_binder_object);
1821 		break;
1822 	case BINDER_TYPE_FD:
1823 		object_size = sizeof(struct binder_fd_object);
1824 		break;
1825 	case BINDER_TYPE_PTR:
1826 		object_size = sizeof(struct binder_buffer_object);
1827 		break;
1828 	case BINDER_TYPE_FDA:
1829 		object_size = sizeof(struct binder_fd_array_object);
1830 		break;
1831 	default:
1832 		return 0;
1833 	}
1834 	if (offset <= buffer->data_size - object_size &&
1835 	    buffer->data_size >= object_size)
1836 		return object_size;
1837 	else
1838 		return 0;
1839 }
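
/*
 * Editor's note: the sketch below is illustrative only and is not part
 * of the driver. It shows, from the sender's side, the layout that
 * binder_get_object() accepts: each object starts at a u32-aligned
 * offset recorded in the offsets array and lies entirely within
 * data_size. All names in the sketch are hypothetical.
 */
#if 0	/* example only; excluded from the build */
static void example_sender_layout(struct binder_transaction_data *tr)
{
	static char data[128] __aligned(8);
	static binder_size_t offsets[1];
	struct flat_binder_object *fbo = (struct flat_binder_object *)data;

	fbo->hdr.type = BINDER_TYPE_BINDER;	/* strong node */
	fbo->flags = 0;
	fbo->binder = 0x1000;			/* sender-local node ptr */
	fbo->cookie = 0;

	offsets[0] = 0;	/* u32-aligned; object fits within data_size */
	tr->data_size = sizeof(*fbo);
	tr->offsets_size = sizeof(offsets);
	tr->data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;
	tr->data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets;
}
#endif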
1840 
1841 /**
1842  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1843  * @proc:	binder_proc owning the buffer
1844  * @b:		binder_buffer containing the object
1845  * @object:	struct binder_object to read into
1846  * @index:	index in offset array at which the binder_buffer_object is
1847  *		located
1848  * @start_offset: points to the start of the offset array
1849  * @object_offsetp: offset of @object read from @b
1850  * @num_valid:	the number of valid offsets in the offset array
1851  *
1852  * Return:	If @index is within the valid range of the offset array
1853  *		described by @start and @num_valid, and if there's a valid
1854  *		described by @start_offset and @num_valid, and if there's a valid
1855  *		of the offset array, that object is returned. Otherwise,
1856  *		%NULL is returned.
1857  *		Note that the offset found in index @index itself is not
1858  *		verified; this function assumes that @num_valid elements
1859  *		from @start were previously verified to have valid offsets.
1860  *		If @object_offsetp is non-NULL, then the offset within
1861  *		@b is written to it.
1862  */
1863 static struct binder_buffer_object *binder_validate_ptr(
1864 						struct binder_proc *proc,
1865 						struct binder_buffer *b,
1866 						struct binder_object *object,
1867 						binder_size_t index,
1868 						binder_size_t start_offset,
1869 						binder_size_t *object_offsetp,
1870 						binder_size_t num_valid)
1871 {
1872 	size_t object_size;
1873 	binder_size_t object_offset;
1874 	unsigned long buffer_offset;
1875 
1876 	if (index >= num_valid)
1877 		return NULL;
1878 
1879 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1880 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1881 					  b, buffer_offset,
1882 					  sizeof(object_offset)))
1883 		return NULL;
1884 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1885 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1886 		return NULL;
1887 	if (object_offsetp)
1888 		*object_offsetp = object_offset;
1889 
1890 	return &object->bbo;
1891 }
1892 
1893 /**
1894  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1895  * @proc:		binder_proc owning the buffer
1896  * @b:			transaction buffer
1897  * @objects_start_offset: offset to start of objects buffer
1898  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1899  * @fixup_offset:	start offset in @buffer to fix up
1900  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1901  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1902  *
1903  * Return:		%true if a fixup in buffer @b at offset @fixup_offset
1904  *			is allowed.
1905  *
1906  * For safety reasons, we only allow fixups inside a buffer to happen
1907  * at increasing offsets; additionally, we only allow fixup on the last
1908  * buffer object that was verified, or one of its parents.
1909  *
1910  * Example of what is allowed:
1911  *
1912  * A
1913  *   B (parent = A, offset = 0)
1914  *   C (parent = A, offset = 16)
1915  *     D (parent = C, offset = 0)
1916  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1917  *
1918  * Examples of what is not allowed:
1919  *
1920  * Decreasing offsets within the same parent:
1921  * A
1922  *   C (parent = A, offset = 16)
1923  *   B (parent = A, offset = 0) // decreasing offset within A
1924  *
1925  * Referring to a parent that wasn't the last object or any of its parents:
1926  * A
1927  *   B (parent = A, offset = 0)
1928  *   C (parent = A, offset = 0)
1929  *   C (parent = A, offset = 16)
1930  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1931  */
1932 static bool binder_validate_fixup(struct binder_proc *proc,
1933 				  struct binder_buffer *b,
1934 				  binder_size_t objects_start_offset,
1935 				  binder_size_t buffer_obj_offset,
1936 				  binder_size_t fixup_offset,
1937 				  binder_size_t last_obj_offset,
1938 				  binder_size_t last_min_offset)
1939 {
1940 	if (!last_obj_offset) {
1941 		/* No object has been verified yet; no fixup is allowed */
1942 		return false;
1943 	}
1944 
1945 	while (last_obj_offset != buffer_obj_offset) {
1946 		unsigned long buffer_offset;
1947 		struct binder_object last_object;
1948 		struct binder_buffer_object *last_bbo;
1949 		size_t object_size = binder_get_object(proc, NULL, b,
1950 						       last_obj_offset,
1951 						       &last_object);
1952 		if (object_size != sizeof(*last_bbo))
1953 			return false;
1954 
1955 		last_bbo = &last_object.bbo;
1956 		/*
1957 		 * Safe to retrieve the parent of last_obj, since it
1958 		 * was already previously verified by the driver.
1959 		 */
1960 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1961 			return false;
1962 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1963 		buffer_offset = objects_start_offset +
1964 			sizeof(binder_size_t) * last_bbo->parent;
1965 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1966 						  &last_obj_offset,
1967 						  b, buffer_offset,
1968 						  sizeof(last_obj_offset)))
1969 			return false;
1970 	}
1971 	return (fixup_offset >= last_min_offset);
1972 }
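
/*
 * Editor's note: a standalone model of the rule above, illustrative
 * only and not part of the driver. It walks from the last verified
 * object up through its parents; a fixup in @obj is allowed only if
 * @obj is on that chain and the fixup offset is not below the minimum
 * established by the child we descended from. All names here are
 * hypothetical.
 */
#if 0	/* example only; excluded from the build */
struct model_obj {
	bool has_parent;
	int parent;			/* index of parent object */
	size_t parent_offset;		/* offset of pointer in parent */
};

static bool model_validate_fixup(const struct model_obj *objs, int obj,
				 size_t fixup_offset, int last_obj,
				 size_t last_min_offset)
{
	while (last_obj != obj) {
		if (!objs[last_obj].has_parent)
			return false;
		last_min_offset = objs[last_obj].parent_offset +
				  sizeof(uintptr_t);
		last_obj = objs[last_obj].parent;
	}
	return fixup_offset >= last_min_offset;
}
#endif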
1973 
1974 /**
1975  * struct binder_task_work_cb - for deferred close
1976  *
1977  * @twork:                callback_head for task work
1978  * @file:                 file to close
1979  *
1980  * Structure to pass task work to be handled after
1981  * returning from binder_ioctl() via task_work_add().
1982  */
1983 struct binder_task_work_cb {
1984 	struct callback_head twork;
1985 	struct file *file;
1986 };
1987 
1988 /**
1989  * binder_do_fd_close() - close list of file descriptors
1990  * @twork:	callback head for task work
1991  *
1992  * It is not safe to call ksys_close() during the binder_ioctl()
1993  * function if there is a chance that binder's own file descriptor
1994  * might be closed. This is to meet the requirements for using
1995  * fdget() (see comments for __fget_light()). Therefore use
1996  * task_work_add() to schedule the close operation once we have
1997  * returned from binder_ioctl(). This function is a callback
1998  * for that mechanism and drops the final reference to the
1999  * file that was pinned in binder_deferred_fd_close().
2000  */
2001 static void binder_do_fd_close(struct callback_head *twork)
2002 {
2003 	struct binder_task_work_cb *twcb = container_of(twork,
2004 			struct binder_task_work_cb, twork);
2005 
2006 	fput(twcb->file);
2007 	kfree(twcb);
2008 }
2009 
2010 /**
2011  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2012  * @fd:		file-descriptor to close
2013  *
2014  * See comments in binder_do_fd_close(). This function is used to schedule
2015  * a file-descriptor to be closed after returning from binder_ioctl().
2016  */
2017 static void binder_deferred_fd_close(int fd)
2018 {
2019 	struct binder_task_work_cb *twcb;
2020 
2021 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2022 	if (!twcb)
2023 		return;
2024 	init_task_work(&twcb->twork, binder_do_fd_close);
2025 	twcb->file = file_close_fd(fd);
2026 	if (twcb->file) {
2027 		// pin it until binder_do_fd_close(); see comments there
2028 		get_file(twcb->file);
2029 		filp_close(twcb->file, current->files);
2030 		task_work_add(current, &twcb->twork, TWA_RESUME);
2031 	} else {
2032 		kfree(twcb);
2033 	}
2034 }
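
/*
 * Editor's note: the generic shape of the deferral used above, shown
 * as an illustrative sketch that is not part of the driver. Cleanup
 * that is unsafe in the current context is packaged in a
 * callback_head, queued with task_work_add(), and runs when the task
 * returns to user space. my_cb/my_work/my_defer are hypothetical.
 */
#if 0	/* example only; excluded from the build */
struct my_cb {
	struct callback_head twork;
	/* payload for the deferred cleanup */
};

static void my_work(struct callback_head *twork)
{
	struct my_cb *cb = container_of(twork, struct my_cb, twork);

	/* safe here: we are back on the return-to-user path */
	kfree(cb);
}

static void my_defer(void)
{
	struct my_cb *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return;
	init_task_work(&cb->twork, my_work);
	task_work_add(current, &cb->twork, TWA_RESUME);
}
#endif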
2035 
2036 static void binder_transaction_buffer_release(struct binder_proc *proc,
2037 					      struct binder_thread *thread,
2038 					      struct binder_buffer *buffer,
2039 					      binder_size_t off_end_offset,
2040 					      bool is_failure)
2041 {
2042 	int debug_id = buffer->debug_id;
2043 	binder_size_t off_start_offset, buffer_offset;
2044 
2045 	binder_debug(BINDER_DEBUG_TRANSACTION,
2046 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2047 		     proc->pid, buffer->debug_id,
2048 		     buffer->data_size, buffer->offsets_size,
2049 		     (unsigned long long)off_end_offset);
2050 
2051 	if (buffer->target_node)
2052 		binder_dec_node(buffer->target_node, 1, 0);
2053 
2054 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2055 
2056 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2057 	     buffer_offset += sizeof(binder_size_t)) {
2058 		struct binder_object_header *hdr;
2059 		size_t object_size = 0;
2060 		struct binder_object object;
2061 		binder_size_t object_offset;
2062 
2063 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2064 						   buffer, buffer_offset,
2065 						   sizeof(object_offset)))
2066 			object_size = binder_get_object(proc, NULL, buffer,
2067 							object_offset, &object);
2068 		if (object_size == 0) {
2069 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2070 			       debug_id, (u64)object_offset, buffer->data_size);
2071 			continue;
2072 		}
2073 		hdr = &object.hdr;
2074 		switch (hdr->type) {
2075 		case BINDER_TYPE_BINDER:
2076 		case BINDER_TYPE_WEAK_BINDER: {
2077 			struct flat_binder_object *fp;
2078 			struct binder_node *node;
2079 
2080 			fp = to_flat_binder_object(hdr);
2081 			node = binder_get_node(proc, fp->binder);
2082 			if (node == NULL) {
2083 				pr_err("transaction release %d bad node %016llx\n",
2084 				       debug_id, (u64)fp->binder);
2085 				break;
2086 			}
2087 			binder_debug(BINDER_DEBUG_TRANSACTION,
2088 				     "        node %d u%016llx\n",
2089 				     node->debug_id, (u64)node->ptr);
2090 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2091 					0);
2092 			binder_put_node(node);
2093 		} break;
2094 		case BINDER_TYPE_HANDLE:
2095 		case BINDER_TYPE_WEAK_HANDLE: {
2096 			struct flat_binder_object *fp;
2097 			struct binder_ref_data rdata;
2098 			int ret;
2099 
2100 			fp = to_flat_binder_object(hdr);
2101 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2102 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2103 
2104 			if (ret) {
2105 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2106 				 debug_id, fp->handle, ret);
2107 				break;
2108 			}
2109 			binder_debug(BINDER_DEBUG_TRANSACTION,
2110 				     "        ref %d desc %d\n",
2111 				     rdata.debug_id, rdata.desc);
2112 		} break;
2113 
2114 		case BINDER_TYPE_FD: {
2115 			/*
2116 			 * No need to close the file here since user-space
2117 			 * closes it for successfully delivered
2118 			 * transactions. For transactions that weren't
2119 			 * delivered, the new fd was never allocated so
2120 			 * there is no need to close and the fput on the
2121 			 * file is done when the transaction is torn
2122 			 * down.
2123 			 */
2124 		} break;
2125 		case BINDER_TYPE_PTR:
2126 			/*
2127 			 * Nothing to do here, this will get cleaned up when the
2128 			 * transaction buffer gets freed
2129 			 */
2130 			break;
2131 		case BINDER_TYPE_FDA: {
2132 			struct binder_fd_array_object *fda;
2133 			struct binder_buffer_object *parent;
2134 			struct binder_object ptr_object;
2135 			binder_size_t fda_offset;
2136 			size_t fd_index;
2137 			binder_size_t fd_buf_size;
2138 			binder_size_t num_valid;
2139 
2140 			if (is_failure) {
2141 				/*
2142 				 * The fd fixups have not been applied so no
2143 				 * fds need to be closed.
2144 				 */
2145 				continue;
2146 			}
2147 
2148 			num_valid = (buffer_offset - off_start_offset) /
2149 						sizeof(binder_size_t);
2150 			fda = to_binder_fd_array_object(hdr);
2151 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2152 						     fda->parent,
2153 						     off_start_offset,
2154 						     NULL,
2155 						     num_valid);
2156 			if (!parent) {
2157 				pr_err("transaction release %d bad parent offset\n",
2158 				       debug_id);
2159 				continue;
2160 			}
2161 			fd_buf_size = sizeof(u32) * fda->num_fds;
2162 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2163 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2164 				       debug_id, (u64)fda->num_fds);
2165 				continue;
2166 			}
2167 			if (fd_buf_size > parent->length ||
2168 			    fda->parent_offset > parent->length - fd_buf_size) {
2169 				/* No space for all file descriptors here. */
2170 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2171 				       debug_id, (u64)fda->num_fds);
2172 				continue;
2173 			}
2174 			/*
2175 			 * the source data for binder_buffer_object is visible
2176 			 * to user-space and the @buffer element is the user
2177 			 * pointer to the buffer_object containing the fd_array.
2178 			 * Convert the address to an offset relative to
2179 			 * the base of the transaction buffer.
2180 			 */
2181 			fda_offset = parent->buffer - buffer->user_data +
2182 				fda->parent_offset;
2183 			for (fd_index = 0; fd_index < fda->num_fds;
2184 			     fd_index++) {
2185 				u32 fd;
2186 				int err;
2187 				binder_size_t offset = fda_offset +
2188 					fd_index * sizeof(fd);
2189 
2190 				err = binder_alloc_copy_from_buffer(
2191 						&proc->alloc, &fd, buffer,
2192 						offset, sizeof(fd));
2193 				WARN_ON(err);
2194 				if (!err) {
2195 					binder_deferred_fd_close(fd);
2196 					/*
2197 					 * Need to make sure the thread goes
2198 					 * back to userspace to complete the
2199 					 * deferred close
2200 					 */
2201 					if (thread)
2202 						thread->looper_need_return = true;
2203 				}
2204 			}
2205 		} break;
2206 		default:
2207 			pr_err("transaction release %d bad object type %x\n",
2208 				debug_id, hdr->type);
2209 			break;
2210 		}
2211 	}
2212 }
2213 
2214 /* Clean up all the objects in the buffer */
2215 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2216 						struct binder_thread *thread,
2217 						struct binder_buffer *buffer,
2218 						bool is_failure)
2219 {
2220 	binder_size_t off_end_offset;
2221 
2222 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2223 	off_end_offset += buffer->offsets_size;
2224 
2225 	binder_transaction_buffer_release(proc, thread, buffer,
2226 					  off_end_offset, is_failure);
2227 }
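
/*
 * Editor's note: illustrative helper, not part of the driver, showing
 * the buffer layout assumed above: the offsets array begins at
 * data_size rounded up to a pointer boundary and spans offsets_size
 * bytes.
 */
#if 0	/* example only; excluded from the build */
static binder_size_t model_off_end(size_t data_size, size_t offsets_size)
{
	return ALIGN(data_size, sizeof(void *)) + offsets_size;
}
#endif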
2228 
2229 static int binder_translate_binder(struct flat_binder_object *fp,
2230 				   struct binder_transaction *t,
2231 				   struct binder_thread *thread)
2232 {
2233 	struct binder_node *node;
2234 	struct binder_proc *proc = thread->proc;
2235 	struct binder_proc *target_proc = t->to_proc;
2236 	struct binder_ref_data rdata;
2237 	int ret = 0;
2238 
2239 	node = binder_get_node(proc, fp->binder);
2240 	if (!node) {
2241 		node = binder_new_node(proc, fp);
2242 		if (!node)
2243 			return -ENOMEM;
2244 	}
2245 	if (fp->cookie != node->cookie) {
2246 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2247 				  proc->pid, thread->pid, (u64)fp->binder,
2248 				  node->debug_id, (u64)fp->cookie,
2249 				  (u64)node->cookie);
2250 		ret = -EINVAL;
2251 		goto done;
2252 	}
2253 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2254 		ret = -EPERM;
2255 		goto done;
2256 	}
2257 
2258 	ret = binder_inc_ref_for_node(target_proc, node,
2259 			fp->hdr.type == BINDER_TYPE_BINDER,
2260 			&thread->todo, &rdata);
2261 	if (ret)
2262 		goto done;
2263 
2264 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2265 		fp->hdr.type = BINDER_TYPE_HANDLE;
2266 	else
2267 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2268 	fp->binder = 0;
2269 	fp->handle = rdata.desc;
2270 	fp->cookie = 0;
2271 
2272 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2273 	binder_debug(BINDER_DEBUG_TRANSACTION,
2274 		     "        node %d u%016llx -> ref %d desc %d\n",
2275 		     node->debug_id, (u64)node->ptr,
2276 		     rdata.debug_id, rdata.desc);
2277 done:
2278 	binder_put_node(node);
2279 	return ret;
2280 }
2281 
2282 static int binder_translate_handle(struct flat_binder_object *fp,
2283 				   struct binder_transaction *t,
2284 				   struct binder_thread *thread)
2285 {
2286 	struct binder_proc *proc = thread->proc;
2287 	struct binder_proc *target_proc = t->to_proc;
2288 	struct binder_node *node;
2289 	struct binder_ref_data src_rdata;
2290 	int ret = 0;
2291 
2292 	node = binder_get_node_from_ref(proc, fp->handle,
2293 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2294 	if (!node) {
2295 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2296 				  proc->pid, thread->pid, fp->handle);
2297 		return -EINVAL;
2298 	}
2299 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2300 		ret = -EPERM;
2301 		goto done;
2302 	}
2303 
2304 	binder_node_lock(node);
2305 	if (node->proc == target_proc) {
2306 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2307 			fp->hdr.type = BINDER_TYPE_BINDER;
2308 		else
2309 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2310 		fp->binder = node->ptr;
2311 		fp->cookie = node->cookie;
2312 		if (node->proc)
2313 			binder_inner_proc_lock(node->proc);
2314 		else
2315 			__acquire(&node->proc->inner_lock);
2316 		binder_inc_node_nilocked(node,
2317 					 fp->hdr.type == BINDER_TYPE_BINDER,
2318 					 0, NULL);
2319 		if (node->proc)
2320 			binder_inner_proc_unlock(node->proc);
2321 		else
2322 			__release(&node->proc->inner_lock);
2323 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2324 		binder_debug(BINDER_DEBUG_TRANSACTION,
2325 			     "        ref %d desc %d -> node %d u%016llx\n",
2326 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2327 			     (u64)node->ptr);
2328 		binder_node_unlock(node);
2329 	} else {
2330 		struct binder_ref_data dest_rdata;
2331 
2332 		binder_node_unlock(node);
2333 		ret = binder_inc_ref_for_node(target_proc, node,
2334 				fp->hdr.type == BINDER_TYPE_HANDLE,
2335 				NULL, &dest_rdata);
2336 		if (ret)
2337 			goto done;
2338 
2339 		fp->binder = 0;
2340 		fp->handle = dest_rdata.desc;
2341 		fp->cookie = 0;
2342 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2343 						    &dest_rdata);
2344 		binder_debug(BINDER_DEBUG_TRANSACTION,
2345 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2346 			     src_rdata.debug_id, src_rdata.desc,
2347 			     dest_rdata.debug_id, dest_rdata.desc,
2348 			     node->debug_id);
2349 	}
2350 done:
2351 	binder_put_node(node);
2352 	return ret;
2353 }
2354 
2355 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2356 			       struct binder_transaction *t,
2357 			       struct binder_thread *thread,
2358 			       struct binder_transaction *in_reply_to)
2359 {
2360 	struct binder_proc *proc = thread->proc;
2361 	struct binder_proc *target_proc = t->to_proc;
2362 	struct binder_txn_fd_fixup *fixup;
2363 	struct file *file;
2364 	int ret = 0;
2365 	bool target_allows_fd;
2366 
2367 	if (in_reply_to)
2368 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2369 	else
2370 		target_allows_fd = t->buffer->target_node->accept_fds;
2371 	if (!target_allows_fd) {
2372 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2373 				  proc->pid, thread->pid,
2374 				  in_reply_to ? "reply" : "transaction",
2375 				  fd);
2376 		ret = -EPERM;
2377 		goto err_fd_not_accepted;
2378 	}
2379 
2380 	file = fget(fd);
2381 	if (!file) {
2382 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2383 				  proc->pid, thread->pid, fd);
2384 		ret = -EBADF;
2385 		goto err_fget;
2386 	}
2387 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2388 	if (ret < 0) {
2389 		ret = -EPERM;
2390 		goto err_security;
2391 	}
2392 
2393 	/*
2394 	 * Add fixup record for this transaction. The allocation
2395 	 * of the fd in the target needs to be done from a
2396 	 * target thread.
2397 	 */
2398 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2399 	if (!fixup) {
2400 		ret = -ENOMEM;
2401 		goto err_alloc;
2402 	}
2403 	fixup->file = file;
2404 	fixup->offset = fd_offset;
2405 	fixup->target_fd = -1;
2406 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2407 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2408 
2409 	return ret;
2410 
2411 err_alloc:
2412 err_security:
2413 	fput(file);
2414 err_fget:
2415 err_fd_not_accepted:
2416 	return ret;
2417 }
2418 
2419 /**
2420  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2421  * @offset:	offset in target buffer to fixup
2422  * @skip_size:	bytes to skip in copy (fixup will be written later)
2423  * @fixup_data:	data to write at fixup offset
2424  * @node:	list node
2425  *
2426  * This is used for the pointer fixup list (pf) which is created and consumed
2427  * during binder_transaction() and is only accessed locally. No
2428  * locking is necessary.
2429  *
2430  * The list is ordered by @offset.
2431  */
2432 struct binder_ptr_fixup {
2433 	binder_size_t offset;
2434 	size_t skip_size;
2435 	binder_uintptr_t fixup_data;
2436 	struct list_head node;
2437 };
2438 
2439 /**
2440  * struct binder_sg_copy - scatter-gather data to be copied
2441  * @offset:		offset in target buffer
2442  * @sender_uaddr:	user address in source buffer
2443  * @length:		bytes to copy
2444  * @node:		list node
2445  *
2446  * This is used for the sg copy list (sgc) which is created and consumed
2447  * during binder_transaction() and is only accessed locally. No
2448  * locking is necessary.
2449  *
2450  * The list is ordered by @offset.
2451  */
2452 struct binder_sg_copy {
2453 	binder_size_t offset;
2454 	const void __user *sender_uaddr;
2455 	size_t length;
2456 	struct list_head node;
2457 };
2458 
2459 /**
2460  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2461  * @alloc:	binder_alloc associated with @buffer
2462  * @buffer:	binder buffer in target process
2463  * @sgc_head:	list_head of scatter-gather copy list
2464  * @pf_head:	list_head of pointer fixup list
2465  *
2466  * Processes all elements of @sgc_head, applying fixups from @pf_head
2467  * and copying the scatter-gather data from the source process' user
2468  * buffer to the target's buffer. It is expected that the list creation
2469  * and processing all occurs during binder_transaction() so these lists
2470  * are only accessed in local context.
2471  *
2472  * Return: 0=success, else -errno
2473  */
2474 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2475 					 struct binder_buffer *buffer,
2476 					 struct list_head *sgc_head,
2477 					 struct list_head *pf_head)
2478 {
2479 	int ret = 0;
2480 	struct binder_sg_copy *sgc, *tmpsgc;
2481 	struct binder_ptr_fixup *tmppf;
2482 	struct binder_ptr_fixup *pf =
2483 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2484 					 node);
2485 
2486 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2487 		size_t bytes_copied = 0;
2488 
2489 		while (bytes_copied < sgc->length) {
2490 			size_t copy_size;
2491 			size_t bytes_left = sgc->length - bytes_copied;
2492 			size_t offset = sgc->offset + bytes_copied;
2493 
2494 			/*
2495 			 * We copy up to the fixup (pointed to by pf)
2496 			 */
2497 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2498 				       : bytes_left;
2499 			if (!ret && copy_size)
2500 				ret = binder_alloc_copy_user_to_buffer(
2501 						alloc, buffer,
2502 						offset,
2503 						sgc->sender_uaddr + bytes_copied,
2504 						copy_size);
2505 			bytes_copied += copy_size;
2506 			if (copy_size != bytes_left) {
2507 				BUG_ON(!pf);
2508 				/* we stopped at a fixup offset */
2509 				if (pf->skip_size) {
2510 					/*
2511 					 * we are just skipping. This is for
2512 					 * BINDER_TYPE_FDA where the translated
2513 					 * fds will be fixed up when we get
2514 					 * to target context.
2515 					 */
2516 					bytes_copied += pf->skip_size;
2517 				} else {
2518 					/* apply the fixup indicated by pf */
2519 					if (!ret)
2520 						ret = binder_alloc_copy_to_buffer(
2521 							alloc, buffer,
2522 							pf->offset,
2523 							&pf->fixup_data,
2524 							sizeof(pf->fixup_data));
2525 					bytes_copied += sizeof(pf->fixup_data);
2526 				}
2527 				list_del(&pf->node);
2528 				kfree(pf);
2529 				pf = list_first_entry_or_null(pf_head,
2530 						struct binder_ptr_fixup, node);
2531 			}
2532 		}
2533 		list_del(&sgc->node);
2534 		kfree(sgc);
2535 	}
2536 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2537 		BUG_ON(pf->skip_size == 0);
2538 		list_del(&pf->node);
2539 		kfree(pf);
2540 	}
2541 	BUG_ON(!list_empty(sgc_head));
2542 
2543 	return ret > 0 ? -EINVAL : ret;
2544 }
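
/*
 * Editor's note: a distilled model of the interleave above for a
 * single sg block with one fixup, illustrative only and not part of
 * the driver. Data is copied up to the fixup, the fixup value is
 * written in place of the sender's pointer, and the remainder is then
 * copied, so the untranslated value is never visible in the target.
 */
#if 0	/* example only; excluded from the build */
static void model_copy_with_fixup(char *dst, const char *src, size_t len,
				  size_t fixup_off, u64 fixup_data)
{
	size_t rest = fixup_off + sizeof(fixup_data);

	memcpy(dst, src, fixup_off);
	memcpy(dst + fixup_off, &fixup_data, sizeof(fixup_data));
	memcpy(dst + rest, src + rest, len - rest);
}
#endif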
2545 
2546 /**
2547  * binder_cleanup_deferred_txn_lists() - free specified lists
2548  * @sgc_head:	list_head of scatter-gather copy list
2549  * @pf_head:	list_head of pointer fixup list
2550  *
2551  * Called to clean up @sgc_head and @pf_head if there is an
2552  * error.
2553  */
2554 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2555 					      struct list_head *pf_head)
2556 {
2557 	struct binder_sg_copy *sgc, *tmpsgc;
2558 	struct binder_ptr_fixup *pf, *tmppf;
2559 
2560 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2561 		list_del(&sgc->node);
2562 		kfree(sgc);
2563 	}
2564 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2565 		list_del(&pf->node);
2566 		kfree(pf);
2567 	}
2568 }
2569 
2570 /**
2571  * binder_defer_copy() - queue a scatter-gather buffer for copy
2572  * @sgc_head:		list_head of scatter-gather copy list
2573  * @offset:		binder buffer offset in target process
2574  * @sender_uaddr:	user address in source process
2575  * @length:		bytes to copy
2576  *
2577  * Specify a scatter-gather block to be copied. The actual copy must
2578  * be deferred until all the needed fixups are identified and queued.
2579  * Then the copy and fixups are done together so un-translated values
2580  * from the source are never visible in the target buffer.
2581  *
2582  * We are guaranteed that repeated calls to this function will have
2583  * monotonically increasing @offset values so the list will naturally
2584  * be ordered.
2585  *
2586  * Return: 0=success, else -errno
2587  */
2588 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2589 			     const void __user *sender_uaddr, size_t length)
2590 {
2591 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2592 
2593 	if (!bc)
2594 		return -ENOMEM;
2595 
2596 	bc->offset = offset;
2597 	bc->sender_uaddr = sender_uaddr;
2598 	bc->length = length;
2599 	INIT_LIST_HEAD(&bc->node);
2600 
2601 	/*
2602 	 * We are guaranteed that the deferred copies are in-order
2603 	 * so just add to the tail.
2604 	 */
2605 	list_add_tail(&bc->node, sgc_head);
2606 
2607 	return 0;
2608 }
2609 
2610 /**
2611  * binder_add_fixup() - queue a fixup to be applied to sg copy
2612  * @pf_head:	list_head of binder ptr fixup list
2613  * @offset:	binder buffer offset in target process
2614  * @fixup:	bytes to be copied for fixup
2615  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2616  *
2617  * Add the specified fixup to a list ordered by @offset. When copying
2618  * the scatter-gather buffers, the fixup will be copied instead of
2619  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2620  * will be applied later (in target process context), so we just skip
2621  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2622  * value in @fixup.
2623  *
2624  * This function is called *mostly* in @offset order, but there are
2625  * exceptions. Since out-of-order inserts are relatively uncommon,
2626  * we insert the new element by searching backward from the tail of
2627  * the list.
2628  *
2629  * Return: 0=success, else -errno
2630  */
2631 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2632 			    binder_uintptr_t fixup, size_t skip_size)
2633 {
2634 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2635 	struct binder_ptr_fixup *tmppf;
2636 
2637 	if (!pf)
2638 		return -ENOMEM;
2639 
2640 	pf->offset = offset;
2641 	pf->fixup_data = fixup;
2642 	pf->skip_size = skip_size;
2643 	INIT_LIST_HEAD(&pf->node);
2644 
2645 	/* Fixups are *mostly* added in-order, but there are some
2646 	 * exceptions. Look backwards through list for insertion point.
2647 	 */
2648 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2649 		if (tmppf->offset < pf->offset) {
2650 			list_add(&pf->node, &tmppf->node);
2651 			return 0;
2652 		}
2653 	}
2654 	/*
2655 	 * if we get here, then the new offset is the lowest so
2656 	 * insert at the head
2657 	 */
2658 	list_add(&pf->node, pf_head);
2659 	return 0;
2660 }
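
/*
 * Editor's note: illustrative usage only, not part of the driver. The
 * backward search keeps the pf list sorted even when a fixup arrives
 * out of order, e.g. offsets 8, 24, then 16:
 */
#if 0	/* example only; excluded from the build */
static void example_add_fixups(struct list_head *pf_head)
{
	binder_add_fixup(pf_head, 8, 0xa, 0);
	binder_add_fixup(pf_head, 24, 0xb, 0);
	binder_add_fixup(pf_head, 16, 0xc, 0);	/* lands between 8 and 24 */
}
#endif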
2661 
2662 static int binder_translate_fd_array(struct list_head *pf_head,
2663 				     struct binder_fd_array_object *fda,
2664 				     const void __user *sender_ubuffer,
2665 				     struct binder_buffer_object *parent,
2666 				     struct binder_buffer_object *sender_uparent,
2667 				     struct binder_transaction *t,
2668 				     struct binder_thread *thread,
2669 				     struct binder_transaction *in_reply_to)
2670 {
2671 	binder_size_t fdi, fd_buf_size;
2672 	binder_size_t fda_offset;
2673 	const void __user *sender_ufda_base;
2674 	struct binder_proc *proc = thread->proc;
2675 	int ret;
2676 
2677 	if (fda->num_fds == 0)
2678 		return 0;
2679 
2680 	fd_buf_size = sizeof(u32) * fda->num_fds;
2681 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2682 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2683 				  proc->pid, thread->pid, (u64)fda->num_fds);
2684 		return -EINVAL;
2685 	}
2686 	if (fd_buf_size > parent->length ||
2687 	    fda->parent_offset > parent->length - fd_buf_size) {
2688 		/* No space for all file descriptors here. */
2689 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2690 				  proc->pid, thread->pid, (u64)fda->num_fds);
2691 		return -EINVAL;
2692 	}
2693 	/*
2694 	 * the source data for binder_buffer_object is visible
2695 	 * to user-space and the @buffer element is the user
2696 	 * pointer to the buffer_object containing the fd_array.
2697 	 * Convert the address to an offset relative to
2698 	 * the base of the transaction buffer.
2699 	 */
2700 	fda_offset = parent->buffer - t->buffer->user_data +
2701 		fda->parent_offset;
2702 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2703 				fda->parent_offset;
2704 
2705 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2706 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2707 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2708 				  proc->pid, thread->pid);
2709 		return -EINVAL;
2710 	}
2711 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2712 	if (ret)
2713 		return ret;
2714 
2715 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2716 		u32 fd;
2717 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2718 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2719 
2720 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2721 		if (!ret)
2722 			ret = binder_translate_fd(fd, offset, t, thread,
2723 						  in_reply_to);
2724 		if (ret)
2725 			return ret > 0 ? -EINVAL : ret;
2726 	}
2727 	return 0;
2728 }
2729 
2730 static int binder_fixup_parent(struct list_head *pf_head,
2731 			       struct binder_transaction *t,
2732 			       struct binder_thread *thread,
2733 			       struct binder_buffer_object *bp,
2734 			       binder_size_t off_start_offset,
2735 			       binder_size_t num_valid,
2736 			       binder_size_t last_fixup_obj_off,
2737 			       binder_size_t last_fixup_min_off)
2738 {
2739 	struct binder_buffer_object *parent;
2740 	struct binder_buffer *b = t->buffer;
2741 	struct binder_proc *proc = thread->proc;
2742 	struct binder_proc *target_proc = t->to_proc;
2743 	struct binder_object object;
2744 	binder_size_t buffer_offset;
2745 	binder_size_t parent_offset;
2746 
2747 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2748 		return 0;
2749 
2750 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2751 				     off_start_offset, &parent_offset,
2752 				     num_valid);
2753 	if (!parent) {
2754 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2755 				  proc->pid, thread->pid);
2756 		return -EINVAL;
2757 	}
2758 
2759 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2760 				   parent_offset, bp->parent_offset,
2761 				   last_fixup_obj_off,
2762 				   last_fixup_min_off)) {
2763 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2764 				  proc->pid, thread->pid);
2765 		return -EINVAL;
2766 	}
2767 
2768 	if (parent->length < sizeof(binder_uintptr_t) ||
2769 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2770 		/* No space for a pointer here! */
2771 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2772 				  proc->pid, thread->pid);
2773 		return -EINVAL;
2774 	}
2775 
2776 	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2777 
2778 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2779 }
2780 
2781 /**
2782  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2783  * @t1: the pending async txn in the frozen process
2784  * @t2: the new async txn to supersede the outdated pending one
2785  *
2786  * Return:  true if t2 can supersede t1
2787  *          false if t2 cannot supersede t1
2788  */
2789 static bool binder_can_update_transaction(struct binder_transaction *t1,
2790 					  struct binder_transaction *t2)
2791 {
2792 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2793 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2794 		return false;
2795 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2796 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2797 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2798 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2799 		return true;
2800 	return false;
2801 }
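
/*
 * Editor's note: illustrative sketch of the sender side, not part of
 * the driver. Supersession requires TF_ONE_WAY and TF_UPDATE_TXN on
 * both transactions, so a sender opts in by setting both flags:
 */
#if 0	/* example only; excluded from the build */
static void example_mark_updatable(struct binder_transaction_data *tr)
{
	tr->flags |= TF_ONE_WAY | TF_UPDATE_TXN;
}
#endif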
2802 
2803 /**
2804  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2805  * @t:		 new async transaction
2806  * @target_list: list to find outdated transaction
2807  *
2808  * Return: the outdated transaction if found
2809  *         NULL if no outdated transaction can be found
2810  *
2811  * Requires the proc->inner_lock to be held.
2812  */
2813 static struct binder_transaction *
2814 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2815 					 struct list_head *target_list)
2816 {
2817 	struct binder_work *w;
2818 
2819 	list_for_each_entry(w, target_list, entry) {
2820 		struct binder_transaction *t_queued;
2821 
2822 		if (w->type != BINDER_WORK_TRANSACTION)
2823 			continue;
2824 		t_queued = container_of(w, struct binder_transaction, work);
2825 		if (binder_can_update_transaction(t_queued, t))
2826 			return t_queued;
2827 	}
2828 	return NULL;
2829 }
2830 
2831 /**
2832  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2833  * @t:		transaction to send
2834  * @proc:	process to send the transaction to
2835  * @thread:	thread in @proc to send the transaction to (may be NULL)
2836  *
2837  * This function queues a transaction to the specified process. It will try
2838  * to find a thread in the target process to handle the transaction and
2839  * wake it up. If no thread is found, the work is queued to the proc
2840  * waitqueue.
2841  *
2842  * If the @thread parameter is not NULL, the transaction is always queued
2843  * to the todo list of that specific thread.
2844  *
2845  * Return:	0 if the transaction was successfully queued
2846  *		BR_DEAD_REPLY if the target process or thread is dead
2847  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2848  *			the sync transaction was rejected
2849  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2850  *		and the async transaction was successfully queued
2851  */
2852 static int binder_proc_transaction(struct binder_transaction *t,
2853 				    struct binder_proc *proc,
2854 				    struct binder_thread *thread)
2855 {
2856 	struct binder_node *node = t->buffer->target_node;
2857 	bool oneway = !!(t->flags & TF_ONE_WAY);
2858 	bool pending_async = false;
2859 	struct binder_transaction *t_outdated = NULL;
2860 	bool frozen = false;
2861 
2862 	BUG_ON(!node);
2863 	binder_node_lock(node);
2864 	if (oneway) {
2865 		BUG_ON(thread);
2866 		if (node->has_async_transaction)
2867 			pending_async = true;
2868 		else
2869 			node->has_async_transaction = true;
2870 	}
2871 
2872 	binder_inner_proc_lock(proc);
2873 	if (proc->is_frozen) {
2874 		frozen = true;
2875 		proc->sync_recv |= !oneway;
2876 		proc->async_recv |= oneway;
2877 	}
2878 
2879 	if ((frozen && !oneway) || proc->is_dead ||
2880 			(thread && thread->is_dead)) {
2881 		binder_inner_proc_unlock(proc);
2882 		binder_node_unlock(node);
2883 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2884 	}
2885 
2886 	if (!thread && !pending_async)
2887 		thread = binder_select_thread_ilocked(proc);
2888 
2889 	if (thread) {
2890 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2891 	} else if (!pending_async) {
2892 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2893 	} else {
2894 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2895 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2896 									      &node->async_todo);
2897 			if (t_outdated) {
2898 				binder_debug(BINDER_DEBUG_TRANSACTION,
2899 					     "txn %d supersedes %d\n",
2900 					     t->debug_id, t_outdated->debug_id);
2901 				list_del_init(&t_outdated->work.entry);
2902 				proc->outstanding_txns--;
2903 			}
2904 		}
2905 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2906 	}
2907 
2908 	if (!pending_async)
2909 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2910 
2911 	proc->outstanding_txns++;
2912 	binder_inner_proc_unlock(proc);
2913 	binder_node_unlock(node);
2914 
2915 	/*
2916 	 * To reduce potential contention, free the outdated transaction and
2917 	 * buffer after releasing the locks.
2918 	 */
2919 	if (t_outdated) {
2920 		struct binder_buffer *buffer = t_outdated->buffer;
2921 
2922 		t_outdated->buffer = NULL;
2923 		buffer->transaction = NULL;
2924 		trace_binder_transaction_update_buffer_release(buffer);
2925 		binder_release_entire_buffer(proc, NULL, buffer, false);
2926 		binder_alloc_free_buf(&proc->alloc, buffer);
2927 		kfree(t_outdated);
2928 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2929 	}
2930 
2931 	if (oneway && frozen)
2932 		return BR_TRANSACTION_PENDING_FROZEN;
2933 
2934 	return 0;
2935 }
2936 
2937 /**
2938  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2939  * @node:         struct binder_node for which to get refs
2940  * @procp:        returns @node->proc if valid
2941  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2942  *
2943  * User-space normally keeps the node alive when creating a transaction
2944  * since it has a reference to the target. The local strong ref keeps it
2945  * alive if the sending process dies before the target process processes
2946  * the transaction. If the source process is malicious or has a reference
2947  * counting bug, relying on the local strong ref can fail.
2948  *
2949  * Since user-space can cause the local strong ref to go away, we also take
2950  * a tmpref on the node to ensure it survives while we are constructing
2951  * the transaction. We also need a tmpref on the proc while we are
2952  * constructing the transaction, so we take that here as well.
2953  *
2954  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2955  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2956  * target proc has died, @error is set to BR_DEAD_REPLY.
2957  */
2958 static struct binder_node *binder_get_node_refs_for_txn(
2959 		struct binder_node *node,
2960 		struct binder_proc **procp,
2961 		uint32_t *error)
2962 {
2963 	struct binder_node *target_node = NULL;
2964 
2965 	binder_node_inner_lock(node);
2966 	if (node->proc) {
2967 		target_node = node;
2968 		binder_inc_node_nilocked(node, 1, 0, NULL);
2969 		binder_inc_node_tmpref_ilocked(node);
2970 		node->proc->tmp_ref++;
2971 		*procp = node->proc;
2972 	} else
2973 		*error = BR_DEAD_REPLY;
2974 	binder_node_inner_unlock(node);
2975 
2976 	return target_node;
2977 }
2978 
2979 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2980 				      uint32_t command, int32_t param)
2981 {
2982 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2983 
2984 	if (!from) {
2985 		/* annotation for sparse */
2986 		__release(&from->proc->inner_lock);
2987 		return;
2988 	}
2989 
2990 	/* don't override existing errors */
2991 	if (from->ee.command == BR_OK)
2992 		binder_set_extended_error(&from->ee, id, command, param);
2993 	binder_inner_proc_unlock(from->proc);
2994 	binder_thread_dec_tmpref(from);
2995 }
2996 
2997 /**
2998  * binder_netlink_report() - report a transaction failure via netlink
2999  * @proc:	the binder proc sending the transaction
3000  * @t:		the binder transaction that failed
3001  * @data_size:	the user provided data size for the transaction
3002  * @error:	enum binder_driver_return_protocol returned to sender
3003  */
3004 static void binder_netlink_report(struct binder_proc *proc,
3005 				  struct binder_transaction *t,
3006 				  u32 data_size,
3007 				  u32 error)
3008 {
3009 	const char *context = proc->context->name;
3010 	struct sk_buff *skb;
3011 	void *hdr;
3012 
3013 	if (!genl_has_listeners(&binder_nl_family, &init_net,
3014 				BINDER_NLGRP_REPORT))
3015 		return;
3016 
3017 	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
3018 	if (!skb)
3019 		return;
3020 
3021 	hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
3022 	if (!hdr)
3023 		goto free_skb;
3024 
3025 	if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
3026 	    nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
3027 	    nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
3028 	    nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
3029 		goto cancel_skb;
3030 
3031 	if (t->to_proc &&
3032 	    nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
3033 		goto cancel_skb;
3034 
3035 	if (t->to_thread &&
3036 	    nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
3037 		goto cancel_skb;
3038 
3039 	if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
3040 		goto cancel_skb;
3041 
3042 	if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
3043 	    nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
3044 	    nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
3045 		goto cancel_skb;
3046 
3047 	genlmsg_end(skb, hdr);
3048 	genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
3049 			  GFP_KERNEL);
3050 	return;
3051 
3052 cancel_skb:
3053 	genlmsg_cancel(skb, hdr);
3054 free_skb:
3055 	nlmsg_free(skb);
3056 }
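
/*
 * Editor's note: a hypothetical user-space listener for the report
 * multicast group, sketched with libnl-genl-3 and illustrative only.
 * The generic netlink family and group names ("binder", "report") are
 * assumptions here and must match the definitions generated for
 * binder_nl_family.
 */
#if 0	/* user-space example only; never built with the driver */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk))
		return 1;
	grp = genl_ctrl_resolve_grp(sk, "binder", "report");
	if (grp < 0 || nl_socket_add_membership(sk, grp))
		return 1;
	nl_socket_disable_seq_check(sk);
	/*
	 * A real listener would install a callback with
	 * nl_socket_modify_cb() to parse the BINDER_A_REPORT_*
	 * attributes of each multicast message.
	 */
	while (nl_recvmsgs_default(sk) == 0)
		;
	return 0;
}
#endif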
3057 
3058 static void binder_transaction(struct binder_proc *proc,
3059 			       struct binder_thread *thread,
3060 			       struct binder_transaction_data *tr, int reply,
3061 			       binder_size_t extra_buffers_size)
3062 {
3063 	int ret;
3064 	struct binder_transaction *t;
3065 	struct binder_work *w;
3066 	struct binder_work *tcomplete;
3067 	binder_size_t buffer_offset = 0;
3068 	binder_size_t off_start_offset, off_end_offset;
3069 	binder_size_t off_min;
3070 	binder_size_t sg_buf_offset, sg_buf_end_offset;
3071 	binder_size_t user_offset = 0;
3072 	struct binder_proc *target_proc = NULL;
3073 	struct binder_thread *target_thread = NULL;
3074 	struct binder_node *target_node = NULL;
3075 	struct binder_transaction *in_reply_to = NULL;
3076 	struct binder_transaction_log_entry *e;
3077 	uint32_t return_error = 0;
3078 	uint32_t return_error_param = 0;
3079 	uint32_t return_error_line = 0;
3080 	binder_size_t last_fixup_obj_off = 0;
3081 	binder_size_t last_fixup_min_off = 0;
3082 	struct binder_context *context = proc->context;
3083 	int t_debug_id = atomic_inc_return(&binder_last_id);
3084 	ktime_t t_start_time = ktime_get();
3085 	struct lsm_context lsmctx = { };
3086 	struct list_head sgc_head;
3087 	struct list_head pf_head;
3088 	const void __user *user_buffer = (const void __user *)
3089 				(uintptr_t)tr->data.ptr.buffer;
3090 	INIT_LIST_HEAD(&sgc_head);
3091 	INIT_LIST_HEAD(&pf_head);
3092 
3093 	e = binder_transaction_log_add(&binder_transaction_log);
3094 	e->debug_id = t_debug_id;
3095 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3096 	e->from_proc = proc->pid;
3097 	e->from_thread = thread->pid;
3098 	e->target_handle = tr->target.handle;
3099 	e->data_size = tr->data_size;
3100 	e->offsets_size = tr->offsets_size;
3101 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3102 
3103 	binder_inner_proc_lock(proc);
3104 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3105 	binder_inner_proc_unlock(proc);
3106 
3107 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3108 	if (!t) {
3109 		binder_txn_error("%d:%d cannot allocate transaction\n",
3110 				 thread->pid, proc->pid);
3111 		return_error = BR_FAILED_REPLY;
3112 		return_error_param = -ENOMEM;
3113 		return_error_line = __LINE__;
3114 		goto err_alloc_t_failed;
3115 	}
3116 	INIT_LIST_HEAD(&t->fd_fixups);
3117 	binder_stats_created(BINDER_STAT_TRANSACTION);
3118 	spin_lock_init(&t->lock);
3119 	t->debug_id = t_debug_id;
3120 	t->start_time = t_start_time;
3121 	t->from_pid = proc->pid;
3122 	t->from_tid = thread->pid;
3123 	t->sender_euid = task_euid(proc->tsk);
3124 	t->code = tr->code;
3125 	t->flags = tr->flags;
3126 	t->priority = task_nice(current);
3127 	t->work.type = BINDER_WORK_TRANSACTION;
3128 	t->is_async = !reply && (tr->flags & TF_ONE_WAY);
3129 	t->is_reply = reply;
3130 	if (!reply && !(tr->flags & TF_ONE_WAY))
3131 		t->from = thread;
3132 
3133 	if (reply) {
3134 		binder_inner_proc_lock(proc);
3135 		in_reply_to = thread->transaction_stack;
3136 		if (in_reply_to == NULL) {
3137 			binder_inner_proc_unlock(proc);
3138 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3139 					  proc->pid, thread->pid);
3140 			return_error = BR_FAILED_REPLY;
3141 			return_error_param = -EPROTO;
3142 			return_error_line = __LINE__;
3143 			goto err_empty_call_stack;
3144 		}
3145 		if (in_reply_to->to_thread != thread) {
3146 			spin_lock(&in_reply_to->lock);
3147 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3148 				proc->pid, thread->pid, in_reply_to->debug_id,
3149 				in_reply_to->to_proc ?
3150 				in_reply_to->to_proc->pid : 0,
3151 				in_reply_to->to_thread ?
3152 				in_reply_to->to_thread->pid : 0);
3153 			spin_unlock(&in_reply_to->lock);
3154 			binder_inner_proc_unlock(proc);
3155 			return_error = BR_FAILED_REPLY;
3156 			return_error_param = -EPROTO;
3157 			return_error_line = __LINE__;
3158 			in_reply_to = NULL;
3159 			goto err_bad_call_stack;
3160 		}
3161 		thread->transaction_stack = in_reply_to->to_parent;
3162 		binder_inner_proc_unlock(proc);
3163 		binder_set_nice(in_reply_to->saved_priority);
3164 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3165 		if (target_thread == NULL) {
3166 			/* annotation for sparse */
3167 			__release(&target_thread->proc->inner_lock);
3168 			binder_txn_error("%d:%d reply target not found\n",
3169 				thread->pid, proc->pid);
3170 			return_error = BR_DEAD_REPLY;
3171 			return_error_line = __LINE__;
3172 			goto err_dead_binder;
3173 		}
3174 		if (target_thread->transaction_stack != in_reply_to) {
3175 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3176 				proc->pid, thread->pid,
3177 				target_thread->transaction_stack ?
3178 				target_thread->transaction_stack->debug_id : 0,
3179 				in_reply_to->debug_id);
3180 			binder_inner_proc_unlock(target_thread->proc);
3181 			return_error = BR_FAILED_REPLY;
3182 			return_error_param = -EPROTO;
3183 			return_error_line = __LINE__;
3184 			in_reply_to = NULL;
3185 			target_thread = NULL;
3186 			goto err_dead_binder;
3187 		}
3188 		target_proc = target_thread->proc;
3189 		target_proc->tmp_ref++;
3190 		binder_inner_proc_unlock(target_thread->proc);
3191 	} else {
3192 		if (tr->target.handle) {
3193 			struct binder_ref *ref;
3194 
3195 			/*
3196 			 * There must already be a strong ref
3197 			 * on this node. If so, do a strong
3198 			 * increment on the node to ensure it
3199 			 * stays alive until the transaction is
3200 			 * done.
3201 			 */
3202 			binder_proc_lock(proc);
3203 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3204 						     true);
3205 			if (ref) {
3206 				target_node = binder_get_node_refs_for_txn(
3207 						ref->node, &target_proc,
3208 						&return_error);
3209 			} else {
3210 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3211 						  proc->pid, thread->pid, tr->target.handle);
3212 				return_error = BR_FAILED_REPLY;
3213 			}
3214 			binder_proc_unlock(proc);
3215 		} else {
3216 			mutex_lock(&context->context_mgr_node_lock);
3217 			target_node = context->binder_context_mgr_node;
3218 			if (target_node)
3219 				target_node = binder_get_node_refs_for_txn(
3220 						target_node, &target_proc,
3221 						&return_error);
3222 			else
3223 				return_error = BR_DEAD_REPLY;
3224 			mutex_unlock(&context->context_mgr_node_lock);
3225 			if (target_node && target_proc->pid == proc->pid) {
3226 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3227 						  proc->pid, thread->pid);
3228 				return_error = BR_FAILED_REPLY;
3229 				return_error_param = -EINVAL;
3230 				return_error_line = __LINE__;
3231 				goto err_invalid_target_handle;
3232 			}
3233 		}
3234 		if (!target_node) {
3235 			binder_txn_error("%d:%d cannot find target node\n",
3236 					 proc->pid, thread->pid);
3237 			/* return_error is set above */
3238 			return_error_param = -EINVAL;
3239 			return_error_line = __LINE__;
3240 			goto err_dead_binder;
3241 		}
3242 		e->to_node = target_node->debug_id;
3243 		if (WARN_ON(proc == target_proc)) {
3244 			binder_txn_error("%d:%d self transactions not allowed\n",
3245 				thread->pid, proc->pid);
3246 			return_error = BR_FAILED_REPLY;
3247 			return_error_param = -EINVAL;
3248 			return_error_line = __LINE__;
3249 			goto err_invalid_target_handle;
3250 		}
3251 		if (security_binder_transaction(proc->cred,
3252 						target_proc->cred) < 0) {
3253 			binder_txn_error("%d:%d transaction credentials failed\n",
3254 				thread->pid, proc->pid);
3255 			return_error = BR_FAILED_REPLY;
3256 			return_error_param = -EPERM;
3257 			return_error_line = __LINE__;
3258 			goto err_invalid_target_handle;
3259 		}
3260 		binder_inner_proc_lock(proc);
3261 
3262 		w = list_first_entry_or_null(&thread->todo,
3263 					     struct binder_work, entry);
3264 		if (!(tr->flags & TF_ONE_WAY) && w &&
3265 		    w->type == BINDER_WORK_TRANSACTION) {
3266 			/*
3267 			 * Do not allow new outgoing transaction from a
3268 			 * thread that has a transaction at the head of
3269 			 * its todo list. Only need to check the head
3270 			 * because binder_select_thread_ilocked picks a
3271 			 * thread from proc->waiting_threads to enqueue
3272 			 * the transaction, and nothing is queued to the
3273 			 * todo list while the thread is on waiting_threads.
3274 			 */
3275 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3276 					  proc->pid, thread->pid);
3277 			binder_inner_proc_unlock(proc);
3278 			return_error = BR_FAILED_REPLY;
3279 			return_error_param = -EPROTO;
3280 			return_error_line = __LINE__;
3281 			goto err_bad_todo_list;
3282 		}
3283 
3284 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3285 			struct binder_transaction *tmp;
3286 
3287 			tmp = thread->transaction_stack;
3288 			if (tmp->to_thread != thread) {
3289 				spin_lock(&tmp->lock);
3290 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3291 					proc->pid, thread->pid, tmp->debug_id,
3292 					tmp->to_proc ? tmp->to_proc->pid : 0,
3293 					tmp->to_thread ?
3294 					tmp->to_thread->pid : 0);
3295 				spin_unlock(&tmp->lock);
3296 				binder_inner_proc_unlock(proc);
3297 				return_error = BR_FAILED_REPLY;
3298 				return_error_param = -EPROTO;
3299 				return_error_line = __LINE__;
3300 				goto err_bad_call_stack;
3301 			}
3302 			while (tmp) {
3303 				struct binder_thread *from;
3304 
3305 				spin_lock(&tmp->lock);
3306 				from = tmp->from;
3307 				if (from && from->proc == target_proc) {
3308 					atomic_inc(&from->tmp_ref);
3309 					target_thread = from;
3310 					spin_unlock(&tmp->lock);
3311 					break;
3312 				}
3313 				spin_unlock(&tmp->lock);
3314 				tmp = tmp->from_parent;
3315 			}
3316 		}
3317 		binder_inner_proc_unlock(proc);
3318 	}
3319 
3320 	t->to_proc = target_proc;
3321 	t->to_thread = target_thread;
3322 	if (target_thread)
3323 		e->to_thread = target_thread->pid;
3324 	e->to_proc = target_proc->pid;
3325 
3326 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3327 	if (tcomplete == NULL) {
3328 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3329 			thread->pid, proc->pid);
3330 		return_error = BR_FAILED_REPLY;
3331 		return_error_param = -ENOMEM;
3332 		return_error_line = __LINE__;
3333 		goto err_alloc_tcomplete_failed;
3334 	}
3335 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3336 
3337 	if (reply)
3338 		binder_debug(BINDER_DEBUG_TRANSACTION,
3339 			     "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3340 			     proc->pid, thread->pid, t->debug_id,
3341 			     target_proc->pid, target_thread->pid,
3342 			     (u64)tr->data_size, (u64)tr->offsets_size,
3343 			     (u64)extra_buffers_size);
3344 	else
3345 		binder_debug(BINDER_DEBUG_TRANSACTION,
3346 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3347 			     proc->pid, thread->pid, t->debug_id,
3348 			     target_proc->pid, target_node->debug_id,
3349 			     (u64)tr->data_size, (u64)tr->offsets_size,
3350 			     (u64)extra_buffers_size);
3351 
3352 	if (target_node && target_node->txn_security_ctx) {
3353 		u32 secid;
3354 		size_t added_size;
3355 
3356 		security_cred_getsecid(proc->cred, &secid);
3357 		ret = security_secid_to_secctx(secid, &lsmctx);
3358 		if (ret < 0) {
3359 			binder_txn_error("%d:%d failed to get security context\n",
3360 				thread->pid, proc->pid);
3361 			return_error = BR_FAILED_REPLY;
3362 			return_error_param = ret;
3363 			return_error_line = __LINE__;
3364 			goto err_get_secctx_failed;
3365 		}
3366 		added_size = ALIGN(lsmctx.len, sizeof(u64));
3367 		extra_buffers_size += added_size;
3368 		if (extra_buffers_size < added_size) {
3369 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3370 				thread->pid, proc->pid);
3371 			return_error = BR_FAILED_REPLY;
3372 			return_error_param = -EINVAL;
3373 			return_error_line = __LINE__;
3374 			goto err_bad_extra_size;
3375 		}
3376 	}
3377 
3378 	trace_binder_transaction(reply, t, target_node);
3379 
3380 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3381 		tr->offsets_size, extra_buffers_size,
3382 		!reply && (t->flags & TF_ONE_WAY));
3383 	if (IS_ERR(t->buffer)) {
3384 		char *s;
3385 
3386 		ret = PTR_ERR(t->buffer);
3387 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3388 			: (ret == -ENOSPC) ? ": no space left"
3389 			: (ret == -ENOMEM) ? ": memory allocation failed"
3390 			: "";
		binder_txn_error("cannot allocate buffer%s\n", s);
3392 
3393 		return_error_param = PTR_ERR(t->buffer);
3394 		return_error = return_error_param == -ESRCH ?
3395 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3396 		return_error_line = __LINE__;
3397 		t->buffer = NULL;
3398 		goto err_binder_alloc_buf_failed;
3399 	}
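	/*
	 * The security context lives at the very end of the buffer,
	 * after the data, offsets and scatter-gather areas (its length
	 * was added to extra_buffers_size above).
	 */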
3400 	if (lsmctx.context) {
3401 		int err;
3402 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3403 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3404 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3405 				    ALIGN(lsmctx.len, sizeof(u64));
3406 
3407 		t->security_ctx = t->buffer->user_data + buf_offset;
3408 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3409 						  t->buffer, buf_offset,
3410 						  lsmctx.context, lsmctx.len);
3411 		if (err) {
3412 			t->security_ctx = 0;
3413 			WARN_ON(1);
3414 		}
3415 		security_release_secctx(&lsmctx);
3416 		lsmctx.context = NULL;
3417 	}
3418 	t->buffer->debug_id = t->debug_id;
3419 	t->buffer->transaction = t;
3420 	t->buffer->target_node = target_node;
3421 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3422 	trace_binder_transaction_alloc_buf(t->buffer);
3423 
3424 	if (binder_alloc_copy_user_to_buffer(
3425 				&target_proc->alloc,
3426 				t->buffer,
3427 				ALIGN(tr->data_size, sizeof(void *)),
3428 				(const void __user *)
3429 					(uintptr_t)tr->data.ptr.offsets,
3430 				tr->offsets_size)) {
3431 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3432 				proc->pid, thread->pid);
3433 		return_error = BR_FAILED_REPLY;
3434 		return_error_param = -EFAULT;
3435 		return_error_line = __LINE__;
3436 		goto err_copy_data_failed;
3437 	}
3438 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3439 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3440 				proc->pid, thread->pid, (u64)tr->offsets_size);
3441 		return_error = BR_FAILED_REPLY;
3442 		return_error_param = -EINVAL;
3443 		return_error_line = __LINE__;
3444 		goto err_bad_offset;
3445 	}
3446 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3447 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3448 				  proc->pid, thread->pid,
3449 				  (u64)extra_buffers_size);
3450 		return_error = BR_FAILED_REPLY;
3451 		return_error_param = -EINVAL;
3452 		return_error_line = __LINE__;
3453 		goto err_bad_offset;
3454 	}
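	/*
	 * Carve up the target buffer: the data area comes first,
	 * followed by the aligned offsets array, then the
	 * scatter-gather area; the security context (already counted
	 * in extra_buffers_size) occupies the tail.
	 */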
3455 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3456 	buffer_offset = off_start_offset;
3457 	off_end_offset = off_start_offset + tr->offsets_size;
3458 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3459 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3460 		ALIGN(lsmctx.len, sizeof(u64));
3461 	off_min = 0;
3462 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3463 	     buffer_offset += sizeof(binder_size_t)) {
3464 		struct binder_object_header *hdr;
3465 		size_t object_size;
3466 		struct binder_object object;
3467 		binder_size_t object_offset;
3468 		binder_size_t copy_size;
3469 
3470 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3471 						  &object_offset,
3472 						  t->buffer,
3473 						  buffer_offset,
3474 						  sizeof(object_offset))) {
3475 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3476 				thread->pid, proc->pid);
3477 			return_error = BR_FAILED_REPLY;
3478 			return_error_param = -EINVAL;
3479 			return_error_line = __LINE__;
3480 			goto err_bad_offset;
3481 		}
3482 
3483 		/*
3484 		 * Copy the source user buffer up to the next object
3485 		 * that will be processed.
3486 		 */
3487 		copy_size = object_offset - user_offset;
3488 		if (copy_size && (user_offset > object_offset ||
3489 				object_offset > tr->data_size ||
3490 				binder_alloc_copy_user_to_buffer(
3491 					&target_proc->alloc,
3492 					t->buffer, user_offset,
3493 					user_buffer + user_offset,
3494 					copy_size))) {
3495 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3496 					proc->pid, thread->pid);
3497 			return_error = BR_FAILED_REPLY;
3498 			return_error_param = -EFAULT;
3499 			return_error_line = __LINE__;
3500 			goto err_copy_data_failed;
3501 		}
3502 		object_size = binder_get_object(target_proc, user_buffer,
3503 				t->buffer, object_offset, &object);
3504 		if (object_size == 0 || object_offset < off_min) {
3505 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3506 					  proc->pid, thread->pid,
3507 					  (u64)object_offset,
3508 					  (u64)off_min,
3509 					  (u64)t->buffer->data_size);
3510 			return_error = BR_FAILED_REPLY;
3511 			return_error_param = -EINVAL;
3512 			return_error_line = __LINE__;
3513 			goto err_bad_offset;
3514 		}
		/*
		 * Set the offset to the next buffer fragment to be
		 * copied.
		 */
3519 		user_offset = object_offset + object_size;
3520 
3521 		hdr = &object.hdr;
3522 		off_min = object_offset + object_size;
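		/*
		 * off_min forces object offsets to be ascending and
		 * non-overlapping: the next object must start at or
		 * beyond the end of this one.
		 */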
3523 		switch (hdr->type) {
3524 		case BINDER_TYPE_BINDER:
3525 		case BINDER_TYPE_WEAK_BINDER: {
3526 			struct flat_binder_object *fp;
3527 
3528 			fp = to_flat_binder_object(hdr);
3529 			ret = binder_translate_binder(fp, t, thread);
3530 
3531 			if (ret < 0 ||
3532 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3533 							t->buffer,
3534 							object_offset,
3535 							fp, sizeof(*fp))) {
3536 				binder_txn_error("%d:%d translate binder failed\n",
3537 					thread->pid, proc->pid);
3538 				return_error = BR_FAILED_REPLY;
3539 				return_error_param = ret;
3540 				return_error_line = __LINE__;
3541 				goto err_translate_failed;
3542 			}
3543 		} break;
3544 		case BINDER_TYPE_HANDLE:
3545 		case BINDER_TYPE_WEAK_HANDLE: {
3546 			struct flat_binder_object *fp;
3547 
3548 			fp = to_flat_binder_object(hdr);
3549 			ret = binder_translate_handle(fp, t, thread);
3550 			if (ret < 0 ||
3551 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3552 							t->buffer,
3553 							object_offset,
3554 							fp, sizeof(*fp))) {
3555 				binder_txn_error("%d:%d translate handle failed\n",
3556 					thread->pid, proc->pid);
3557 				return_error = BR_FAILED_REPLY;
3558 				return_error_param = ret;
3559 				return_error_line = __LINE__;
3560 				goto err_translate_failed;
3561 			}
3562 		} break;
3563 
3564 		case BINDER_TYPE_FD: {
3565 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3566 			binder_size_t fd_offset = object_offset +
3567 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3568 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3569 						      thread, in_reply_to);
3570 
3571 			fp->pad_binder = 0;
3572 			if (ret < 0 ||
3573 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3574 							t->buffer,
3575 							object_offset,
3576 							fp, sizeof(*fp))) {
3577 				binder_txn_error("%d:%d translate fd failed\n",
3578 					thread->pid, proc->pid);
3579 				return_error = BR_FAILED_REPLY;
3580 				return_error_param = ret;
3581 				return_error_line = __LINE__;
3582 				goto err_translate_failed;
3583 			}
3584 		} break;
3585 		case BINDER_TYPE_FDA: {
3586 			struct binder_object ptr_object;
3587 			binder_size_t parent_offset;
3588 			struct binder_object user_object;
3589 			size_t user_parent_size;
3590 			struct binder_fd_array_object *fda =
3591 				to_binder_fd_array_object(hdr);
3592 			size_t num_valid = (buffer_offset - off_start_offset) /
3593 						sizeof(binder_size_t);
3594 			struct binder_buffer_object *parent =
3595 				binder_validate_ptr(target_proc, t->buffer,
3596 						    &ptr_object, fda->parent,
3597 						    off_start_offset,
3598 						    &parent_offset,
3599 						    num_valid);
3600 			if (!parent) {
3601 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3602 						  proc->pid, thread->pid);
3603 				return_error = BR_FAILED_REPLY;
3604 				return_error_param = -EINVAL;
3605 				return_error_line = __LINE__;
3606 				goto err_bad_parent;
3607 			}
3608 			if (!binder_validate_fixup(target_proc, t->buffer,
3609 						   off_start_offset,
3610 						   parent_offset,
3611 						   fda->parent_offset,
3612 						   last_fixup_obj_off,
3613 						   last_fixup_min_off)) {
3614 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3615 						  proc->pid, thread->pid);
3616 				return_error = BR_FAILED_REPLY;
3617 				return_error_param = -EINVAL;
3618 				return_error_line = __LINE__;
3619 				goto err_bad_parent;
3620 			}
3621 			/*
3622 			 * We need to read the user version of the parent
3623 			 * object to get the original user offset
3624 			 */
3625 			user_parent_size =
3626 				binder_get_object(proc, user_buffer, t->buffer,
3627 						  parent_offset, &user_object);
3628 			if (user_parent_size != sizeof(user_object.bbo)) {
3629 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3630 						  proc->pid, thread->pid,
3631 						  user_parent_size,
3632 						  sizeof(user_object.bbo));
3633 				return_error = BR_FAILED_REPLY;
3634 				return_error_param = -EINVAL;
3635 				return_error_line = __LINE__;
3636 				goto err_bad_parent;
3637 			}
3638 			ret = binder_translate_fd_array(&pf_head, fda,
3639 							user_buffer, parent,
3640 							&user_object.bbo, t,
3641 							thread, in_reply_to);
3642 			if (!ret)
3643 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3644 								  t->buffer,
3645 								  object_offset,
3646 								  fda, sizeof(*fda));
3647 			if (ret) {
3648 				binder_txn_error("%d:%d translate fd array failed\n",
3649 					thread->pid, proc->pid);
3650 				return_error = BR_FAILED_REPLY;
3651 				return_error_param = ret > 0 ? -EINVAL : ret;
3652 				return_error_line = __LINE__;
3653 				goto err_translate_failed;
3654 			}
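			/*
			 * Record this parent so that later fixups within
			 * it must land beyond the fd array just
			 * translated.
			 */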
3655 			last_fixup_obj_off = parent_offset;
3656 			last_fixup_min_off =
3657 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3658 		} break;
3659 		case BINDER_TYPE_PTR: {
3660 			struct binder_buffer_object *bp =
3661 				to_binder_buffer_object(hdr);
3662 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3663 			size_t num_valid;
3664 
3665 			if (bp->length > buf_left) {
3666 				binder_user_error("%d:%d got transaction with too large buffer\n",
3667 						  proc->pid, thread->pid);
3668 				return_error = BR_FAILED_REPLY;
3669 				return_error_param = -EINVAL;
3670 				return_error_line = __LINE__;
3671 				goto err_bad_offset;
3672 			}
3673 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3674 				(const void __user *)(uintptr_t)bp->buffer,
3675 				bp->length);
3676 			if (ret) {
3677 				binder_txn_error("%d:%d deferred copy failed\n",
3678 					thread->pid, proc->pid);
3679 				return_error = BR_FAILED_REPLY;
3680 				return_error_param = ret;
3681 				return_error_line = __LINE__;
3682 				goto err_translate_failed;
3683 			}
			/* Fix up the buffer pointer to the target proc's address space */
3685 			bp->buffer = t->buffer->user_data + sg_buf_offset;
3686 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3687 
3688 			num_valid = (buffer_offset - off_start_offset) /
3689 					sizeof(binder_size_t);
3690 			ret = binder_fixup_parent(&pf_head, t,
3691 						  thread, bp,
3692 						  off_start_offset,
3693 						  num_valid,
3694 						  last_fixup_obj_off,
3695 						  last_fixup_min_off);
3696 			if (ret < 0 ||
3697 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3698 							t->buffer,
3699 							object_offset,
3700 							bp, sizeof(*bp))) {
3701 				binder_txn_error("%d:%d failed to fixup parent\n",
3702 					thread->pid, proc->pid);
3703 				return_error = BR_FAILED_REPLY;
3704 				return_error_param = ret;
3705 				return_error_line = __LINE__;
3706 				goto err_translate_failed;
3707 			}
3708 			last_fixup_obj_off = object_offset;
3709 			last_fixup_min_off = 0;
3710 		} break;
3711 		default:
3712 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3713 				proc->pid, thread->pid, hdr->type);
3714 			return_error = BR_FAILED_REPLY;
3715 			return_error_param = -EINVAL;
3716 			return_error_line = __LINE__;
3717 			goto err_bad_object_type;
3718 		}
3719 	}
3720 	/* Done processing objects, copy the rest of the buffer */
3721 	if (binder_alloc_copy_user_to_buffer(
3722 				&target_proc->alloc,
3723 				t->buffer, user_offset,
3724 				user_buffer + user_offset,
3725 				tr->data_size - user_offset)) {
3726 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3727 				proc->pid, thread->pid);
3728 		return_error = BR_FAILED_REPLY;
3729 		return_error_param = -EFAULT;
3730 		return_error_line = __LINE__;
3731 		goto err_copy_data_failed;
3732 	}
3733 
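	/*
	 * Perform the scatter-gather copies recorded by
	 * binder_defer_copy() and apply the parent fixups queued by
	 * binder_fixup_parent(), now that every object has been
	 * validated.
	 */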
3734 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3735 					    &sgc_head, &pf_head);
3736 	if (ret) {
3737 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3738 				  proc->pid, thread->pid);
3739 		return_error = BR_FAILED_REPLY;
3740 		return_error_param = ret;
3741 		return_error_line = __LINE__;
3742 		goto err_copy_data_failed;
3743 	}
3744 	if (t->buffer->oneway_spam_suspect) {
3745 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3746 		binder_netlink_report(proc, t, tr->data_size,
3747 				      BR_ONEWAY_SPAM_SUSPECT);
3748 	} else {
3749 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3750 	}
3751 
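	/*
	 * Deliver the transaction: a reply wakes the waiting target
	 * thread, a synchronous call is pushed onto this thread's
	 * transaction stack, and a one-way transaction goes to the
	 * target process as a whole.
	 */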
3752 	if (reply) {
3753 		binder_enqueue_thread_work(thread, tcomplete);
3754 		binder_inner_proc_lock(target_proc);
3755 		if (target_thread->is_dead) {
3756 			return_error = BR_DEAD_REPLY;
3757 			binder_inner_proc_unlock(target_proc);
3758 			goto err_dead_proc_or_thread;
3759 		}
3760 		BUG_ON(t->buffer->async_transaction != 0);
3761 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3762 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3763 		target_proc->outstanding_txns++;
3764 		binder_inner_proc_unlock(target_proc);
3765 		wake_up_interruptible_sync(&target_thread->wait);
3766 		binder_free_transaction(in_reply_to);
3767 	} else if (!(t->flags & TF_ONE_WAY)) {
3768 		BUG_ON(t->buffer->async_transaction != 0);
3769 		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE so we don't return to
		 * userspace immediately; the target process can start
		 * processing this transaction right away, reducing latency.
		 * The TRANSACTION_COMPLETE is then returned when the target
		 * replies (or an error occurs).
		 */
3777 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3778 		t->from_parent = thread->transaction_stack;
3779 		thread->transaction_stack = t;
3780 		binder_inner_proc_unlock(proc);
3781 		return_error = binder_proc_transaction(t,
3782 				target_proc, target_thread);
3783 		if (return_error) {
3784 			binder_inner_proc_lock(proc);
3785 			binder_pop_transaction_ilocked(thread, t);
3786 			binder_inner_proc_unlock(proc);
3787 			goto err_dead_proc_or_thread;
3788 		}
3789 	} else {
3790 		BUG_ON(target_node == NULL);
3791 		BUG_ON(t->buffer->async_transaction != 1);
3792 		return_error = binder_proc_transaction(t, target_proc, NULL);
		/*
		 * Let the caller know when an async transaction reaches a
		 * frozen process and is put in a pending queue, waiting for
		 * the target process to be unfrozen.
		 */
3798 		if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
3799 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3800 			binder_netlink_report(proc, t, tr->data_size,
3801 					      return_error);
3802 		}
3803 		binder_enqueue_thread_work(thread, tcomplete);
3804 		if (return_error &&
3805 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3806 			goto err_dead_proc_or_thread;
3807 	}
3808 	if (target_thread)
3809 		binder_thread_dec_tmpref(target_thread);
3810 	binder_proc_dec_tmpref(target_proc);
3811 	if (target_node)
3812 		binder_dec_node_tmpref(target_node);
3813 	/*
3814 	 * write barrier to synchronize with initialization
3815 	 * of log entry
3816 	 */
3817 	smp_wmb();
3818 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3819 	return;
3820 
3821 err_dead_proc_or_thread:
3822 	binder_txn_error("%d:%d dead process or thread\n",
3823 		thread->pid, proc->pid);
3824 	return_error_line = __LINE__;
3825 	binder_dequeue_work(proc, tcomplete);
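	/*
	 * Error unwinding: the labels below run in reverse order of
	 * setup, so each entry point releases only what was acquired
	 * before the corresponding failure.
	 */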
3826 err_translate_failed:
3827 err_bad_object_type:
3828 err_bad_offset:
3829 err_bad_parent:
3830 err_copy_data_failed:
3831 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3832 	binder_free_txn_fixups(t);
3833 	trace_binder_transaction_failed_buffer_release(t->buffer);
3834 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3835 					  buffer_offset, true);
3836 	if (target_node)
3837 		binder_dec_node_tmpref(target_node);
3838 	target_node = NULL;
3839 	t->buffer->transaction = NULL;
3840 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3841 err_binder_alloc_buf_failed:
3842 err_bad_extra_size:
3843 	if (lsmctx.context)
3844 		security_release_secctx(&lsmctx);
3845 err_get_secctx_failed:
3846 	kfree(tcomplete);
3847 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3848 err_alloc_tcomplete_failed:
3849 	if (trace_binder_txn_latency_free_enabled())
3850 		binder_txn_latency_free(t);
3851 err_bad_todo_list:
3852 err_bad_call_stack:
3853 err_empty_call_stack:
3854 err_dead_binder:
3855 err_invalid_target_handle:
3856 	if (target_node) {
3857 		binder_dec_node(target_node, 1, 0);
3858 		binder_dec_node_tmpref(target_node);
3859 	}
3860 
3861 	binder_netlink_report(proc, t, tr->data_size, return_error);
3862 	kfree(t);
3863 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3864 err_alloc_t_failed:
3865 
3866 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3867 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3868 		     proc->pid, thread->pid, reply ? "reply" :
3869 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3870 		     target_proc ? target_proc->pid : 0,
3871 		     target_thread ? target_thread->pid : 0,
3872 		     t_debug_id, return_error, return_error_param,
3873 		     tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3874 		     return_error_line);
3875 
3876 	if (target_thread)
3877 		binder_thread_dec_tmpref(target_thread);
3878 	if (target_proc)
3879 		binder_proc_dec_tmpref(target_proc);
3880 
3881 	{
3882 		struct binder_transaction_log_entry *fe;
3883 
3884 		e->return_error = return_error;
3885 		e->return_error_param = return_error_param;
3886 		e->return_error_line = return_error_line;
3887 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3888 		*fe = *e;
3889 		/*
3890 		 * write barrier to synchronize with initialization
3891 		 * of log entry
3892 		 */
3893 		smp_wmb();
3894 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3895 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3896 	}
3897 
3898 	BUG_ON(thread->return_error.cmd != BR_OK);
3899 	if (in_reply_to) {
3900 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3901 				return_error, return_error_param);
3902 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3903 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3904 		binder_send_failed_reply(in_reply_to, return_error);
3905 	} else {
3906 		binder_inner_proc_lock(proc);
3907 		binder_set_extended_error(&thread->ee, t_debug_id,
3908 				return_error, return_error_param);
3909 		binder_inner_proc_unlock(proc);
3910 		thread->return_error.cmd = return_error;
3911 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3912 	}
3913 }
3914 
3915 static int
3916 binder_request_freeze_notification(struct binder_proc *proc,
3917 				   struct binder_thread *thread,
3918 				   struct binder_handle_cookie *handle_cookie)
3919 {
3920 	struct binder_ref_freeze *freeze;
3921 	struct binder_ref *ref;
3922 
3923 	freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3924 	if (!freeze)
3925 		return -ENOMEM;
3926 	binder_proc_lock(proc);
3927 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3928 	if (!ref) {
3929 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3930 				  proc->pid, thread->pid, handle_cookie->handle);
3931 		binder_proc_unlock(proc);
3932 		kfree(freeze);
3933 		return -EINVAL;
3934 	}
3935 
3936 	binder_node_lock(ref->node);
3937 	if (ref->freeze) {
3938 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3939 				  proc->pid, thread->pid);
3940 		binder_node_unlock(ref->node);
3941 		binder_proc_unlock(proc);
3942 		kfree(freeze);
3943 		return -EINVAL;
3944 	}
3945 
3946 	binder_stats_created(BINDER_STAT_FREEZE);
3947 	INIT_LIST_HEAD(&freeze->work.entry);
3948 	freeze->cookie = handle_cookie->cookie;
3949 	freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3950 	ref->freeze = freeze;
3951 
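	/*
	 * If the node's owner process is still around, queue an
	 * initial notification carrying its current frozen state.
	 */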
3952 	if (ref->node->proc) {
3953 		binder_inner_proc_lock(ref->node->proc);
3954 		freeze->is_frozen = ref->node->proc->is_frozen;
3955 		binder_inner_proc_unlock(ref->node->proc);
3956 
3957 		binder_inner_proc_lock(proc);
3958 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3959 		binder_wakeup_proc_ilocked(proc);
3960 		binder_inner_proc_unlock(proc);
3961 	}
3962 
3963 	binder_node_unlock(ref->node);
3964 	binder_proc_unlock(proc);
3965 	return 0;
3966 }
3967 
3968 static int
3969 binder_clear_freeze_notification(struct binder_proc *proc,
3970 				 struct binder_thread *thread,
3971 				 struct binder_handle_cookie *handle_cookie)
3972 {
3973 	struct binder_ref_freeze *freeze;
3974 	struct binder_ref *ref;
3975 
3976 	binder_proc_lock(proc);
3977 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3978 	if (!ref) {
3979 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3980 				  proc->pid, thread->pid, handle_cookie->handle);
3981 		binder_proc_unlock(proc);
3982 		return -EINVAL;
3983 	}
3984 
3985 	binder_node_lock(ref->node);
3986 
3987 	if (!ref->freeze) {
3988 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3989 				  proc->pid, thread->pid);
3990 		binder_node_unlock(ref->node);
3991 		binder_proc_unlock(proc);
3992 		return -EINVAL;
3993 	}
3994 	freeze = ref->freeze;
3995 	binder_inner_proc_lock(proc);
3996 	if (freeze->cookie != handle_cookie->cookie) {
3997 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3998 				  proc->pid, thread->pid, (u64)freeze->cookie,
3999 				  (u64)handle_cookie->cookie);
4000 		binder_inner_proc_unlock(proc);
4001 		binder_node_unlock(ref->node);
4002 		binder_proc_unlock(proc);
4003 		return -EINVAL;
4004 	}
4005 	ref->freeze = NULL;
4006 	/*
4007 	 * Take the existing freeze object and overwrite its work type. There are three cases here:
4008 	 * 1. No pending notification. In this case just add the work to the queue.
4009 	 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
4010 	 *    should resend with the new work type.
4011 	 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
4012 	 *    needs to be done here.
4013 	 */
4014 	freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
4015 	if (list_empty(&freeze->work.entry)) {
4016 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4017 		binder_wakeup_proc_ilocked(proc);
4018 	} else if (freeze->sent) {
4019 		freeze->resend = true;
4020 	}
4021 	binder_inner_proc_unlock(proc);
4022 	binder_node_unlock(ref->node);
4023 	binder_proc_unlock(proc);
4024 	return 0;
4025 }
4026 
4027 static int
4028 binder_freeze_notification_done(struct binder_proc *proc,
4029 				struct binder_thread *thread,
4030 				binder_uintptr_t cookie)
4031 {
4032 	struct binder_ref_freeze *freeze = NULL;
4033 	struct binder_work *w;
4034 
4035 	binder_inner_proc_lock(proc);
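	/*
	 * Match the cookie against the notifications that have been
	 * delivered to userspace but not yet acknowledged.
	 */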
4036 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
4037 		struct binder_ref_freeze *tmp_freeze =
4038 			container_of(w, struct binder_ref_freeze, work);
4039 
4040 		if (tmp_freeze->cookie == cookie) {
4041 			freeze = tmp_freeze;
4042 			break;
4043 		}
4044 	}
4045 	if (!freeze) {
4046 		binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
4047 				  proc->pid, thread->pid, (u64)cookie);
4048 		binder_inner_proc_unlock(proc);
4049 		return -EINVAL;
4050 	}
4051 	binder_dequeue_work_ilocked(&freeze->work);
4052 	freeze->sent = false;
4053 	if (freeze->resend) {
4054 		freeze->resend = false;
4055 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4056 		binder_wakeup_proc_ilocked(proc);
4057 	}
4058 	binder_inner_proc_unlock(proc);
4059 	return 0;
4060 }
4061 
4062 /**
4063  * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Clean up the buffer and free it.
4072  */
4073 static void
4074 binder_free_buf(struct binder_proc *proc,
4075 		struct binder_thread *thread,
4076 		struct binder_buffer *buffer, bool is_failure)
4077 {
4078 	binder_inner_proc_lock(proc);
4079 	if (buffer->transaction) {
4080 		buffer->transaction->buffer = NULL;
4081 		buffer->transaction = NULL;
4082 	}
4083 	binder_inner_proc_unlock(proc);
4084 	if (buffer->async_transaction && buffer->target_node) {
4085 		struct binder_node *buf_node;
4086 		struct binder_work *w;
4087 
4088 		buf_node = buffer->target_node;
4089 		binder_node_inner_lock(buf_node);
4090 		BUG_ON(!buf_node->has_async_transaction);
4091 		BUG_ON(buf_node->proc != proc);
4092 		w = binder_dequeue_work_head_ilocked(
4093 				&buf_node->async_todo);
4094 		if (!w) {
4095 			buf_node->has_async_transaction = false;
4096 		} else {
4097 			binder_enqueue_work_ilocked(
4098 					w, &proc->todo);
4099 			binder_wakeup_proc_ilocked(proc);
4100 		}
4101 		binder_node_inner_unlock(buf_node);
4102 	}
4103 	trace_binder_transaction_buffer_release(buffer);
4104 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
4105 	binder_alloc_free_buf(&proc->alloc, buffer);
4106 }
4107 
4108 static int binder_thread_write(struct binder_proc *proc,
4109 			struct binder_thread *thread,
4110 			binder_uintptr_t binder_buffer, size_t size,
4111 			binder_size_t *consumed)
4112 {
4113 	uint32_t cmd;
4114 	struct binder_context *context = proc->context;
4115 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4116 	void __user *ptr = buffer + *consumed;
4117 	void __user *end = buffer + size;
4118 
4119 	while (ptr < end && thread->return_error.cmd == BR_OK) {
4120 		int ret;
4121 
4122 		if (get_user(cmd, (uint32_t __user *)ptr))
4123 			return -EFAULT;
4124 		ptr += sizeof(uint32_t);
4125 		trace_binder_command(cmd);
4126 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4127 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4128 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4129 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4130 		}
4131 		switch (cmd) {
4132 		case BC_INCREFS:
4133 		case BC_ACQUIRE:
4134 		case BC_RELEASE:
4135 		case BC_DECREFS: {
4136 			uint32_t target;
4137 			const char *debug_string;
4138 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4139 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4140 			struct binder_ref_data rdata;
4141 
4142 			if (get_user(target, (uint32_t __user *)ptr))
4143 				return -EFAULT;
4144 
4145 			ptr += sizeof(uint32_t);
4146 			ret = -1;
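			/*
			 * Descriptor 0 always refers to the context
			 * manager; for increments, take the reference
			 * directly on its node. A nonzero ret here falls
			 * through to the normal handle lookup below.
			 */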
4147 			if (increment && !target) {
4148 				struct binder_node *ctx_mgr_node;
4149 
4150 				mutex_lock(&context->context_mgr_node_lock);
4151 				ctx_mgr_node = context->binder_context_mgr_node;
4152 				if (ctx_mgr_node) {
4153 					if (ctx_mgr_node->proc == proc) {
4154 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4155 								  proc->pid, thread->pid);
4156 						mutex_unlock(&context->context_mgr_node_lock);
4157 						return -EINVAL;
4158 					}
4159 					ret = binder_inc_ref_for_node(
4160 							proc, ctx_mgr_node,
4161 							strong, NULL, &rdata);
4162 				}
4163 				mutex_unlock(&context->context_mgr_node_lock);
4164 			}
4165 			if (ret)
4166 				ret = binder_update_ref_for_handle(
4167 						proc, target, increment, strong,
4168 						&rdata);
4169 			if (!ret && rdata.desc != target) {
4170 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4171 					proc->pid, thread->pid,
4172 					target, rdata.desc);
4173 			}
4174 			switch (cmd) {
4175 			case BC_INCREFS:
4176 				debug_string = "IncRefs";
4177 				break;
4178 			case BC_ACQUIRE:
4179 				debug_string = "Acquire";
4180 				break;
4181 			case BC_RELEASE:
4182 				debug_string = "Release";
4183 				break;
4184 			case BC_DECREFS:
4185 			default:
4186 				debug_string = "DecRefs";
4187 				break;
4188 			}
4189 			if (ret) {
4190 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4191 					proc->pid, thread->pid, debug_string,
4192 					strong, target, ret);
4193 				break;
4194 			}
4195 			binder_debug(BINDER_DEBUG_USER_REFS,
4196 				     "%d:%d %s ref %d desc %d s %d w %d\n",
4197 				     proc->pid, thread->pid, debug_string,
4198 				     rdata.debug_id, rdata.desc, rdata.strong,
4199 				     rdata.weak);
4200 			break;
4201 		}
4202 		case BC_INCREFS_DONE:
4203 		case BC_ACQUIRE_DONE: {
4204 			binder_uintptr_t node_ptr;
4205 			binder_uintptr_t cookie;
4206 			struct binder_node *node;
4207 			bool free_node;
4208 
4209 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4210 				return -EFAULT;
4211 			ptr += sizeof(binder_uintptr_t);
4212 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4213 				return -EFAULT;
4214 			ptr += sizeof(binder_uintptr_t);
4215 			node = binder_get_node(proc, node_ptr);
4216 			if (node == NULL) {
4217 				binder_user_error("%d:%d %s u%016llx no match\n",
4218 					proc->pid, thread->pid,
4219 					cmd == BC_INCREFS_DONE ?
4220 					"BC_INCREFS_DONE" :
4221 					"BC_ACQUIRE_DONE",
4222 					(u64)node_ptr);
4223 				break;
4224 			}
4225 			if (cookie != node->cookie) {
4226 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4227 					proc->pid, thread->pid,
4228 					cmd == BC_INCREFS_DONE ?
4229 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4230 					(u64)node_ptr, node->debug_id,
4231 					(u64)cookie, (u64)node->cookie);
4232 				binder_put_node(node);
4233 				break;
4234 			}
4235 			binder_node_inner_lock(node);
4236 			if (cmd == BC_ACQUIRE_DONE) {
4237 				if (node->pending_strong_ref == 0) {
4238 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4239 						proc->pid, thread->pid,
4240 						node->debug_id);
4241 					binder_node_inner_unlock(node);
4242 					binder_put_node(node);
4243 					break;
4244 				}
4245 				node->pending_strong_ref = 0;
4246 			} else {
4247 				if (node->pending_weak_ref == 0) {
4248 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4249 						proc->pid, thread->pid,
4250 						node->debug_id);
4251 					binder_node_inner_unlock(node);
4252 					binder_put_node(node);
4253 					break;
4254 				}
4255 				node->pending_weak_ref = 0;
4256 			}
4257 			free_node = binder_dec_node_nilocked(node,
4258 					cmd == BC_ACQUIRE_DONE, 0);
4259 			WARN_ON(free_node);
4260 			binder_debug(BINDER_DEBUG_USER_REFS,
4261 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
4262 				     proc->pid, thread->pid,
4263 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4264 				     node->debug_id, node->local_strong_refs,
4265 				     node->local_weak_refs, node->tmp_refs);
4266 			binder_node_inner_unlock(node);
4267 			binder_put_node(node);
4268 			break;
4269 		}
4270 		case BC_ATTEMPT_ACQUIRE:
4271 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4272 			return -EINVAL;
4273 		case BC_ACQUIRE_RESULT:
4274 			pr_err("BC_ACQUIRE_RESULT not supported\n");
4275 			return -EINVAL;
4276 
4277 		case BC_FREE_BUFFER: {
4278 			binder_uintptr_t data_ptr;
4279 			struct binder_buffer *buffer;
4280 
4281 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4282 				return -EFAULT;
4283 			ptr += sizeof(binder_uintptr_t);
4284 
4285 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
4286 							      data_ptr);
4287 			if (IS_ERR_OR_NULL(buffer)) {
4288 				if (PTR_ERR(buffer) == -EPERM) {
4289 					binder_user_error(
4290 						"%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4291 						proc->pid, thread->pid,
4292 						(unsigned long)data_ptr - proc->alloc.vm_start);
4293 				} else {
4294 					binder_user_error(
4295 						"%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4296 						proc->pid, thread->pid,
4297 						(unsigned long)data_ptr - proc->alloc.vm_start);
4298 				}
4299 				break;
4300 			}
4301 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4302 				     "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4303 				     proc->pid, thread->pid,
4304 				     (unsigned long)data_ptr - proc->alloc.vm_start,
4305 				     buffer->debug_id,
4306 				     buffer->transaction ? "active" : "finished");
4307 			binder_free_buf(proc, thread, buffer, false);
4308 			break;
4309 		}
4310 
4311 		case BC_TRANSACTION_SG:
4312 		case BC_REPLY_SG: {
4313 			struct binder_transaction_data_sg tr;
4314 
4315 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4316 				return -EFAULT;
4317 			ptr += sizeof(tr);
4318 			binder_transaction(proc, thread, &tr.transaction_data,
4319 					   cmd == BC_REPLY_SG, tr.buffers_size);
4320 			break;
4321 		}
4322 		case BC_TRANSACTION:
4323 		case BC_REPLY: {
4324 			struct binder_transaction_data tr;
4325 
4326 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4327 				return -EFAULT;
4328 			ptr += sizeof(tr);
4329 			binder_transaction(proc, thread, &tr,
4330 					   cmd == BC_REPLY, 0);
4331 			break;
4332 		}
4333 
4334 		case BC_REGISTER_LOOPER:
4335 			binder_debug(BINDER_DEBUG_THREADS,
4336 				     "%d:%d BC_REGISTER_LOOPER\n",
4337 				     proc->pid, thread->pid);
4338 			binder_inner_proc_lock(proc);
4339 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4340 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4341 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4342 					proc->pid, thread->pid);
4343 			} else if (proc->requested_threads == 0) {
4344 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4345 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4346 					proc->pid, thread->pid);
4347 			} else {
4348 				proc->requested_threads--;
4349 				proc->requested_threads_started++;
4350 			}
4351 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4352 			binder_inner_proc_unlock(proc);
4353 			break;
4354 		case BC_ENTER_LOOPER:
4355 			binder_debug(BINDER_DEBUG_THREADS,
4356 				     "%d:%d BC_ENTER_LOOPER\n",
4357 				     proc->pid, thread->pid);
4358 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4359 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4360 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4361 					proc->pid, thread->pid);
4362 			}
4363 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4364 			break;
4365 		case BC_EXIT_LOOPER:
4366 			binder_debug(BINDER_DEBUG_THREADS,
4367 				     "%d:%d BC_EXIT_LOOPER\n",
4368 				     proc->pid, thread->pid);
4369 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4370 			break;
4371 
4372 		case BC_REQUEST_DEATH_NOTIFICATION:
4373 		case BC_CLEAR_DEATH_NOTIFICATION: {
4374 			uint32_t target;
4375 			binder_uintptr_t cookie;
4376 			struct binder_ref *ref;
4377 			struct binder_ref_death *death = NULL;
4378 
4379 			if (get_user(target, (uint32_t __user *)ptr))
4380 				return -EFAULT;
4381 			ptr += sizeof(uint32_t);
4382 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4383 				return -EFAULT;
4384 			ptr += sizeof(binder_uintptr_t);
4385 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4386 				/*
4387 				 * Allocate memory for death notification
4388 				 * before taking lock
4389 				 */
4390 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4391 				if (death == NULL) {
4392 					WARN_ON(thread->return_error.cmd !=
4393 						BR_OK);
4394 					thread->return_error.cmd = BR_ERROR;
4395 					binder_enqueue_thread_work(
4396 						thread,
4397 						&thread->return_error.work);
4398 					binder_debug(
4399 						BINDER_DEBUG_FAILED_TRANSACTION,
4400 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4401 						proc->pid, thread->pid);
4402 					break;
4403 				}
4404 			}
4405 			binder_proc_lock(proc);
4406 			ref = binder_get_ref_olocked(proc, target, false);
4407 			if (ref == NULL) {
4408 				binder_user_error("%d:%d %s invalid ref %d\n",
4409 					proc->pid, thread->pid,
4410 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4411 					"BC_REQUEST_DEATH_NOTIFICATION" :
4412 					"BC_CLEAR_DEATH_NOTIFICATION",
4413 					target);
4414 				binder_proc_unlock(proc);
4415 				kfree(death);
4416 				break;
4417 			}
4418 
4419 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4420 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4421 				     proc->pid, thread->pid,
4422 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4423 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4424 				     "BC_CLEAR_DEATH_NOTIFICATION",
4425 				     (u64)cookie, ref->data.debug_id,
4426 				     ref->data.desc, ref->data.strong,
4427 				     ref->data.weak, ref->node->debug_id);
4428 
4429 			binder_node_lock(ref->node);
4430 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4431 				if (ref->death) {
4432 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4433 						proc->pid, thread->pid);
4434 					binder_node_unlock(ref->node);
4435 					binder_proc_unlock(proc);
4436 					kfree(death);
4437 					break;
4438 				}
4439 				binder_stats_created(BINDER_STAT_DEATH);
4440 				INIT_LIST_HEAD(&death->work.entry);
4441 				death->cookie = cookie;
4442 				ref->death = death;
4443 				if (ref->node->proc == NULL) {
4444 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4445 
4446 					binder_inner_proc_lock(proc);
4447 					binder_enqueue_work_ilocked(
4448 						&ref->death->work, &proc->todo);
4449 					binder_wakeup_proc_ilocked(proc);
4450 					binder_inner_proc_unlock(proc);
4451 				}
4452 			} else {
4453 				if (ref->death == NULL) {
4454 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4455 						proc->pid, thread->pid);
4456 					binder_node_unlock(ref->node);
4457 					binder_proc_unlock(proc);
4458 					break;
4459 				}
4460 				death = ref->death;
4461 				if (death->cookie != cookie) {
4462 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4463 						proc->pid, thread->pid,
4464 						(u64)death->cookie,
4465 						(u64)cookie);
4466 					binder_node_unlock(ref->node);
4467 					binder_proc_unlock(proc);
4468 					break;
4469 				}
4470 				ref->death = NULL;
4471 				binder_inner_proc_lock(proc);
4472 				if (list_empty(&death->work.entry)) {
4473 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4474 					if (thread->looper &
4475 					    (BINDER_LOOPER_STATE_REGISTERED |
4476 					     BINDER_LOOPER_STATE_ENTERED))
4477 						binder_enqueue_thread_work_ilocked(
4478 								thread,
4479 								&death->work);
4480 					else {
4481 						binder_enqueue_work_ilocked(
4482 								&death->work,
4483 								&proc->todo);
4484 						binder_wakeup_proc_ilocked(
4485 								proc);
4486 					}
4487 				} else {
4488 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4489 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4490 				}
4491 				binder_inner_proc_unlock(proc);
4492 			}
4493 			binder_node_unlock(ref->node);
4494 			binder_proc_unlock(proc);
4495 		} break;
4496 		case BC_DEAD_BINDER_DONE: {
4497 			struct binder_work *w;
4498 			binder_uintptr_t cookie;
4499 			struct binder_ref_death *death = NULL;
4500 
4501 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4502 				return -EFAULT;
4503 
4504 			ptr += sizeof(cookie);
4505 			binder_inner_proc_lock(proc);
4506 			list_for_each_entry(w, &proc->delivered_death,
4507 					    entry) {
4508 				struct binder_ref_death *tmp_death =
4509 					container_of(w,
4510 						     struct binder_ref_death,
4511 						     work);
4512 
4513 				if (tmp_death->cookie == cookie) {
4514 					death = tmp_death;
4515 					break;
4516 				}
4517 			}
4518 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4519 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4520 				     proc->pid, thread->pid, (u64)cookie,
4521 				     death);
4522 			if (death == NULL) {
4523 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4524 					proc->pid, thread->pid, (u64)cookie);
4525 				binder_inner_proc_unlock(proc);
4526 				break;
4527 			}
4528 			binder_dequeue_work_ilocked(&death->work);
4529 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4530 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4531 				if (thread->looper &
4532 					(BINDER_LOOPER_STATE_REGISTERED |
4533 					 BINDER_LOOPER_STATE_ENTERED))
4534 					binder_enqueue_thread_work_ilocked(
4535 						thread, &death->work);
4536 				else {
4537 					binder_enqueue_work_ilocked(
4538 							&death->work,
4539 							&proc->todo);
4540 					binder_wakeup_proc_ilocked(proc);
4541 				}
4542 			}
4543 			binder_inner_proc_unlock(proc);
4544 		} break;
4545 
4546 		case BC_REQUEST_FREEZE_NOTIFICATION: {
4547 			struct binder_handle_cookie handle_cookie;
4548 			int error;
4549 
4550 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4551 				return -EFAULT;
4552 			ptr += sizeof(handle_cookie);
4553 			error = binder_request_freeze_notification(proc, thread,
4554 								   &handle_cookie);
4555 			if (error)
4556 				return error;
4557 		} break;
4558 
4559 		case BC_CLEAR_FREEZE_NOTIFICATION: {
4560 			struct binder_handle_cookie handle_cookie;
4561 			int error;
4562 
4563 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4564 				return -EFAULT;
4565 			ptr += sizeof(handle_cookie);
4566 			error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4567 			if (error)
4568 				return error;
4569 		} break;
4570 
4571 		case BC_FREEZE_NOTIFICATION_DONE: {
4572 			binder_uintptr_t cookie;
4573 			int error;
4574 
4575 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4576 				return -EFAULT;
4577 
4578 			ptr += sizeof(cookie);
4579 			error = binder_freeze_notification_done(proc, thread, cookie);
4580 			if (error)
4581 				return error;
4582 		} break;
4583 
4584 		default:
4585 			pr_err("%d:%d unknown command %u\n",
4586 			       proc->pid, thread->pid, cmd);
4587 			return -EINVAL;
4588 		}
4589 		*consumed = ptr - buffer;
4590 	}
4591 	return 0;
4592 }
4593 
4594 static void binder_stat_br(struct binder_proc *proc,
4595 			   struct binder_thread *thread, uint32_t cmd)
4596 {
4597 	trace_binder_return(cmd);
4598 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4599 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4600 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4601 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4602 	}
4603 }
4604 
4605 static int binder_put_node_cmd(struct binder_proc *proc,
4606 			       struct binder_thread *thread,
4607 			       void __user **ptrp,
4608 			       binder_uintptr_t node_ptr,
4609 			       binder_uintptr_t node_cookie,
4610 			       int node_debug_id,
4611 			       uint32_t cmd, const char *cmd_name)
4612 {
4613 	void __user *ptr = *ptrp;
4614 
4615 	if (put_user(cmd, (uint32_t __user *)ptr))
4616 		return -EFAULT;
4617 	ptr += sizeof(uint32_t);
4618 
4619 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4620 		return -EFAULT;
4621 	ptr += sizeof(binder_uintptr_t);
4622 
4623 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4624 		return -EFAULT;
4625 	ptr += sizeof(binder_uintptr_t);
4626 
4627 	binder_stat_br(proc, thread, cmd);
4628 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4629 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4630 		     (u64)node_ptr, (u64)node_cookie);
4631 
4632 	*ptrp = ptr;
4633 	return 0;
4634 }
4635 
4636 static int binder_wait_for_work(struct binder_thread *thread,
4637 				bool do_proc_work)
4638 {
4639 	DEFINE_WAIT(wait);
4640 	struct binder_proc *proc = thread->proc;
4641 	int ret = 0;
4642 
4643 	binder_inner_proc_lock(proc);
4644 	for (;;) {
4645 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4646 		if (binder_has_work_ilocked(thread, do_proc_work))
4647 			break;
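		/*
		 * Threads willing to take process-wide work park on
		 * proc->waiting_threads, where
		 * binder_select_thread_ilocked() can find them.
		 */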
4648 		if (do_proc_work)
4649 			list_add(&thread->waiting_thread_node,
4650 				 &proc->waiting_threads);
4651 		binder_inner_proc_unlock(proc);
4652 		schedule();
4653 		binder_inner_proc_lock(proc);
4654 		list_del_init(&thread->waiting_thread_node);
4655 		if (signal_pending(current)) {
4656 			ret = -EINTR;
4657 			break;
4658 		}
4659 	}
4660 	finish_wait(&thread->wait, &wait);
4661 	binder_inner_proc_unlock(proc);
4662 
4663 	return ret;
4664 }
4665 
4666 /**
4667  * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fix up the buffer with the
 * new fds first, and only then install the files.
4675  *
4676  * If we fail to allocate an fd, skip the install and release
4677  * any fds that have already been allocated.
4678  */
4679 static int binder_apply_fd_fixups(struct binder_proc *proc,
4680 				  struct binder_transaction *t)
4681 {
4682 	struct binder_txn_fd_fixup *fixup, *tmp;
4683 	int ret = 0;
4684 
4685 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4686 		int fd = get_unused_fd_flags(O_CLOEXEC);
4687 
4688 		if (fd < 0) {
4689 			binder_debug(BINDER_DEBUG_TRANSACTION,
4690 				     "failed fd fixup txn %d fd %d\n",
4691 				     t->debug_id, fd);
4692 			ret = -ENOMEM;
4693 			goto err;
4694 		}
4695 		binder_debug(BINDER_DEBUG_TRANSACTION,
4696 			     "fd fixup txn %d fd %d\n",
4697 			     t->debug_id, fd);
4698 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4699 		fixup->target_fd = fd;
4700 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4701 						fixup->offset, &fd,
4702 						sizeof(u32))) {
4703 			ret = -EINVAL;
4704 			goto err;
4705 		}
4706 	}
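	/*
	 * Every fd was reserved and patched into the buffer; now
	 * install the files. fd_install() cannot fail, so the buffer
	 * contents stay consistent with the installed fds.
	 */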
4707 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4708 		fd_install(fixup->target_fd, fixup->file);
4709 		list_del(&fixup->fixup_entry);
4710 		kfree(fixup);
4711 	}
4712 
4713 	return ret;
4714 
4715 err:
4716 	binder_free_txn_fixups(t);
4717 	return ret;
4718 }
4719 
4720 static int binder_thread_read(struct binder_proc *proc,
4721 			      struct binder_thread *thread,
4722 			      binder_uintptr_t binder_buffer, size_t size,
4723 			      binder_size_t *consumed, int non_block)
4724 {
4725 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4726 	void __user *ptr = buffer + *consumed;
4727 	void __user *end = buffer + size;
4728 
4729 	int ret = 0;
4730 	int wait_for_proc_work;
4731 
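	/*
	 * A fresh read always starts with a BR_NOOP so the returned
	 * command stream is never empty.
	 */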
4732 	if (*consumed == 0) {
4733 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4734 			return -EFAULT;
4735 		ptr += sizeof(uint32_t);
4736 	}
4737 
4738 retry:
4739 	binder_inner_proc_lock(proc);
4740 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4741 	binder_inner_proc_unlock(proc);
4742 
4743 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4744 
4745 	trace_binder_wait_for_work(wait_for_proc_work,
4746 				   !!thread->transaction_stack,
4747 				   !binder_worklist_empty(proc, &thread->todo));
4748 	if (wait_for_proc_work) {
4749 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4750 					BINDER_LOOPER_STATE_ENTERED))) {
4751 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4752 				proc->pid, thread->pid, thread->looper);
4753 			wait_event_interruptible(binder_user_error_wait,
4754 						 binder_stop_on_user_error < 2);
4755 		}
4756 		binder_set_nice(proc->default_priority);
4757 	}
4758 
4759 	if (non_block) {
4760 		if (!binder_has_work(thread, wait_for_proc_work))
4761 			ret = -EAGAIN;
4762 	} else {
4763 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4764 	}
4765 
4766 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4767 
4768 	if (ret)
4769 		return ret;
4770 
4771 	while (1) {
4772 		uint32_t cmd;
4773 		struct binder_transaction_data_secctx tr;
4774 		struct binder_transaction_data *trd = &tr.transaction_data;
4775 		struct binder_work *w = NULL;
4776 		struct list_head *list = NULL;
4777 		struct binder_transaction *t = NULL;
4778 		struct binder_thread *t_from;
4779 		size_t trsize = sizeof(*trd);
4780 
4781 		binder_inner_proc_lock(proc);
4782 		if (!binder_worklist_empty_ilocked(&thread->todo))
4783 			list = &thread->todo;
4784 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4785 			   wait_for_proc_work)
4786 			list = &proc->todo;
4787 		else {
4788 			binder_inner_proc_unlock(proc);
4789 
			/* Nothing was added beyond the initial BR_NOOP (4 bytes) */
4791 			if (ptr - buffer == 4 && !thread->looper_need_return)
4792 				goto retry;
4793 			break;
4794 		}
4795 
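		/*
		 * Stop when there is no room left for another command
		 * word plus a full transaction; the remaining work stays
		 * queued for the next read.
		 */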
4796 		if (end - ptr < sizeof(tr) + 4) {
4797 			binder_inner_proc_unlock(proc);
4798 			break;
4799 		}
4800 		w = binder_dequeue_work_head_ilocked(list);
4801 		if (binder_worklist_empty_ilocked(&thread->todo))
4802 			thread->process_todo = false;
4803 
4804 		switch (w->type) {
4805 		case BINDER_WORK_TRANSACTION: {
4806 			binder_inner_proc_unlock(proc);
4807 			t = container_of(w, struct binder_transaction, work);
4808 		} break;
4809 		case BINDER_WORK_RETURN_ERROR: {
4810 			struct binder_error *e = container_of(
4811 					w, struct binder_error, work);
4812 
4813 			WARN_ON(e->cmd == BR_OK);
4814 			binder_inner_proc_unlock(proc);
4815 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4816 				return -EFAULT;
4817 			cmd = e->cmd;
4818 			e->cmd = BR_OK;
4819 			ptr += sizeof(uint32_t);
4820 
4821 			binder_stat_br(proc, thread, cmd);
4822 		} break;
4823 		case BINDER_WORK_TRANSACTION_COMPLETE:
4824 		case BINDER_WORK_TRANSACTION_PENDING:
4825 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4826 			if (proc->oneway_spam_detection_enabled &&
4827 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4828 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4829 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4830 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4831 			else
4832 				cmd = BR_TRANSACTION_COMPLETE;
4833 			binder_inner_proc_unlock(proc);
4834 			kfree(w);
4835 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4836 			if (put_user(cmd, (uint32_t __user *)ptr))
4837 				return -EFAULT;
4838 			ptr += sizeof(uint32_t);
4839 
4840 			binder_stat_br(proc, thread, cmd);
4841 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4842 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4843 				     proc->pid, thread->pid);
4844 		} break;
4845 		case BINDER_WORK_NODE: {
4846 			struct binder_node *node = container_of(w, struct binder_node, work);
4847 			int strong, weak;
4848 			binder_uintptr_t node_ptr = node->ptr;
4849 			binder_uintptr_t node_cookie = node->cookie;
4850 			int node_debug_id = node->debug_id;
4851 			int has_weak_ref;
4852 			int has_strong_ref;
4853 			void __user *orig_ptr = ptr;
4854 
4855 			BUG_ON(proc != node->proc);
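			/*
			 * Compute whether userspace should now hold
			 * strong/weak refs on this node, and emit
			 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
			 * below for any deltas from what it already
			 * holds.
			 */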
4856 			strong = node->internal_strong_refs ||
4857 					node->local_strong_refs;
4858 			weak = !hlist_empty(&node->refs) ||
4859 					node->local_weak_refs ||
4860 					node->tmp_refs || strong;
4861 			has_strong_ref = node->has_strong_ref;
4862 			has_weak_ref = node->has_weak_ref;
4863 
4864 			if (weak && !has_weak_ref) {
4865 				node->has_weak_ref = 1;
4866 				node->pending_weak_ref = 1;
4867 				node->local_weak_refs++;
4868 			}
4869 			if (strong && !has_strong_ref) {
4870 				node->has_strong_ref = 1;
4871 				node->pending_strong_ref = 1;
4872 				node->local_strong_refs++;
4873 			}
4874 			if (!strong && has_strong_ref)
4875 				node->has_strong_ref = 0;
4876 			if (!weak && has_weak_ref)
4877 				node->has_weak_ref = 0;
4878 			if (!weak && !strong) {
4879 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4880 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4881 					     proc->pid, thread->pid,
4882 					     node_debug_id,
4883 					     (u64)node_ptr,
4884 					     (u64)node_cookie);
4885 				rb_erase(&node->rb_node, &proc->nodes);
4886 				binder_inner_proc_unlock(proc);
4887 				binder_node_lock(node);
4888 				/*
4889 				 * Acquire the node lock before freeing the
4890 				 * node to serialize with other threads that
4891 				 * may have been holding the node lock while
4892 				 * decrementing this node (avoids race where
4893 				 * this thread frees while the other thread
4894 				 * is unlocking the node after the final
4895 				 * decrement)
4896 				 */
4897 				binder_node_unlock(node);
4898 				binder_free_node(node);
4899 			} else
4900 				binder_inner_proc_unlock(proc);
4901 
4902 			if (weak && !has_weak_ref)
4903 				ret = binder_put_node_cmd(
4904 						proc, thread, &ptr, node_ptr,
4905 						node_cookie, node_debug_id,
4906 						BR_INCREFS, "BR_INCREFS");
4907 			if (!ret && strong && !has_strong_ref)
4908 				ret = binder_put_node_cmd(
4909 						proc, thread, &ptr, node_ptr,
4910 						node_cookie, node_debug_id,
4911 						BR_ACQUIRE, "BR_ACQUIRE");
4912 			if (!ret && !strong && has_strong_ref)
4913 				ret = binder_put_node_cmd(
4914 						proc, thread, &ptr, node_ptr,
4915 						node_cookie, node_debug_id,
4916 						BR_RELEASE, "BR_RELEASE");
4917 			if (!ret && !weak && has_weak_ref)
4918 				ret = binder_put_node_cmd(
4919 						proc, thread, &ptr, node_ptr,
4920 						node_cookie, node_debug_id,
4921 						BR_DECREFS, "BR_DECREFS");
4922 			if (orig_ptr == ptr)
4923 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4924 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4925 					     proc->pid, thread->pid,
4926 					     node_debug_id,
4927 					     (u64)node_ptr,
4928 					     (u64)node_cookie);
4929 			if (ret)
4930 				return ret;
4931 		} break;
4932 		case BINDER_WORK_DEAD_BINDER:
4933 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4934 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4935 			struct binder_ref_death *death;
4936 			uint32_t cmd;
4937 			binder_uintptr_t cookie;
4938 
4939 			death = container_of(w, struct binder_ref_death, work);
4940 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4941 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4942 			else
4943 				cmd = BR_DEAD_BINDER;
4944 			cookie = death->cookie;
4945 
4946 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4947 				     "%d:%d %s %016llx\n",
4948 				      proc->pid, thread->pid,
4949 				      cmd == BR_DEAD_BINDER ?
4950 				      "BR_DEAD_BINDER" :
4951 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4952 				      (u64)cookie);
4953 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4954 				binder_inner_proc_unlock(proc);
4955 				kfree(death);
4956 				binder_stats_deleted(BINDER_STAT_DEATH);
4957 			} else {
4958 				binder_enqueue_work_ilocked(
4959 						w, &proc->delivered_death);
4960 				binder_inner_proc_unlock(proc);
4961 			}
4962 			if (put_user(cmd, (uint32_t __user *)ptr))
4963 				return -EFAULT;
4964 			ptr += sizeof(uint32_t);
4965 			if (put_user(cookie,
4966 				     (binder_uintptr_t __user *)ptr))
4967 				return -EFAULT;
4968 			ptr += sizeof(binder_uintptr_t);
4969 			binder_stat_br(proc, thread, cmd);
4970 			if (cmd == BR_DEAD_BINDER)
4971 				goto done; /* DEAD_BINDER notifications can cause transactions */
4972 		} break;
4973 
4974 		case BINDER_WORK_FROZEN_BINDER: {
4975 			struct binder_ref_freeze *freeze;
4976 			struct binder_frozen_state_info info;
4977 
4978 			memset(&info, 0, sizeof(info));
4979 			freeze = container_of(w, struct binder_ref_freeze, work);
4980 			info.is_frozen = freeze->is_frozen;
4981 			info.cookie = freeze->cookie;
4982 			freeze->sent = true;
4983 			binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4984 			binder_inner_proc_unlock(proc);
4985 
4986 			if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4987 				return -EFAULT;
4988 			ptr += sizeof(uint32_t);
4989 			if (copy_to_user(ptr, &info, sizeof(info)))
4990 				return -EFAULT;
4991 			ptr += sizeof(info);
4992 			binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4993 			goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4994 		} break;
4995 
4996 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4997 			struct binder_ref_freeze *freeze =
4998 			    container_of(w, struct binder_ref_freeze, work);
4999 			binder_uintptr_t cookie = freeze->cookie;
5000 
5001 			binder_inner_proc_unlock(proc);
5002 			kfree(freeze);
5003 			binder_stats_deleted(BINDER_STAT_FREEZE);
5004 			if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
5005 				return -EFAULT;
5006 			ptr += sizeof(uint32_t);
5007 			if (put_user(cookie, (binder_uintptr_t __user *)ptr))
5008 				return -EFAULT;
5009 			ptr += sizeof(binder_uintptr_t);
5010 			binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
5011 		} break;
5012 
5013 		default:
5014 			binder_inner_proc_unlock(proc);
5015 			pr_err("%d:%d: bad work type %d\n",
5016 			       proc->pid, thread->pid, w->type);
5017 			break;
5018 		}
5019 
5020 		if (!t)
5021 			continue;
5022 
5023 		BUG_ON(t->buffer == NULL);
5024 		if (t->buffer->target_node) {
5025 			struct binder_node *target_node = t->buffer->target_node;
5026 
5027 			trd->target.ptr = target_node->ptr;
5028 			trd->cookie =  target_node->cookie;
5029 			t->saved_priority = task_nice(current);
5030 			if (t->priority < target_node->min_priority &&
5031 			    !(t->flags & TF_ONE_WAY))
5032 				binder_set_nice(t->priority);
5033 			else if (!(t->flags & TF_ONE_WAY) ||
5034 				 t->saved_priority > target_node->min_priority)
5035 				binder_set_nice(target_node->min_priority);
5036 			cmd = BR_TRANSACTION;
5037 		} else {
5038 			trd->target.ptr = 0;
5039 			trd->cookie = 0;
5040 			cmd = BR_REPLY;
5041 		}
5042 		trd->code = t->code;
5043 		trd->flags = t->flags;
5044 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
5045 
5046 		t_from = binder_get_txn_from(t);
5047 		if (t_from) {
5048 			struct task_struct *sender = t_from->proc->tsk;
5049 
5050 			trd->sender_pid =
5051 				task_tgid_nr_ns(sender,
5052 						task_active_pid_ns(current));
5053 		} else {
5054 			trd->sender_pid = 0;
5055 		}
5056 
5057 		ret = binder_apply_fd_fixups(proc, t);
5058 		if (ret) {
5059 			struct binder_buffer *buffer = t->buffer;
5060 			bool oneway = !!(t->flags & TF_ONE_WAY);
5061 			int tid = t->debug_id;
5062 
5063 			if (t_from)
5064 				binder_thread_dec_tmpref(t_from);
5065 			buffer->transaction = NULL;
5066 			binder_cleanup_transaction(t, "fd fixups failed",
5067 						   BR_FAILED_REPLY);
5068 			binder_free_buf(proc, thread, buffer, true);
5069 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5070 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5071 				     proc->pid, thread->pid,
5072 				     oneway ? "async " :
5073 					(cmd == BR_REPLY ? "reply " : ""),
5074 				     tid, BR_FAILED_REPLY, ret, __LINE__);
5075 			if (cmd == BR_REPLY) {
5076 				cmd = BR_FAILED_REPLY;
5077 				if (put_user(cmd, (uint32_t __user *)ptr))
5078 					return -EFAULT;
5079 				ptr += sizeof(uint32_t);
5080 				binder_stat_br(proc, thread, cmd);
5081 				break;
5082 			}
5083 			continue;
5084 		}
5085 		trd->data_size = t->buffer->data_size;
5086 		trd->offsets_size = t->buffer->offsets_size;
5087 		trd->data.ptr.buffer = t->buffer->user_data;
5088 		trd->data.ptr.offsets = trd->data.ptr.buffer +
5089 					ALIGN(t->buffer->data_size,
5090 					    sizeof(void *));
5091 
5092 		tr.secctx = t->security_ctx;
5093 		if (t->security_ctx) {
5094 			cmd = BR_TRANSACTION_SEC_CTX;
5095 			trsize = sizeof(tr);
5096 		}
5097 		if (put_user(cmd, (uint32_t __user *)ptr)) {
5098 			if (t_from)
5099 				binder_thread_dec_tmpref(t_from);
5100 
5101 			binder_cleanup_transaction(t, "put_user failed",
5102 						   BR_FAILED_REPLY);
5103 
5104 			return -EFAULT;
5105 		}
5106 		ptr += sizeof(uint32_t);
5107 		if (copy_to_user(ptr, &tr, trsize)) {
5108 			if (t_from)
5109 				binder_thread_dec_tmpref(t_from);
5110 
5111 			binder_cleanup_transaction(t, "copy_to_user failed",
5112 						   BR_FAILED_REPLY);
5113 
5114 			return -EFAULT;
5115 		}
5116 		ptr += trsize;
5117 
5118 		trace_binder_transaction_received(t);
5119 		binder_stat_br(proc, thread, cmd);
5120 		binder_debug(BINDER_DEBUG_TRANSACTION,
5121 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5122 			     proc->pid, thread->pid,
5123 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5124 				(cmd == BR_TRANSACTION_SEC_CTX) ?
5125 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5126 			     t->debug_id, t_from ? t_from->proc->pid : 0,
5127 			     t_from ? t_from->pid : 0, cmd,
5128 			     t->buffer->data_size, t->buffer->offsets_size);
5129 
5130 		if (t_from)
5131 			binder_thread_dec_tmpref(t_from);
5132 		t->buffer->allow_user_free = 1;
5133 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5134 			binder_inner_proc_lock(thread->proc);
5135 			t->to_parent = thread->transaction_stack;
5136 			t->to_thread = thread;
5137 			thread->transaction_stack = t;
5138 			binder_inner_proc_unlock(thread->proc);
5139 		} else {
5140 			binder_free_transaction(t);
5141 		}
5142 		break;
5143 	}
5144 
5145 done:
5146 
5147 	*consumed = ptr - buffer;
5148 	binder_inner_proc_lock(proc);
5149 	if (proc->requested_threads == 0 &&
5150 	    list_empty(&thread->proc->waiting_threads) &&
5151 	    proc->requested_threads_started < proc->max_threads &&
	    /* user-space fails to spawn a new thread if we leave this out */
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))) {
5155 		proc->requested_threads++;
5156 		binder_inner_proc_unlock(proc);
5157 		binder_debug(BINDER_DEBUG_THREADS,
5158 			     "%d:%d BR_SPAWN_LOOPER\n",
5159 			     proc->pid, thread->pid);
5160 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5161 			return -EFAULT;
5162 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5163 	} else
5164 		binder_inner_proc_unlock(proc);
5165 	return 0;
5166 }
5167 
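/*
 * Drain and dispose of every work item still queued on the given list.
 * Used when a process or thread is torn down: undelivered transactions
 * are failed with BR_DEAD_REPLY and undelivered notification records
 * are freed.
 */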
5168 static void binder_release_work(struct binder_proc *proc,
5169 				struct list_head *list)
5170 {
5171 	struct binder_work *w;
5172 	enum binder_work_type wtype;
5173 
5174 	while (1) {
5175 		binder_inner_proc_lock(proc);
5176 		w = binder_dequeue_work_head_ilocked(list);
5177 		wtype = w ? w->type : 0;
5178 		binder_inner_proc_unlock(proc);
5179 		if (!w)
5180 			return;
5181 
5182 		switch (wtype) {
5183 		case BINDER_WORK_TRANSACTION: {
5184 			struct binder_transaction *t;
5185 
5186 			t = container_of(w, struct binder_transaction, work);
5187 
5188 			binder_cleanup_transaction(t, "process died.",
5189 						   BR_DEAD_REPLY);
5190 		} break;
5191 		case BINDER_WORK_RETURN_ERROR: {
5192 			struct binder_error *e = container_of(
5193 					w, struct binder_error, work);
5194 
5195 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5196 				"undelivered TRANSACTION_ERROR: %u\n",
5197 				e->cmd);
5198 		} break;
5199 		case BINDER_WORK_TRANSACTION_PENDING:
5200 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5201 		case BINDER_WORK_TRANSACTION_COMPLETE: {
5202 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5203 				"undelivered TRANSACTION_COMPLETE\n");
5204 			kfree(w);
5205 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5206 		} break;
5207 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5208 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5209 			struct binder_ref_death *death;
5210 
5211 			death = container_of(w, struct binder_ref_death, work);
5212 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5213 				"undelivered death notification, %016llx\n",
5214 				(u64)death->cookie);
5215 			kfree(death);
5216 			binder_stats_deleted(BINDER_STAT_DEATH);
5217 		} break;
5218 		case BINDER_WORK_NODE:
5219 			break;
5220 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5221 			struct binder_ref_freeze *freeze;
5222 
5223 			freeze = container_of(w, struct binder_ref_freeze, work);
5224 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5225 				     "undelivered freeze notification, %016llx\n",
5226 				     (u64)freeze->cookie);
5227 			kfree(freeze);
5228 			binder_stats_deleted(BINDER_STAT_FREEZE);
5229 		} break;
5230 		default:
5231 			pr_err("unexpected work type, %d, not freed\n",
5232 			       wtype);
5233 			break;
5234 		}
5235 	}
5237 }
5238 
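/*
 * Look up the binder_thread for current in proc->threads, an rbtree
 * keyed by PID. On a miss, initialize and insert new_thread if the
 * caller supplied one, otherwise return NULL. The caller must hold
 * proc->inner_lock.
 */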
5239 static struct binder_thread *binder_get_thread_ilocked(
5240 		struct binder_proc *proc, struct binder_thread *new_thread)
5241 {
5242 	struct binder_thread *thread = NULL;
5243 	struct rb_node *parent = NULL;
5244 	struct rb_node **p = &proc->threads.rb_node;
5245 
5246 	while (*p) {
5247 		parent = *p;
5248 		thread = rb_entry(parent, struct binder_thread, rb_node);
5249 
5250 		if (current->pid < thread->pid)
5251 			p = &(*p)->rb_left;
5252 		else if (current->pid > thread->pid)
5253 			p = &(*p)->rb_right;
5254 		else
5255 			return thread;
5256 	}
5257 	if (!new_thread)
5258 		return NULL;
5259 	thread = new_thread;
5260 	binder_stats_created(BINDER_STAT_THREAD);
5261 	thread->proc = proc;
5262 	thread->pid = current->pid;
5263 	atomic_set(&thread->tmp_ref, 0);
5264 	init_waitqueue_head(&thread->wait);
5265 	INIT_LIST_HEAD(&thread->todo);
5266 	rb_link_node(&thread->rb_node, parent, p);
5267 	rb_insert_color(&thread->rb_node, &proc->threads);
5268 	thread->looper_need_return = true;
5269 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5270 	thread->return_error.cmd = BR_OK;
5271 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5272 	thread->reply_error.cmd = BR_OK;
5273 	thread->ee.command = BR_OK;
5274 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5275 	return thread;
5276 }
5277 
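/*
 * Find, or lazily create, the binder_thread for current. The first
 * lookup is done without an allocation since proc->inner_lock is a
 * spinlock; on a miss we allocate with GFP_KERNEL, retry under the
 * lock, and free the new node if another thread won the race.
 */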
5278 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5279 {
5280 	struct binder_thread *thread;
5281 	struct binder_thread *new_thread;
5282 
5283 	binder_inner_proc_lock(proc);
5284 	thread = binder_get_thread_ilocked(proc, NULL);
5285 	binder_inner_proc_unlock(proc);
5286 	if (!thread) {
5287 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5288 		if (new_thread == NULL)
5289 			return NULL;
5290 		binder_inner_proc_lock(proc);
5291 		thread = binder_get_thread_ilocked(proc, new_thread);
5292 		binder_inner_proc_unlock(proc);
5293 		if (thread != new_thread)
5294 			kfree(new_thread);
5295 	}
5296 	return thread;
5297 }
5298 
5299 static void binder_free_proc(struct binder_proc *proc)
5300 {
5301 	struct binder_device *device;
5302 
5303 	BUG_ON(!list_empty(&proc->todo));
5304 	BUG_ON(!list_empty(&proc->delivered_death));
5305 	if (proc->outstanding_txns)
5306 		pr_warn("%s: Unexpected outstanding_txns %d\n",
5307 			__func__, proc->outstanding_txns);
5308 	device = container_of(proc->context, struct binder_device, context);
5309 	if (refcount_dec_and_test(&device->ref)) {
5310 		binder_remove_device(device);
5311 		kfree(proc->context->name);
5312 		kfree(device);
5313 	}
5314 	binder_alloc_deferred_release(&proc->alloc);
5315 	put_task_struct(proc->tsk);
5316 	put_cred(proc->cred);
5317 	binder_stats_deleted(BINDER_STAT_PROC);
5318 	dbitmap_free(&proc->dmap);
5319 	kfree(proc);
5320 }
5321 
5322 static void binder_free_thread(struct binder_thread *thread)
5323 {
5324 	BUG_ON(!list_empty(&thread->todo));
5325 	binder_stats_deleted(BINDER_STAT_THREAD);
5326 	binder_proc_dec_tmpref(thread->proc);
5327 	kfree(thread);
5328 }
5329 
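/*
 * Tear down a thread: remove it from proc->threads and unwind its
 * transaction stack. Transactions targeting the thread lose their
 * destination, transactions it sent lose their sender, and a reply
 * still owed to a peer is failed with BR_DEAD_REPLY. Returns the
 * number of transactions that were still active.
 */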
5330 static int binder_thread_release(struct binder_proc *proc,
5331 				 struct binder_thread *thread)
5332 {
5333 	struct binder_transaction *t;
5334 	struct binder_transaction *send_reply = NULL;
5335 	int active_transactions = 0;
5336 	struct binder_transaction *last_t = NULL;
5337 
5338 	binder_inner_proc_lock(thread->proc);
	/*
	 * Take a ref on the proc so it survives after we remove this
	 * thread from proc->threads. The corresponding decrement is in
	 * binder_free_thread(), when the thread is actually freed.
	 */
5345 	proc->tmp_ref++;
5346 	/*
5347 	 * take a ref on this thread to ensure it
5348 	 * survives while we are releasing it
5349 	 */
5350 	atomic_inc(&thread->tmp_ref);
5351 	rb_erase(&thread->rb_node, &proc->threads);
5352 	t = thread->transaction_stack;
5353 	if (t) {
5354 		spin_lock(&t->lock);
5355 		if (t->to_thread == thread)
5356 			send_reply = t;
5357 	} else {
5358 		__acquire(&t->lock);
5359 	}
5360 	thread->is_dead = true;
5361 
5362 	while (t) {
5363 		last_t = t;
5364 		active_transactions++;
5365 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5366 			     "release %d:%d transaction %d %s, still active\n",
5367 			      proc->pid, thread->pid,
5368 			     t->debug_id,
5369 			     (t->to_thread == thread) ? "in" : "out");
5370 
5371 		if (t->to_thread == thread) {
5372 			thread->proc->outstanding_txns--;
5373 			t->to_proc = NULL;
5374 			t->to_thread = NULL;
5375 			if (t->buffer) {
5376 				t->buffer->transaction = NULL;
5377 				t->buffer = NULL;
5378 			}
5379 			t = t->to_parent;
5380 		} else if (t->from == thread) {
5381 			t->from = NULL;
5382 			t = t->from_parent;
5383 		} else
5384 			BUG();
5385 		spin_unlock(&last_t->lock);
5386 		if (t)
5387 			spin_lock(&t->lock);
5388 		else
5389 			__acquire(&t->lock);
5390 	}
5391 	/* annotation for sparse, lock not acquired in last iteration above */
5392 	__release(&t->lock);
5393 
5394 	/*
5395 	 * If this thread used poll, make sure we remove the waitqueue from any
5396 	 * poll data structures holding it.
5397 	 */
5398 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5399 		wake_up_pollfree(&thread->wait);
5400 
5401 	binder_inner_proc_unlock(thread->proc);
5402 
5403 	/*
5404 	 * This is needed to avoid races between wake_up_pollfree() above and
5405 	 * someone else removing the last entry from the queue for other reasons
5406 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5407 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5408 	 * we can be sure they're done after we call synchronize_rcu().
5409 	 */
5410 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5411 		synchronize_rcu();
5412 
5413 	if (send_reply)
5414 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5415 	binder_release_work(proc, &thread->todo);
5416 	binder_thread_dec_tmpref(thread);
5417 	return active_transactions;
5418 }
5419 
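/*
 * poll() support: report EPOLLIN when this thread (or the process, if
 * the thread is available for proc work) has work queued. The
 * BINDER_LOOPER_STATE_POLL bit records that the thread's waitqueue may
 * sit in poll tables, so teardown must use wake_up_pollfree().
 */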
5420 static __poll_t binder_poll(struct file *filp,
5421 				struct poll_table_struct *wait)
5422 {
5423 	struct binder_proc *proc = filp->private_data;
5424 	struct binder_thread *thread = NULL;
5425 	bool wait_for_proc_work;
5426 
5427 	thread = binder_get_thread(proc);
5428 	if (!thread)
5429 		return EPOLLERR;
5430 
5431 	binder_inner_proc_lock(thread->proc);
5432 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5433 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5434 
5435 	binder_inner_proc_unlock(thread->proc);
5436 
5437 	poll_wait(filp, &thread->wait, wait);
5438 
5439 	if (binder_has_work(thread, wait_for_proc_work))
5440 		return EPOLLIN;
5441 
5442 	return 0;
5443 }
5444 
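/*
 * Handle BINDER_WRITE_READ: consume the write buffer first, then fill
 * the read buffer (blocking unless O_NONBLOCK was set). A minimal
 * user-space sketch, illustrative only ('fd', 'cmds' and 'returns' are
 * assumed to exist):
 *
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)cmds;
 *	bwr.write_size = cmds_len;
 *	bwr.read_buffer = (binder_uintptr_t)returns;
 *	bwr.read_size = sizeof(returns);
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -1;
 *
 * On return, bwr.write_consumed and bwr.read_consumed report how much
 * of each buffer the kernel actually processed.
 */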
5445 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5446 				struct binder_thread *thread)
5447 {
5448 	int ret = 0;
5449 	struct binder_proc *proc = filp->private_data;
5450 	void __user *ubuf = (void __user *)arg;
5451 	struct binder_write_read bwr;
5452 
5453 	if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
5454 		return -EFAULT;
5455 
5456 	binder_debug(BINDER_DEBUG_READ_WRITE,
5457 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5458 		     proc->pid, thread->pid,
5459 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5460 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5461 
5462 	if (bwr.write_size > 0) {
5463 		ret = binder_thread_write(proc, thread,
5464 					  bwr.write_buffer,
5465 					  bwr.write_size,
5466 					  &bwr.write_consumed);
5467 		trace_binder_write_done(ret);
5468 		if (ret < 0) {
5469 			bwr.read_consumed = 0;
5470 			goto out;
5471 		}
5472 	}
5473 	if (bwr.read_size > 0) {
5474 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5475 					 bwr.read_size,
5476 					 &bwr.read_consumed,
5477 					 filp->f_flags & O_NONBLOCK);
5478 		trace_binder_read_done(ret);
5479 		binder_inner_proc_lock(proc);
5480 		if (!binder_worklist_empty_ilocked(&proc->todo))
5481 			binder_wakeup_proc_ilocked(proc);
5482 		binder_inner_proc_unlock(proc);
5483 		if (ret < 0)
5484 			goto out;
5485 	}
5486 	binder_debug(BINDER_DEBUG_READ_WRITE,
5487 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5488 		     proc->pid, thread->pid,
5489 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5490 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5491 out:
5492 	if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5493 		ret = -EFAULT;
5494 	return ret;
5495 }
5496 
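/*
 * Install the context manager node for this binder context. Only one
 * may exist at a time (-EBUSY otherwise); the first euid to claim the
 * role is remembered and later claims by other euids fail with -EPERM.
 */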
5497 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5498 				    struct flat_binder_object *fbo)
5499 {
5500 	int ret = 0;
5501 	struct binder_proc *proc = filp->private_data;
5502 	struct binder_context *context = proc->context;
5503 	struct binder_node *new_node;
5504 	kuid_t curr_euid = current_euid();
5505 
5506 	guard(mutex)(&context->context_mgr_node_lock);
5507 	if (context->binder_context_mgr_node) {
5508 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5509 		return -EBUSY;
5510 	}
5511 	ret = security_binder_set_context_mgr(proc->cred);
5512 	if (ret < 0)
5513 		return ret;
5514 	if (uid_valid(context->binder_context_mgr_uid)) {
5515 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5516 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5517 			       from_kuid(&init_user_ns, curr_euid),
5518 			       from_kuid(&init_user_ns,
5519 					 context->binder_context_mgr_uid));
5520 			return -EPERM;
5521 		}
5522 	} else {
5523 		context->binder_context_mgr_uid = curr_euid;
5524 	}
5525 	new_node = binder_new_node(proc, fbo);
5526 	if (!new_node)
5527 		return -ENOMEM;
5528 	binder_node_lock(new_node);
5529 	new_node->local_weak_refs++;
5530 	new_node->local_strong_refs++;
5531 	new_node->has_strong_ref = 1;
5532 	new_node->has_weak_ref = 1;
5533 	context->binder_context_mgr_node = new_node;
5534 	binder_node_unlock(new_node);
5535 	binder_put_node(new_node);
5536 	return ret;
5537 }
5538 
5539 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5540 		struct binder_node_info_for_ref *info)
5541 {
5542 	struct binder_node *node;
5543 	struct binder_context *context = proc->context;
5544 	__u32 handle = info->handle;
5545 
5546 	if (info->strong_count || info->weak_count || info->reserved1 ||
5547 	    info->reserved2 || info->reserved3) {
5548 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5549 				  proc->pid);
5550 		return -EINVAL;
5551 	}
5552 
5553 	/* This ioctl may only be used by the context manager */
5554 	mutex_lock(&context->context_mgr_node_lock);
5555 	if (!context->binder_context_mgr_node ||
5556 		context->binder_context_mgr_node->proc != proc) {
5557 		mutex_unlock(&context->context_mgr_node_lock);
5558 		return -EPERM;
5559 	}
5560 	mutex_unlock(&context->context_mgr_node_lock);
5561 
5562 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5563 	if (!node)
5564 		return -EINVAL;
5565 
5566 	info->strong_count = node->local_strong_refs +
5567 		node->internal_strong_refs;
5568 	info->weak_count = node->local_weak_refs;
5569 
5570 	binder_put_node(node);
5571 
5572 	return 0;
5573 }
5574 
5575 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5576 				struct binder_node_debug_info *info)
5577 {
5578 	struct rb_node *n;
5579 	binder_uintptr_t ptr = info->ptr;
5580 
5581 	memset(info, 0, sizeof(*info));
5582 
5583 	binder_inner_proc_lock(proc);
5584 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5585 		struct binder_node *node = rb_entry(n, struct binder_node,
5586 						    rb_node);
5587 		if (node->ptr > ptr) {
5588 			info->ptr = node->ptr;
5589 			info->cookie = node->cookie;
5590 			info->has_strong_ref = node->has_strong_ref;
5591 			info->has_weak_ref = node->has_weak_ref;
5592 			break;
5593 		}
5594 	}
5595 	binder_inner_proc_unlock(proc);
5596 
5597 	return 0;
5598 }
5599 
5600 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5601 {
5602 	struct rb_node *n;
5603 	struct binder_thread *thread;
5604 
5605 	if (proc->outstanding_txns > 0)
5606 		return true;
5607 
5608 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5609 		thread = rb_entry(n, struct binder_thread, rb_node);
5610 		if (thread->transaction_stack)
5611 			return true;
5612 	}
5613 	return false;
5614 }
5615 
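/*
 * Walk every node of the given proc and queue a freeze notification to
 * each process that registered one on a ref to that node. Temporary
 * node refs allow dropping proc->inner_lock during the walk; if a
 * notification was already sent with the opposite state, it is marked
 * for resend.
 */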
5616 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5617 {
5618 	struct binder_node *prev = NULL;
5619 	struct rb_node *n;
5620 	struct binder_ref *ref;
5621 
5622 	binder_inner_proc_lock(proc);
5623 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5624 		struct binder_node *node;
5625 
5626 		node = rb_entry(n, struct binder_node, rb_node);
5627 		binder_inc_node_tmpref_ilocked(node);
5628 		binder_inner_proc_unlock(proc);
5629 		if (prev)
5630 			binder_put_node(prev);
5631 		binder_node_lock(node);
5632 		hlist_for_each_entry(ref, &node->refs, node_entry) {
5633 			/*
5634 			 * Need the node lock to synchronize
5635 			 * with new notification requests and the
5636 			 * inner lock to synchronize with queued
5637 			 * freeze notifications.
5638 			 */
5639 			binder_inner_proc_lock(ref->proc);
5640 			if (!ref->freeze) {
5641 				binder_inner_proc_unlock(ref->proc);
5642 				continue;
5643 			}
5644 			ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5645 			if (list_empty(&ref->freeze->work.entry)) {
5646 				ref->freeze->is_frozen = is_frozen;
5647 				binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5648 				binder_wakeup_proc_ilocked(ref->proc);
5649 			} else {
5650 				if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5651 					ref->freeze->resend = true;
5652 				ref->freeze->is_frozen = is_frozen;
5653 			}
5654 			binder_inner_proc_unlock(ref->proc);
5655 		}
5656 		prev = node;
5657 		binder_node_unlock(node);
5658 		binder_inner_proc_lock(proc);
5659 		if (proc->is_dead)
5660 			break;
5661 	}
5662 	binder_inner_proc_unlock(proc);
5663 	if (prev)
5664 		binder_put_node(prev);
5665 }
5666 
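/*
 * Freeze or unfreeze target_proc. Freezing marks the process so that
 * new transactions are rejected and, if a timeout is given, waits for
 * outstanding transactions to drain. A user-space sketch, illustrative
 * only:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(fd, BINDER_FREEZE, &info) < 0)
 *		return -1;
 *
 * A failure with EAGAIN means transactions were still pending when the
 * wait ended; the caller may retry.
 */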
5667 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5668 			       struct binder_proc *target_proc)
5669 {
5670 	int ret = 0;
5671 
5672 	if (!info->enable) {
5673 		binder_inner_proc_lock(target_proc);
5674 		target_proc->sync_recv = false;
5675 		target_proc->async_recv = false;
5676 		target_proc->is_frozen = false;
5677 		binder_inner_proc_unlock(target_proc);
5678 		binder_add_freeze_work(target_proc, false);
5679 		return 0;
5680 	}
5681 
5682 	/*
5683 	 * Freezing the target. Prevent new transactions by
5684 	 * setting frozen state. If timeout specified, wait
5685 	 * for transactions to drain.
5686 	 */
5687 	binder_inner_proc_lock(target_proc);
5688 	target_proc->sync_recv = false;
5689 	target_proc->async_recv = false;
5690 	target_proc->is_frozen = true;
5691 	binder_inner_proc_unlock(target_proc);
5692 
5693 	if (info->timeout_ms > 0)
5694 		ret = wait_event_interruptible_timeout(
5695 			target_proc->freeze_wait,
5696 			(!target_proc->outstanding_txns),
5697 			msecs_to_jiffies(info->timeout_ms));
5698 
	/* Check for pending transactions still waiting on a reply */
5700 	if (ret >= 0) {
5701 		binder_inner_proc_lock(target_proc);
5702 		if (binder_txns_pending_ilocked(target_proc))
5703 			ret = -EAGAIN;
5704 		binder_inner_proc_unlock(target_proc);
5705 	}
5706 
5707 	if (ret < 0) {
5708 		binder_inner_proc_lock(target_proc);
5709 		target_proc->is_frozen = false;
5710 		binder_inner_proc_unlock(target_proc);
5711 	} else {
5712 		binder_add_freeze_work(target_proc, true);
5713 	}
5714 
5715 	return ret;
5716 }
5717 
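/*
 * Report frozen-state status for every proc matching info->pid (one
 * PID may have a proc per binder context). sync_recv carries the
 * proc's sync_recv flag in bit 0 and "transactions still pending a
 * reply" in bit 1.
 */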
5718 static int binder_ioctl_get_freezer_info(
5719 				struct binder_frozen_status_info *info)
5720 {
5721 	struct binder_proc *target_proc;
5722 	bool found = false;
5723 	__u32 txns_pending;
5724 
5725 	info->sync_recv = 0;
5726 	info->async_recv = 0;
5727 
5728 	mutex_lock(&binder_procs_lock);
5729 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5730 		if (target_proc->pid == info->pid) {
5731 			found = true;
5732 			binder_inner_proc_lock(target_proc);
5733 			txns_pending = binder_txns_pending_ilocked(target_proc);
5734 			info->sync_recv |= target_proc->sync_recv |
5735 					(txns_pending << 1);
5736 			info->async_recv |= target_proc->async_recv;
5737 			binder_inner_proc_unlock(target_proc);
5738 		}
5739 	}
5740 	mutex_unlock(&binder_procs_lock);
5741 
5742 	if (!found)
5743 		return -EINVAL;
5744 
5745 	return 0;
5746 }
5747 
5748 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5749 					   void __user *ubuf)
5750 {
5751 	struct binder_extended_error ee;
5752 
5753 	binder_inner_proc_lock(thread->proc);
5754 	ee = thread->ee;
5755 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5756 	binder_inner_proc_unlock(thread->proc);
5757 
5758 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5759 		return -EFAULT;
5760 
5761 	return 0;
5762 }
5763 
5764 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5765 {
5766 	int ret;
5767 	struct binder_proc *proc = filp->private_data;
5768 	struct binder_thread *thread;
5769 	void __user *ubuf = (void __user *)arg;
5770 
5771 	trace_binder_ioctl(cmd, arg);
5772 
5773 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5774 	if (ret)
5775 		goto err_unlocked;
5776 
5777 	thread = binder_get_thread(proc);
5778 	if (thread == NULL) {
5779 		ret = -ENOMEM;
5780 		goto err;
5781 	}
5782 
5783 	switch (cmd) {
5784 	case BINDER_WRITE_READ:
5785 		ret = binder_ioctl_write_read(filp, arg, thread);
5786 		if (ret)
5787 			goto err;
5788 		break;
5789 	case BINDER_SET_MAX_THREADS: {
5790 		u32 max_threads;
5791 
5792 		if (copy_from_user(&max_threads, ubuf,
5793 				   sizeof(max_threads))) {
5794 			ret = -EINVAL;
5795 			goto err;
5796 		}
5797 		binder_inner_proc_lock(proc);
5798 		proc->max_threads = max_threads;
5799 		binder_inner_proc_unlock(proc);
5800 		break;
5801 	}
5802 	case BINDER_SET_CONTEXT_MGR_EXT: {
5803 		struct flat_binder_object fbo;
5804 
5805 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5806 			ret = -EINVAL;
5807 			goto err;
5808 		}
5809 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5810 		if (ret)
5811 			goto err;
5812 		break;
5813 	}
5814 	case BINDER_SET_CONTEXT_MGR:
5815 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5816 		if (ret)
5817 			goto err;
5818 		break;
5819 	case BINDER_THREAD_EXIT:
5820 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5821 			     proc->pid, thread->pid);
5822 		binder_thread_release(proc, thread);
5823 		thread = NULL;
5824 		break;
5825 	case BINDER_VERSION: {
5826 		struct binder_version __user *ver = ubuf;
5827 
5828 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5829 			     &ver->protocol_version)) {
5830 			ret = -EINVAL;
5831 			goto err;
5832 		}
5833 		break;
5834 	}
5835 	case BINDER_GET_NODE_INFO_FOR_REF: {
5836 		struct binder_node_info_for_ref info;
5837 
5838 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5839 			ret = -EFAULT;
5840 			goto err;
5841 		}
5842 
5843 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5844 		if (ret < 0)
5845 			goto err;
5846 
5847 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5848 			ret = -EFAULT;
5849 			goto err;
5850 		}
5851 
5852 		break;
5853 	}
5854 	case BINDER_GET_NODE_DEBUG_INFO: {
5855 		struct binder_node_debug_info info;
5856 
5857 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5858 			ret = -EFAULT;
5859 			goto err;
5860 		}
5861 
5862 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5863 		if (ret < 0)
5864 			goto err;
5865 
5866 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5867 			ret = -EFAULT;
5868 			goto err;
5869 		}
5870 		break;
5871 	}
5872 	case BINDER_FREEZE: {
5873 		struct binder_freeze_info info;
5874 		struct binder_proc **target_procs = NULL, *target_proc;
5875 		int target_procs_count = 0, i = 0;
5876 
5877 		ret = 0;
5878 
5879 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5880 			ret = -EFAULT;
5881 			goto err;
5882 		}
5883 
5884 		mutex_lock(&binder_procs_lock);
5885 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5886 			if (target_proc->pid == info.pid)
5887 				target_procs_count++;
5888 		}
5889 
5890 		if (target_procs_count == 0) {
5891 			mutex_unlock(&binder_procs_lock);
5892 			ret = -EINVAL;
5893 			goto err;
5894 		}
5895 
5896 		target_procs = kcalloc(target_procs_count,
5897 				       sizeof(struct binder_proc *),
5898 				       GFP_KERNEL);
5899 
5900 		if (!target_procs) {
5901 			mutex_unlock(&binder_procs_lock);
5902 			ret = -ENOMEM;
5903 			goto err;
5904 		}
5905 
5906 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5907 			if (target_proc->pid != info.pid)
5908 				continue;
5909 
5910 			binder_inner_proc_lock(target_proc);
5911 			target_proc->tmp_ref++;
5912 			binder_inner_proc_unlock(target_proc);
5913 
5914 			target_procs[i++] = target_proc;
5915 		}
5916 		mutex_unlock(&binder_procs_lock);
5917 
5918 		for (i = 0; i < target_procs_count; i++) {
5919 			if (ret >= 0)
5920 				ret = binder_ioctl_freeze(&info,
5921 							  target_procs[i]);
5922 
5923 			binder_proc_dec_tmpref(target_procs[i]);
5924 		}
5925 
5926 		kfree(target_procs);
5927 
5928 		if (ret < 0)
5929 			goto err;
5930 		break;
5931 	}
5932 	case BINDER_GET_FROZEN_INFO: {
5933 		struct binder_frozen_status_info info;
5934 
5935 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5936 			ret = -EFAULT;
5937 			goto err;
5938 		}
5939 
5940 		ret = binder_ioctl_get_freezer_info(&info);
5941 		if (ret < 0)
5942 			goto err;
5943 
5944 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5945 			ret = -EFAULT;
5946 			goto err;
5947 		}
5948 		break;
5949 	}
5950 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5951 		uint32_t enable;
5952 
5953 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5954 			ret = -EFAULT;
5955 			goto err;
5956 		}
5957 		binder_inner_proc_lock(proc);
5958 		proc->oneway_spam_detection_enabled = (bool)enable;
5959 		binder_inner_proc_unlock(proc);
5960 		break;
5961 	}
5962 	case BINDER_GET_EXTENDED_ERROR:
5963 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5964 		if (ret < 0)
5965 			goto err;
5966 		break;
5967 	default:
5968 		ret = -EINVAL;
5969 		goto err;
5970 	}
5971 	ret = 0;
5972 err:
5973 	if (thread)
5974 		thread->looper_need_return = false;
5975 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5976 	if (ret && ret != -EINTR)
5977 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5978 err_unlocked:
5979 	trace_binder_ioctl_done(ret);
5980 	return ret;
5981 }
5982 
5983 static void binder_vma_open(struct vm_area_struct *vma)
5984 {
5985 	struct binder_proc *proc = vma->vm_private_data;
5986 
5987 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5988 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5989 		     proc->pid, vma->vm_start, vma->vm_end,
5990 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5991 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5992 }
5993 
5994 static void binder_vma_close(struct vm_area_struct *vma)
5995 {
5996 	struct binder_proc *proc = vma->vm_private_data;
5997 
5998 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5999 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
6000 		     proc->pid, vma->vm_start, vma->vm_end,
6001 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6002 		     (unsigned long)pgprot_val(vma->vm_page_prot));
6003 	binder_alloc_vma_close(&proc->alloc);
6004 }
6005 
6006 VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
6007 {
6008 	return VM_FAULT_SIGBUS;
6009 }
6010 EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
6011 
6012 static const struct vm_operations_struct binder_vm_ops = {
6013 	.open = binder_vma_open,
6014 	.close = binder_vma_close,
6015 	.fault = binder_vm_fault,
6016 };
6017 
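/*
 * Establish the user-space mapping that transaction buffers are handed
 * out from. Only the thread group leader that opened the device may
 * map it, the mapping must not be writable (FORBIDDEN_MMAP_FLAGS), and
 * VM_MAYWRITE is cleared so it cannot be made writable later.
 */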
6018 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
6019 {
6020 	struct binder_proc *proc = filp->private_data;
6021 
6022 	if (proc->tsk != current->group_leader)
6023 		return -EINVAL;
6024 
6025 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6026 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
6027 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
6028 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6029 		     (unsigned long)pgprot_val(vma->vm_page_prot));
6030 
6031 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
6032 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
6033 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
6034 		return -EPERM;
6035 	}
6036 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
6037 
6038 	vma->vm_ops = &binder_vm_ops;
6039 	vma->vm_private_data = proc;
6040 
6041 	return binder_alloc_mmap_handler(&proc->alloc, vma);
6042 }
6043 
6044 static int binder_open(struct inode *nodp, struct file *filp)
6045 {
6046 	struct binder_proc *proc, *itr;
6047 	struct binder_device *binder_dev;
6048 	struct binderfs_info *info;
6049 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
6050 	bool existing_pid = false;
6051 
6052 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6053 		     current->group_leader->pid, current->pid);
6054 
6055 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6056 	if (proc == NULL)
6057 		return -ENOMEM;
6058 
6059 	dbitmap_init(&proc->dmap);
6060 	spin_lock_init(&proc->inner_lock);
6061 	spin_lock_init(&proc->outer_lock);
6062 	get_task_struct(current->group_leader);
6063 	proc->tsk = current->group_leader;
6064 	proc->cred = get_cred(filp->f_cred);
6065 	INIT_LIST_HEAD(&proc->todo);
6066 	init_waitqueue_head(&proc->freeze_wait);
6067 	proc->default_priority = task_nice(current);
6068 	/* binderfs stashes devices in i_private */
6069 	if (is_binderfs_device(nodp)) {
6070 		binder_dev = nodp->i_private;
6071 		info = nodp->i_sb->s_fs_info;
6072 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
6073 	} else {
6074 		binder_dev = container_of(filp->private_data,
6075 					  struct binder_device, miscdev);
6076 	}
6077 	refcount_inc(&binder_dev->ref);
6078 	proc->context = &binder_dev->context;
6079 	binder_alloc_init(&proc->alloc);
6080 
6081 	binder_stats_created(BINDER_STAT_PROC);
6082 	proc->pid = current->group_leader->pid;
6083 	INIT_LIST_HEAD(&proc->delivered_death);
6084 	INIT_LIST_HEAD(&proc->delivered_freeze);
6085 	INIT_LIST_HEAD(&proc->waiting_threads);
6086 	filp->private_data = proc;
6087 
6088 	mutex_lock(&binder_procs_lock);
6089 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6090 		if (itr->pid == proc->pid) {
6091 			existing_pid = true;
6092 			break;
6093 		}
6094 	}
6095 	hlist_add_head(&proc->proc_node, &binder_procs);
6096 	mutex_unlock(&binder_procs_lock);
6097 
6098 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
6099 		char strbuf[11];
6100 
6101 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create one for the first PID to avoid debugfs log
		 * spamming. This is not a problem since the printing code
		 * prints all contexts for a given PID anyway.
		 */
6108 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6109 			binder_debugfs_dir_entry_proc,
6110 			(void *)(unsigned long)proc->pid,
6111 			&proc_fops);
6112 	}
6113 
6114 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
6115 		char strbuf[11];
6116 		struct dentry *binderfs_entry;
6117 
6118 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process-specific log file is
		 * shared between contexts and only created for the first
		 * PID. As with debugfs, this is fine because the log file
		 * contains information on all contexts of a given PID.
		 */
6125 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6126 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6127 		if (!IS_ERR(binderfs_entry)) {
6128 			proc->binderfs_entry = binderfs_entry;
6129 		} else {
6130 			int error;
6131 
6132 			error = PTR_ERR(binderfs_entry);
6133 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
6134 				strbuf, error);
6135 		}
6136 	}
6137 
6138 	return 0;
6139 }
6140 
6141 static int binder_flush(struct file *filp, fl_owner_t id)
6142 {
6143 	struct binder_proc *proc = filp->private_data;
6144 
6145 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6146 
6147 	return 0;
6148 }
6149 
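/*
 * Deferred flush: force every looper thread back to user space by
 * setting looper_need_return and waking any thread blocked in read.
 */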
6150 static void binder_deferred_flush(struct binder_proc *proc)
6151 {
6152 	struct rb_node *n;
6153 	int wake_count = 0;
6154 
6155 	binder_inner_proc_lock(proc);
6156 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6157 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6158 
6159 		thread->looper_need_return = true;
6160 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6161 			wake_up_interruptible(&thread->wait);
6162 			wake_count++;
6163 		}
6164 	}
6165 	binder_inner_proc_unlock(proc);
6166 
6167 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6168 		     "binder_flush: %d woke %d threads\n", proc->pid,
6169 		     wake_count);
6170 }
6171 
6172 static int binder_release(struct inode *nodp, struct file *filp)
6173 {
6174 	struct binder_proc *proc = filp->private_data;
6175 
6176 	debugfs_remove(proc->debugfs_entry);
6177 
6178 	if (proc->binderfs_entry) {
6179 		simple_recursive_removal(proc->binderfs_entry, NULL);
6180 		proc->binderfs_entry = NULL;
6181 	}
6182 
6183 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6184 
6185 	return 0;
6186 }
6187 
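/*
 * Release a node whose owning process is dying. If no other process
 * holds a reference, the node is freed outright; otherwise it moves to
 * binder_dead_nodes and BR_DEAD_BINDER work is queued to every proc
 * that registered a death notification on it.
 */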
6188 static int binder_node_release(struct binder_node *node, int refs)
6189 {
6190 	struct binder_ref *ref;
6191 	int death = 0;
6192 	struct binder_proc *proc = node->proc;
6193 
6194 	binder_release_work(proc, &node->async_todo);
6195 
6196 	binder_node_lock(node);
6197 	binder_inner_proc_lock(proc);
6198 	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
6202 	BUG_ON(!node->tmp_refs);
6203 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6204 		binder_inner_proc_unlock(proc);
6205 		binder_node_unlock(node);
6206 		binder_free_node(node);
6207 
6208 		return refs;
6209 	}
6210 
6211 	node->proc = NULL;
6212 	node->local_strong_refs = 0;
6213 	node->local_weak_refs = 0;
6214 	binder_inner_proc_unlock(proc);
6215 
6216 	spin_lock(&binder_dead_nodes_lock);
6217 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
6218 	spin_unlock(&binder_dead_nodes_lock);
6219 
6220 	hlist_for_each_entry(ref, &node->refs, node_entry) {
6221 		refs++;
6222 		/*
6223 		 * Need the node lock to synchronize
6224 		 * with new notification requests and the
6225 		 * inner lock to synchronize with queued
6226 		 * death notifications.
6227 		 */
6228 		binder_inner_proc_lock(ref->proc);
6229 		if (!ref->death) {
6230 			binder_inner_proc_unlock(ref->proc);
6231 			continue;
6232 		}
6233 
6234 		death++;
6235 
6236 		BUG_ON(!list_empty(&ref->death->work.entry));
6237 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6238 		binder_enqueue_work_ilocked(&ref->death->work,
6239 					    &ref->proc->todo);
6240 		binder_wakeup_proc_ilocked(ref->proc);
6241 		binder_inner_proc_unlock(ref->proc);
6242 	}
6243 
6244 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
6245 		     "node %d now dead, refs %d, death %d\n",
6246 		     node->debug_id, refs, death);
6247 	binder_node_unlock(node);
6248 	binder_put_node(node);
6249 
6250 	return refs;
6251 }
6252 
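/*
 * Deferred release of a binder_proc after its file is closed: drop the
 * context manager role if this proc held it, release all threads, then
 * all nodes, then all outgoing refs, and finally any undelivered work.
 */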
6253 static void binder_deferred_release(struct binder_proc *proc)
6254 {
6255 	struct binder_context *context = proc->context;
6256 	struct rb_node *n;
6257 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6258 
6259 	mutex_lock(&binder_procs_lock);
6260 	hlist_del(&proc->proc_node);
6261 	mutex_unlock(&binder_procs_lock);
6262 
6263 	mutex_lock(&context->context_mgr_node_lock);
6264 	if (context->binder_context_mgr_node &&
6265 	    context->binder_context_mgr_node->proc == proc) {
6266 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
6267 			     "%s: %d context_mgr_node gone\n",
6268 			     __func__, proc->pid);
6269 		context->binder_context_mgr_node = NULL;
6270 	}
6271 	mutex_unlock(&context->context_mgr_node_lock);
6272 	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads.
	 */
6277 	proc->tmp_ref++;
6278 
6279 	proc->is_dead = true;
6280 	proc->is_frozen = false;
6281 	proc->sync_recv = false;
6282 	proc->async_recv = false;
6283 	threads = 0;
6284 	active_transactions = 0;
6285 	while ((n = rb_first(&proc->threads))) {
6286 		struct binder_thread *thread;
6287 
6288 		thread = rb_entry(n, struct binder_thread, rb_node);
6289 		binder_inner_proc_unlock(proc);
6290 		threads++;
6291 		active_transactions += binder_thread_release(proc, thread);
6292 		binder_inner_proc_lock(proc);
6293 	}
6294 
6295 	nodes = 0;
6296 	incoming_refs = 0;
6297 	while ((n = rb_first(&proc->nodes))) {
6298 		struct binder_node *node;
6299 
6300 		node = rb_entry(n, struct binder_node, rb_node);
6301 		nodes++;
6302 		/*
6303 		 * take a temporary ref on the node before
6304 		 * calling binder_node_release() which will either
6305 		 * kfree() the node or call binder_put_node()
6306 		 */
6307 		binder_inc_node_tmpref_ilocked(node);
6308 		rb_erase(&node->rb_node, &proc->nodes);
6309 		binder_inner_proc_unlock(proc);
6310 		incoming_refs = binder_node_release(node, incoming_refs);
6311 		binder_inner_proc_lock(proc);
6312 	}
6313 	binder_inner_proc_unlock(proc);
6314 
6315 	outgoing_refs = 0;
6316 	binder_proc_lock(proc);
6317 	while ((n = rb_first(&proc->refs_by_desc))) {
6318 		struct binder_ref *ref;
6319 
6320 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
6321 		outgoing_refs++;
6322 		binder_cleanup_ref_olocked(ref);
6323 		binder_proc_unlock(proc);
6324 		binder_free_ref(ref);
6325 		binder_proc_lock(proc);
6326 	}
6327 	binder_proc_unlock(proc);
6328 
6329 	binder_release_work(proc, &proc->todo);
6330 	binder_release_work(proc, &proc->delivered_death);
6331 	binder_release_work(proc, &proc->delivered_freeze);
6332 
6333 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6334 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6335 		     __func__, proc->pid, threads, nodes, incoming_refs,
6336 		     outgoing_refs, active_transactions);
6337 
6338 	binder_proc_dec_tmpref(proc);
6339 }
6340 
6341 static void binder_deferred_func(struct work_struct *work)
6342 {
6343 	struct binder_proc *proc;
6344 
6345 	int defer;
6346 
6347 	do {
6348 		mutex_lock(&binder_deferred_lock);
6349 		if (!hlist_empty(&binder_deferred_list)) {
6350 			proc = hlist_entry(binder_deferred_list.first,
6351 					struct binder_proc, deferred_work_node);
6352 			hlist_del_init(&proc->deferred_work_node);
6353 			defer = proc->deferred_work;
6354 			proc->deferred_work = 0;
6355 		} else {
6356 			proc = NULL;
6357 			defer = 0;
6358 		}
6359 		mutex_unlock(&binder_deferred_lock);
6360 
6361 		if (defer & BINDER_DEFERRED_FLUSH)
6362 			binder_deferred_flush(proc);
6363 
6364 		if (defer & BINDER_DEFERRED_RELEASE)
6365 			binder_deferred_release(proc); /* frees proc */
6366 	} while (proc);
6367 }
6368 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6369 
6370 static void
6371 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6372 {
6373 	guard(mutex)(&binder_deferred_lock);
6374 	proc->deferred_work |= defer;
6375 	if (hlist_unhashed(&proc->deferred_work_node)) {
6376 		hlist_add_head(&proc->deferred_work_node,
6377 				&binder_deferred_list);
6378 		schedule_work(&binder_deferred_work);
6379 	}
6380 }
6381 
6382 static void print_binder_transaction_ilocked(struct seq_file *m,
6383 					     struct binder_proc *proc,
6384 					     const char *prefix,
6385 					     struct binder_transaction *t)
6386 {
6387 	struct binder_proc *to_proc;
6388 	struct binder_buffer *buffer = t->buffer;
6389 	ktime_t current_time = ktime_get();
6390 
6391 	spin_lock(&t->lock);
6392 	to_proc = t->to_proc;
6393 	seq_printf(m,
6394 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
6395 		   prefix, t->debug_id, t,
6396 		   t->from_pid,
6397 		   t->from_tid,
6398 		   to_proc ? to_proc->pid : 0,
6399 		   t->to_thread ? t->to_thread->pid : 0,
6400 		   t->code, t->flags, t->priority, t->is_async, t->is_reply,
6401 		   ktime_ms_delta(current_time, t->start_time));
6402 	spin_unlock(&t->lock);
6403 
6404 	if (proc != to_proc) {
6405 		/*
6406 		 * Can only safely deref buffer if we are holding the
6407 		 * correct proc inner lock for this node
6408 		 */
6409 		seq_puts(m, "\n");
6410 		return;
6411 	}
6412 
6413 	if (buffer == NULL) {
6414 		seq_puts(m, " buffer free\n");
6415 		return;
6416 	}
6417 	if (buffer->target_node)
6418 		seq_printf(m, " node %d", buffer->target_node->debug_id);
6419 	seq_printf(m, " size %zd:%zd offset %lx\n",
6420 		   buffer->data_size, buffer->offsets_size,
6421 		   buffer->user_data - proc->alloc.vm_start);
6422 }
6423 
6424 static void print_binder_work_ilocked(struct seq_file *m,
6425 				      struct binder_proc *proc,
6426 				      const char *prefix,
6427 				      const char *transaction_prefix,
6428 				      struct binder_work *w, bool hash_ptrs)
6429 {
6430 	struct binder_node *node;
6431 	struct binder_transaction *t;
6432 
6433 	switch (w->type) {
6434 	case BINDER_WORK_TRANSACTION:
6435 		t = container_of(w, struct binder_transaction, work);
6436 		print_binder_transaction_ilocked(
6437 				m, proc, transaction_prefix, t);
6438 		break;
6439 	case BINDER_WORK_RETURN_ERROR: {
6440 		struct binder_error *e = container_of(
6441 				w, struct binder_error, work);
6442 
6443 		seq_printf(m, "%stransaction error: %u\n",
6444 			   prefix, e->cmd);
6445 	} break;
6446 	case BINDER_WORK_TRANSACTION_COMPLETE:
6447 		seq_printf(m, "%stransaction complete\n", prefix);
6448 		break;
6449 	case BINDER_WORK_NODE:
6450 		node = container_of(w, struct binder_node, work);
6451 		if (hash_ptrs)
6452 			seq_printf(m, "%snode work %d: u%p c%p\n",
6453 				   prefix, node->debug_id,
6454 				   (void *)(long)node->ptr,
6455 				   (void *)(long)node->cookie);
6456 		else
6457 			seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6458 				   prefix, node->debug_id,
6459 				   (u64)node->ptr, (u64)node->cookie);
6460 		break;
6461 	case BINDER_WORK_DEAD_BINDER:
6462 		seq_printf(m, "%shas dead binder\n", prefix);
6463 		break;
6464 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6465 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6466 		break;
6467 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6468 		seq_printf(m, "%shas cleared death notification\n", prefix);
6469 		break;
6470 	case BINDER_WORK_FROZEN_BINDER:
6471 		seq_printf(m, "%shas frozen binder\n", prefix);
6472 		break;
6473 	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6474 		seq_printf(m, "%shas cleared freeze notification\n", prefix);
6475 		break;
6476 	default:
6477 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6478 		break;
6479 	}
6480 }
6481 
6482 static void print_binder_thread_ilocked(struct seq_file *m,
6483 					struct binder_thread *thread,
6484 					bool print_always, bool hash_ptrs)
6485 {
6486 	struct binder_transaction *t;
6487 	struct binder_work *w;
6488 	size_t start_pos = m->count;
6489 	size_t header_pos;
6490 
6491 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6492 			thread->pid, thread->looper,
6493 			thread->looper_need_return,
6494 			atomic_read(&thread->tmp_ref));
6495 	header_pos = m->count;
6496 	t = thread->transaction_stack;
6497 	while (t) {
6498 		if (t->from == thread) {
6499 			print_binder_transaction_ilocked(m, thread->proc,
6500 					"    outgoing transaction", t);
6501 			t = t->from_parent;
6502 		} else if (t->to_thread == thread) {
6503 			print_binder_transaction_ilocked(m, thread->proc,
6504 						 "    incoming transaction", t);
6505 			t = t->to_parent;
6506 		} else {
6507 			print_binder_transaction_ilocked(m, thread->proc,
6508 					"    bad transaction", t);
6509 			t = NULL;
6510 		}
6511 	}
6512 	list_for_each_entry(w, &thread->todo, entry) {
6513 		print_binder_work_ilocked(m, thread->proc, "    ",
6514 					  "    pending transaction",
6515 					  w, hash_ptrs);
6516 	}
6517 	if (!print_always && m->count == header_pos)
6518 		m->count = start_pos;
6519 }
6520 
6521 static void print_binder_node_nilocked(struct seq_file *m,
6522 				       struct binder_node *node,
6523 				       bool hash_ptrs)
6524 {
6525 	struct binder_ref *ref;
6526 	struct binder_work *w;
6527 	int count;
6528 
6529 	count = hlist_count_nodes(&node->refs);
6530 
6531 	if (hash_ptrs)
6532 		seq_printf(m, "  node %d: u%p c%p", node->debug_id,
6533 			   (void *)(long)node->ptr, (void *)(long)node->cookie);
6534 	else
6535 		seq_printf(m, "  node %d: u%016llx c%016llx", node->debug_id,
6536 			   (u64)node->ptr, (u64)node->cookie);
6537 	seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6538 		   node->has_strong_ref, node->has_weak_ref,
6539 		   node->local_strong_refs, node->local_weak_refs,
6540 		   node->internal_strong_refs, count, node->tmp_refs);
6541 	if (count) {
6542 		seq_puts(m, " proc");
6543 		hlist_for_each_entry(ref, &node->refs, node_entry)
6544 			seq_printf(m, " %d", ref->proc->pid);
6545 	}
6546 	seq_puts(m, "\n");
6547 	if (node->proc) {
6548 		list_for_each_entry(w, &node->async_todo, entry)
6549 			print_binder_work_ilocked(m, node->proc, "    ",
6550 					  "    pending async transaction",
6551 					  w, hash_ptrs);
6552 	}
6553 }
6554 
6555 static void print_binder_ref_olocked(struct seq_file *m,
6556 				     struct binder_ref *ref)
6557 {
6558 	binder_node_lock(ref->node);
6559 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6560 		   ref->data.debug_id, ref->data.desc,
6561 		   ref->node->proc ? "" : "dead ",
6562 		   ref->node->debug_id, ref->data.strong,
6563 		   ref->data.weak, ref->death);
6564 	binder_node_unlock(ref->node);
6565 }
6566 
6567 /**
6568  * print_next_binder_node_ilocked() - Print binder_node from a locked list
6569  * @m:          struct seq_file for output via seq_printf()
6570  * @proc:       struct binder_proc we hold the inner_proc_lock to (if any)
6571  * @node:       struct binder_node to print fields of
6572  * @prev_node:	struct binder_node we hold a temporary reference to (if any)
6573  * @hash_ptrs:  whether to hash @node's binder_uintptr_t fields
6574  *
6575  * Helper function to handle synchronization around printing a struct
6576  * binder_node while iterating through @proc->nodes or the dead nodes list.
6577  * Caller must hold either @proc->inner_lock (for live nodes) or
6578  * binder_dead_nodes_lock. This lock will be released during the body of this
6579  * function, but it will be reacquired before returning to the caller.
6580  *
6581  * Return:	pointer to the struct binder_node we hold a tmpref on
6582  */
6583 static struct binder_node *
6584 print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6585 			       struct binder_node *node,
6586 			       struct binder_node *prev_node, bool hash_ptrs)
6587 {
	/*
	 * Take a temporary reference on the node so that it isn't freed
	 * while we print it.
	 */
6592 	binder_inc_node_tmpref_ilocked(node);
6593 	/*
6594 	 * Live nodes need to drop the inner proc lock and dead nodes need to
6595 	 * drop the binder_dead_nodes_lock before trying to take the node lock.
6596 	 */
6597 	if (proc)
6598 		binder_inner_proc_unlock(proc);
6599 	else
6600 		spin_unlock(&binder_dead_nodes_lock);
6601 	if (prev_node)
6602 		binder_put_node(prev_node);
6603 	binder_node_inner_lock(node);
6604 	print_binder_node_nilocked(m, node, hash_ptrs);
6605 	binder_node_inner_unlock(node);
6606 	if (proc)
6607 		binder_inner_proc_lock(proc);
6608 	else
6609 		spin_lock(&binder_dead_nodes_lock);
6610 	return node;
6611 }
6612 
6613 static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6614 			      bool print_all, bool hash_ptrs)
6615 {
6616 	struct binder_work *w;
6617 	struct rb_node *n;
6618 	size_t start_pos = m->count;
6619 	size_t header_pos;
6620 	struct binder_node *last_node = NULL;
6621 
6622 	seq_printf(m, "proc %d\n", proc->pid);
6623 	seq_printf(m, "context %s\n", proc->context->name);
6624 	header_pos = m->count;
6625 
6626 	binder_inner_proc_lock(proc);
6627 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6628 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6629 						rb_node), print_all, hash_ptrs);
6630 
6631 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6632 		struct binder_node *node = rb_entry(n, struct binder_node,
6633 						    rb_node);
6634 		if (!print_all && !node->has_async_transaction)
6635 			continue;
6636 
6637 		last_node = print_next_binder_node_ilocked(m, proc, node,
6638 							   last_node,
6639 							   hash_ptrs);
6640 	}
6641 	binder_inner_proc_unlock(proc);
6642 	if (last_node)
6643 		binder_put_node(last_node);
6644 
6645 	if (print_all) {
6646 		binder_proc_lock(proc);
6647 		for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6648 			print_binder_ref_olocked(m, rb_entry(n,
6649 							     struct binder_ref,
6650 							     rb_node_desc));
6651 		binder_proc_unlock(proc);
6652 	}
6653 	binder_alloc_print_allocated(m, &proc->alloc);
6654 	binder_inner_proc_lock(proc);
6655 	list_for_each_entry(w, &proc->todo, entry)
6656 		print_binder_work_ilocked(m, proc, "  ",
6657 					  "  pending transaction", w,
6658 					  hash_ptrs);
6659 	list_for_each_entry(w, &proc->delivered_death, entry) {
6660 		seq_puts(m, "  has delivered dead binder\n");
6661 		break;
6662 	}
6663 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
6664 		seq_puts(m, "  has delivered freeze binder\n");
6665 		break;
6666 	}
6667 	binder_inner_proc_unlock(proc);
6668 	if (!print_all && m->count == header_pos)
6669 		m->count = start_pos;
6670 }
6671 
6672 static const char * const binder_return_strings[] = {
6673 	"BR_ERROR",
6674 	"BR_OK",
6675 	"BR_TRANSACTION",
6676 	"BR_REPLY",
6677 	"BR_ACQUIRE_RESULT",
6678 	"BR_DEAD_REPLY",
6679 	"BR_TRANSACTION_COMPLETE",
6680 	"BR_INCREFS",
6681 	"BR_ACQUIRE",
6682 	"BR_RELEASE",
6683 	"BR_DECREFS",
6684 	"BR_ATTEMPT_ACQUIRE",
6685 	"BR_NOOP",
6686 	"BR_SPAWN_LOOPER",
6687 	"BR_FINISHED",
6688 	"BR_DEAD_BINDER",
6689 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6690 	"BR_FAILED_REPLY",
6691 	"BR_FROZEN_REPLY",
6692 	"BR_ONEWAY_SPAM_SUSPECT",
6693 	"BR_TRANSACTION_PENDING_FROZEN",
6694 	"BR_FROZEN_BINDER",
6695 	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6696 };
6697 
6698 static const char * const binder_command_strings[] = {
6699 	"BC_TRANSACTION",
6700 	"BC_REPLY",
6701 	"BC_ACQUIRE_RESULT",
6702 	"BC_FREE_BUFFER",
6703 	"BC_INCREFS",
6704 	"BC_ACQUIRE",
6705 	"BC_RELEASE",
6706 	"BC_DECREFS",
6707 	"BC_INCREFS_DONE",
6708 	"BC_ACQUIRE_DONE",
6709 	"BC_ATTEMPT_ACQUIRE",
6710 	"BC_REGISTER_LOOPER",
6711 	"BC_ENTER_LOOPER",
6712 	"BC_EXIT_LOOPER",
6713 	"BC_REQUEST_DEATH_NOTIFICATION",
6714 	"BC_CLEAR_DEATH_NOTIFICATION",
6715 	"BC_DEAD_BINDER_DONE",
6716 	"BC_TRANSACTION_SG",
6717 	"BC_REPLY_SG",
6718 	"BC_REQUEST_FREEZE_NOTIFICATION",
6719 	"BC_CLEAR_FREEZE_NOTIFICATION",
6720 	"BC_FREEZE_NOTIFICATION_DONE",
6721 };
6722 
6723 static const char * const binder_objstat_strings[] = {
6724 	"proc",
6725 	"thread",
6726 	"node",
6727 	"ref",
6728 	"death",
6729 	"transaction",
6730 	"transaction_complete",
6731 	"freeze",
6732 };
6733 
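/*
 * print_binder_stats() - print the non-zero command (BC_*), return (BR_*)
 * and object counters from @stats. The BUILD_BUG_ON()s keep the string
 * tables above in sync with the counter arrays in struct binder_stats at
 * compile time. Objects are reported as "active total", where active is
 * created minus deleted.
 */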
6734 static void print_binder_stats(struct seq_file *m, const char *prefix,
6735 			       struct binder_stats *stats)
6736 {
6737 	int i;
6738 
6739 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6740 		     ARRAY_SIZE(binder_command_strings));
6741 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6742 		int temp = atomic_read(&stats->bc[i]);
6743 
6744 		if (temp)
6745 			seq_printf(m, "%s%s: %d\n", prefix,
6746 				   binder_command_strings[i], temp);
6747 	}
6748 
6749 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6750 		     ARRAY_SIZE(binder_return_strings));
6751 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6752 		int temp = atomic_read(&stats->br[i]);
6753 
6754 		if (temp)
6755 			seq_printf(m, "%s%s: %d\n", prefix,
6756 				   binder_return_strings[i], temp);
6757 	}
6758 
6759 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6760 		     ARRAY_SIZE(binder_objstat_strings));
6761 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6762 		     ARRAY_SIZE(stats->obj_deleted));
6763 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6764 		int created = atomic_read(&stats->obj_created[i]);
6765 		int deleted = atomic_read(&stats->obj_deleted[i]);
6766 
6767 		if (created || deleted)
6768 			seq_printf(m, "%s%s: active %d total %d\n",
6769 				prefix,
6770 				binder_objstat_strings[i],
6771 				created - deleted,
6772 				created);
6773 	}
6774 }
6775 
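/*
 * print_binder_proc_stats() - per-process summary for the "stats" file.
 *
 * Example output (illustrative only, values invented):
 *
 *	proc 1234
 *	context binder
 *	  threads: 4
 *	  requested threads: 0+2/15
 *	  ready threads 1
 *	  free async space 520192
 *	  nodes: 7
 *	  refs: 12 s 12 w 12
 *	  buffers: 3
 *	  pending transactions: 0
 *	  ...
 */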
6776 static void print_binder_proc_stats(struct seq_file *m,
6777 				    struct binder_proc *proc)
6778 {
6779 	struct binder_work *w;
6780 	struct binder_thread *thread;
6781 	struct rb_node *n;
6782 	int count, strong, weak, ready_threads;
6783 	size_t free_async_space =
6784 		binder_alloc_get_free_async_space(&proc->alloc);
6785 
6786 	seq_printf(m, "proc %d\n", proc->pid);
6787 	seq_printf(m, "context %s\n", proc->context->name);
6788 	count = 0;
6789 	ready_threads = 0;
6790 	binder_inner_proc_lock(proc);
6791 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6792 		count++;
6793 
6794 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6795 		ready_threads++;
6796 
6797 	seq_printf(m, "  threads: %d\n", count);
6798 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6799 			"  ready threads %d\n"
6800 			"  free async space %zd\n", proc->requested_threads,
6801 			proc->requested_threads_started, proc->max_threads,
6802 			ready_threads,
6803 			free_async_space);
6804 	count = 0;
6805 	for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6806 		count++;
6807 	binder_inner_proc_unlock(proc);
6808 	seq_printf(m, "  nodes: %d\n", count);
6809 	count = 0;
6810 	strong = 0;
6811 	weak = 0;
6812 	binder_proc_lock(proc);
6813 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6814 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6815 						  rb_node_desc);
6816 		count++;
6817 		strong += ref->data.strong;
6818 		weak += ref->data.weak;
6819 	}
6820 	binder_proc_unlock(proc);
6821 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6822 
6823 	count = binder_alloc_get_allocated_count(&proc->alloc);
6824 	seq_printf(m, "  buffers: %d\n", count);
6825 
6826 	binder_alloc_print_pages(m, &proc->alloc);
6827 
6828 	count = 0;
6829 	binder_inner_proc_lock(proc);
6830 	list_for_each_entry(w, &proc->todo, entry) {
6831 		if (w->type == BINDER_WORK_TRANSACTION)
6832 			count++;
6833 	}
6834 	binder_inner_proc_unlock(proc);
6835 	seq_printf(m, "  pending transactions: %d\n", count);
6836 
6837 	print_binder_stats(m, "  ", &proc->stats);
6838 }
6839 
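/*
 * print_binder_state() - emit the full "state" dump: first any nodes
 * whose owning process has died, then the state of every live proc.
 * print_next_binder_node_ilocked() drops binder_dead_nodes_lock while
 * printing, so a tmpref to the last visited node is carried across
 * iterations to keep it (and thus the list position) alive.
 */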
6840 static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6841 {
6842 	struct binder_proc *proc;
6843 	struct binder_node *node;
6844 	struct binder_node *last_node = NULL;
6845 
6846 	seq_puts(m, "binder state:\n");
6847 
6848 	spin_lock(&binder_dead_nodes_lock);
6849 	if (!hlist_empty(&binder_dead_nodes))
6850 		seq_puts(m, "dead nodes:\n");
6851 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6852 		last_node = print_next_binder_node_ilocked(m, NULL, node,
6853 							   last_node,
6854 							   hash_ptrs);
6855 	spin_unlock(&binder_dead_nodes_lock);
6856 	if (last_node)
6857 		binder_put_node(last_node);
6858 
6859 	mutex_lock(&binder_procs_lock);
6860 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6861 		print_binder_proc(m, proc, true, hash_ptrs);
6862 	mutex_unlock(&binder_procs_lock);
6863 }
6864 
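/*
 * print_binder_transactions() - like print_binder_state(), but with
 * print_all == false so only entries with pending work are shown; backs
 * the "transactions" and "transactions_hashed" files.
 */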
6865 static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6866 {
6867 	struct binder_proc *proc;
6868 
6869 	seq_puts(m, "binder transactions:\n");
6870 	mutex_lock(&binder_procs_lock);
6871 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6872 		print_binder_proc(m, proc, false, hash_ptrs);
6873 	mutex_unlock(&binder_procs_lock);
6874 }
6875 
6876 static int state_show(struct seq_file *m, void *unused)
6877 {
6878 	print_binder_state(m, false);
6879 	return 0;
6880 }
6881 
6882 static int state_hashed_show(struct seq_file *m, void *unused)
6883 {
6884 	print_binder_state(m, true);
6885 	return 0;
6886 }
6887 
6888 static int stats_show(struct seq_file *m, void *unused)
6889 {
6890 	struct binder_proc *proc;
6891 
6892 	seq_puts(m, "binder stats:\n");
6893 
6894 	print_binder_stats(m, "", &binder_stats);
6895 
6896 	mutex_lock(&binder_procs_lock);
6897 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6898 		print_binder_proc_stats(m, proc);
6899 	mutex_unlock(&binder_procs_lock);
6900 
6901 	return 0;
6902 }
6903 
6904 static int transactions_show(struct seq_file *m, void *unused)
6905 {
6906 	print_binder_transactions(m, false);
6907 	return 0;
6908 }
6909 
6910 static int transactions_hashed_show(struct seq_file *m, void *unused)
6911 {
6912 	print_binder_transactions(m, true);
6913 	return 0;
6914 }
6915 
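/*
 * proc_show() - dump state for a debugfs proc/<pid> entry. A pid can own
 * several binder_procs (one per opened binder device), so the whole list
 * is walked and every match is printed.
 */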
6916 static int proc_show(struct seq_file *m, void *unused)
6917 {
6918 	struct binder_proc *itr;
6919 	int pid = (unsigned long)m->private;
6920 
6921 	guard(mutex)(&binder_procs_lock);
6922 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6923 		if (itr->pid == pid) {
6924 			seq_puts(m, "binder proc state:\n");
6925 			print_binder_proc(m, itr, true, false);
6926 		}
6927 	}
6928 
6929 	return 0;
6930 }
6931 
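/*
 * print_binder_transaction_log_entry() - print one entry of the log ring.
 *
 * Entries are written without locking: the writer clears debug_id_done
 * before (re)filling a slot and stores the entry's debug_id there once
 * the slot is complete. Snapshot it before printing and re-check it
 * afterwards, with read barriers ordering the accesses, so a torn entry
 * is flagged "(incomplete)" rather than presented as consistent.
 * call_type is 2 for a reply, 1 for an async (oneway) call, 0 otherwise.
 */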
6932 static void print_binder_transaction_log_entry(struct seq_file *m,
6933 					struct binder_transaction_log_entry *e)
6934 {
6935 	int debug_id = READ_ONCE(e->debug_id_done);
6936 	/*
6937 	 * read barrier to guarantee that debug_id_done is read
6938 	 * before we print the log values
6939 	 */
6940 	smp_rmb();
6941 	seq_printf(m,
6942 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6943 		   e->debug_id, (e->call_type == 2) ? "reply" :
6944 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6945 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6946 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6947 		   e->return_error, e->return_error_param,
6948 		   e->return_error_line);
6949 	/*
6950 	 * read barrier to guarantee that debug_id_done is re-read
6951 	 * only after we are done printing the fields of the entry
6952 	 */
6953 	smp_rmb();
6954 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6955 			"\n" : " (incomplete)\n");
6956 }
6957 
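/*
 * transaction_log_show() - print the log ring oldest-first. Until the
 * ring has wrapped (!log->full), slots 0..log->cur are printed in order;
 * after a wrap, all ARRAY_SIZE(log->entry) slots are printed starting
 * just after the most recently written one.
 */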
6958 static int transaction_log_show(struct seq_file *m, void *unused)
6959 {
6960 	struct binder_transaction_log *log = m->private;
6961 	unsigned int log_cur = atomic_read(&log->cur);
6962 	unsigned int count;
6963 	unsigned int cur;
6964 	int i;
6965 
6966 	count = log_cur + 1;
6967 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6968 		0 : count % ARRAY_SIZE(log->entry);
6969 	if (count > ARRAY_SIZE(log->entry) || log->full)
6970 		count = ARRAY_SIZE(log->entry);
6971 	for (i = 0; i < count; i++) {
6972 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6973 
6974 		print_binder_transaction_log_entry(m, &log->entry[index]);
6975 	}
6976 	return 0;
6977 }
6978 
6979 const struct file_operations binder_fops = {
6980 	.owner = THIS_MODULE,
6981 	.poll = binder_poll,
6982 	.unlocked_ioctl = binder_ioctl,
6983 	.compat_ioctl = compat_ptr_ioctl,
6984 	.mmap = binder_mmap,
6985 	.open = binder_open,
6986 	.flush = binder_flush,
6987 	.release = binder_release,
6988 };
6989 
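/*
 * DEFINE_SHOW_ATTRIBUTE(name) generates a name##_fops that wraps the
 * matching name##_show() via single_open(), for registration below.
 */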
6990 DEFINE_SHOW_ATTRIBUTE(state);
6991 DEFINE_SHOW_ATTRIBUTE(state_hashed);
6992 DEFINE_SHOW_ATTRIBUTE(stats);
6993 DEFINE_SHOW_ATTRIBUTE(transactions);
6994 DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
6995 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6996 
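/*
 * Table of debugfs files created in binder_init() (binderfs creates the
 * same set for its optional stats directory). "transaction_log" and
 * "failed_transaction_log" share the same fops and differ only in the
 * ring buffer passed through .data.
 */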
6997 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6998 	{
6999 		.name = "state",
7000 		.mode = 0444,
7001 		.fops = &state_fops,
7002 		.data = NULL,
7003 	},
7004 	{
7005 		.name = "state_hashed",
7006 		.mode = 0444,
7007 		.fops = &state_hashed_fops,
7008 		.data = NULL,
7009 	},
7010 	{
7011 		.name = "stats",
7012 		.mode = 0444,
7013 		.fops = &stats_fops,
7014 		.data = NULL,
7015 	},
7016 	{
7017 		.name = "transactions",
7018 		.mode = 0444,
7019 		.fops = &transactions_fops,
7020 		.data = NULL,
7021 	},
7022 	{
7023 		.name = "transactions_hashed",
7024 		.mode = 0444,
7025 		.fops = &transactions_hashed_fops,
7026 		.data = NULL,
7027 	},
7028 	{
7029 		.name = "transaction_log",
7030 		.mode = 0444,
7031 		.fops = &transaction_log_fops,
7032 		.data = &binder_transaction_log,
7033 	},
7034 	{
7035 		.name = "failed_transaction_log",
7036 		.mode = 0444,
7037 		.fops = &transaction_log_fops,
7038 		.data = &binder_transaction_log_failed,
7039 	},
7040 	{} /* terminator */
7041 };
7042 
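/*
 * binder_add_device() / binder_remove_device() - add or remove a device
 * from the global binder_devices list under binder_devices_lock. They
 * are not static so that binderfs can track the devices it creates and
 * destroys.
 */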
7043 void binder_add_device(struct binder_device *device)
7044 {
7045 	guard(spinlock)(&binder_devices_lock);
7046 	hlist_add_head(&device->hlist, &binder_devices);
7047 }
7048 
7049 void binder_remove_device(struct binder_device *device)
7050 {
7051 	guard(spinlock)(&binder_devices_lock);
7052 	hlist_del_init(&device->hlist);
7053 }
7054 
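/*
 * init_binder_device() - allocate and register one binder misc device
 * named @name and add it to binder_devices. On misc_register() failure
 * the allocation is freed here and the error returned to binder_init().
 */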
7055 static int __init init_binder_device(const char *name)
7056 {
7057 	int ret;
7058 	struct binder_device *binder_device;
7059 
7060 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
7061 	if (!binder_device)
7062 		return -ENOMEM;
7063 
7064 	binder_device->miscdev.fops = &binder_fops;
7065 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
7066 	binder_device->miscdev.name = name;
7067 
7068 	refcount_set(&binder_device->ref, 1);
7069 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
7070 	binder_device->context.name = name;
7071 	mutex_init(&binder_device->context.context_mgr_node_lock);
7072 
7073 	ret = misc_register(&binder_device->miscdev);
7074 	if (ret < 0) {
7075 		kfree(binder_device);
7076 		return ret;
7077 	}
7078 
7079 	binder_add_device(binder_device);
7080 
7081 	return ret;
7082 }
7083 
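/*
 * binder_init() - driver initialization. Sets up, in order: the page
 * shrinker, the debugfs tree, the misc devices named in the
 * binder_devices_param module parameter (skipped when binderfs is
 * enabled or the parameter is empty), the generic netlink family and
 * binderfs itself. The error labels unwind in reverse order.
 */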
7084 static int __init binder_init(void)
7085 {
7086 	int ret;
7087 	char *device_name, *device_tmp;
7088 	struct binder_device *device;
7089 	struct hlist_node *tmp;
7090 	char *device_names = NULL;
7091 	const struct binder_debugfs_entry *db_entry;
7092 
7093 	ret = binder_alloc_shrinker_init();
7094 	if (ret)
7095 		return ret;
7096 
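	/* start at ~0U so the first slot claimed by the log is index 0 */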
7097 	atomic_set(&binder_transaction_log.cur, ~0U);
7098 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
7099 
7100 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
7101 
7102 	binder_for_each_debugfs_entry(db_entry)
7103 		debugfs_create_file(db_entry->name,
7104 					db_entry->mode,
7105 					binder_debugfs_dir_entry_root,
7106 					db_entry->data,
7107 					db_entry->fops);
7108 
7109 	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
7110 						binder_debugfs_dir_entry_root);
7111 
7112 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
7113 	    strcmp(binder_devices_param, "") != 0) {
7114 		/*
7115 		 * Copy the module parameter string, because we don't want to
7116 		 * tokenize it in-place.
7117 		 */
7118 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
7119 		if (!device_names) {
7120 			ret = -ENOMEM;
7121 			goto err_alloc_device_names_failed;
7122 		}
7123 
7124 		device_tmp = device_names;
7125 		while ((device_name = strsep(&device_tmp, ","))) {
7126 			ret = init_binder_device(device_name);
7127 			if (ret)
7128 				goto err_init_binder_device_failed;
7129 		}
7130 	}
7131 
7132 	ret = genl_register_family(&binder_nl_family);
7133 	if (ret)
7134 		goto err_init_binder_device_failed;
7135 
7136 	ret = init_binderfs();
7137 	if (ret)
7138 		goto err_init_binderfs_failed;
7139 
7140 	return ret;
7141 
7142 err_init_binderfs_failed:
7143 	genl_unregister_family(&binder_nl_family);
7144 
7145 err_init_binder_device_failed:
7146 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7147 		misc_deregister(&device->miscdev);
7148 		binder_remove_device(device);
7149 		kfree(device);
7150 	}
7151 
7152 	kfree(device_names);
7153 
7154 err_alloc_device_names_failed:
7155 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7156 	binder_alloc_shrinker_exit();
7157 
7158 	return ret;
7159 }
7160 
7161 device_initcall(binder_init);
7162 
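/*
 * With CREATE_TRACE_POINTS defined, this final include of binder_trace.h
 * emits the actual tracepoint definitions; earlier inclusions only
 * provided declarations.
 */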
7163 #define CREATE_TRACE_POINTS
7164 #include "binder_trace.h"
7165