xref: /linux/drivers/android/binder.c (revision 0d5ec7919f3747193f051036b2301734a4b5e1d6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock to be held on entry indicate which
33  * lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70 
71 #include <kunit/visibility.h>
72 
73 #include <uapi/linux/android/binder.h>
74 
75 #include <linux/cacheflush.h>
76 
77 #include "binder_internal.h"
78 #include "binder_trace.h"
79 
80 static HLIST_HEAD(binder_deferred_list);
81 static DEFINE_MUTEX(binder_deferred_lock);
82 
83 static HLIST_HEAD(binder_devices);
84 static DEFINE_SPINLOCK(binder_devices_lock);
85 
86 static HLIST_HEAD(binder_procs);
87 static DEFINE_MUTEX(binder_procs_lock);
88 
89 static HLIST_HEAD(binder_dead_nodes);
90 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
91 
92 static struct dentry *binder_debugfs_dir_entry_root;
93 static struct dentry *binder_debugfs_dir_entry_proc;
94 static atomic_t binder_last_id;
95 
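/*
 * DEFINE_SHOW_ATTRIBUTE(proc) below generates proc_open() and the
 * proc_fops file_operations around proc_show() for use with debugfs.
 */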
96 static int proc_show(struct seq_file *m, void *unused);
97 DEFINE_SHOW_ATTRIBUTE(proc);
98 
99 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
100 
101 enum {
102 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
103 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
104 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
105 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
106 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
107 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
108 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
109 	BINDER_DEBUG_USER_REFS              = 1U << 7,
110 	BINDER_DEBUG_THREADS                = 1U << 8,
111 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
112 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
113 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
114 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
115 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
116 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
117 };
118 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
119 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
120 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
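/*
 * With mode 0644 the mask can also be tuned at runtime, e.g. via
 * /sys/module/binder/parameters/debug_mask (path assuming the
 * in-tree "binder" module name).
 */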
121 
122 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
123 module_param_named(devices, binder_devices_param, charp, 0444);
124 
125 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
126 static int binder_stop_on_user_error;
127 
128 static int binder_set_stop_on_user_error(const char *val,
129 					 const struct kernel_param *kp)
130 {
131 	int ret;
132 
133 	ret = param_set_int(val, kp);
134 	if (binder_stop_on_user_error < 2)
135 		wake_up(&binder_user_error_wait);
136 	return ret;
137 }
138 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
139 	param_get_int, &binder_stop_on_user_error, 0644);
140 
141 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
142 {
143 	struct va_format vaf;
144 	va_list args;
145 
146 	if (binder_debug_mask & mask) {
147 		va_start(args, format);
148 		vaf.va = &args;
149 		vaf.fmt = format;
150 		pr_info_ratelimited("%pV", &vaf);
151 		va_end(args);
152 	}
153 }
154 
155 #define binder_txn_error(x...) \
156 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
157 
158 static __printf(1, 2) void binder_user_error(const char *format, ...)
159 {
160 	struct va_format vaf;
161 	va_list args;
162 
163 	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
164 		va_start(args, format);
165 		vaf.va = &args;
166 		vaf.fmt = format;
167 		pr_info_ratelimited("%pV", &vaf);
168 		va_end(args);
169 	}
170 
171 	if (binder_stop_on_user_error)
172 		binder_stop_on_user_error = 2;
173 }
174 
175 #define binder_set_extended_error(ee, _id, _command, _param) \
176 	do { \
177 		(ee)->id = _id; \
178 		(ee)->command = _command; \
179 		(ee)->param = _param; \
180 	} while (0)
181 
182 #define to_flat_binder_object(hdr) \
183 	container_of(hdr, struct flat_binder_object, hdr)
184 
185 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
186 
187 #define to_binder_buffer_object(hdr) \
188 	container_of(hdr, struct binder_buffer_object, hdr)
189 
190 #define to_binder_fd_array_object(hdr) \
191 	container_of(hdr, struct binder_fd_array_object, hdr)
192 
193 static struct binder_stats binder_stats;
194 
195 static inline void binder_stats_deleted(enum binder_stat_types type)
196 {
197 	atomic_inc(&binder_stats.obj_deleted[type]);
198 }
199 
200 static inline void binder_stats_created(enum binder_stat_types type)
201 {
202 	atomic_inc(&binder_stats.obj_created[type]);
203 }
204 
205 struct binder_transaction_log_entry {
206 	int debug_id;
207 	int debug_id_done;
208 	int call_type;
209 	int from_proc;
210 	int from_thread;
211 	int target_handle;
212 	int to_proc;
213 	int to_thread;
214 	int to_node;
215 	int data_size;
216 	int offsets_size;
217 	int return_error_line;
218 	uint32_t return_error;
219 	uint32_t return_error_param;
220 	char context_name[BINDERFS_MAX_NAME + 1];
221 };
222 
223 struct binder_transaction_log {
224 	atomic_t cur;
225 	bool full;
226 	struct binder_transaction_log_entry entry[32];
227 };
228 
229 static struct binder_transaction_log binder_transaction_log;
230 static struct binder_transaction_log binder_transaction_log_failed;
231 
232 static struct binder_transaction_log_entry *binder_transaction_log_add(
233 	struct binder_transaction_log *log)
234 {
235 	struct binder_transaction_log_entry *e;
236 	unsigned int cur = atomic_inc_return(&log->cur);
237 
238 	if (cur >= ARRAY_SIZE(log->entry))
239 		log->full = true;
240 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
241 	WRITE_ONCE(e->debug_id_done, 0);
242 	/*
243 	 * write-barrier to synchronize access to e->debug_id_done.
244 	 * We make sure the initialized 0 value is seen before
245 	 * the other fields are zeroed by memset().
246 	 */
247 	smp_wmb();
248 	memset(e, 0, sizeof(*e));
249 	return e;
250 }
251 
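/*
 * Illustrative only: a hedged sketch (not the driver's debugfs dump
 * code) of a reader pairing with the smp_wmb() above. Read
 * debug_id_done first, order the reads with smp_rmb(), then copy the
 * payload and re-check debug_id_done to detect a concurrent overwrite
 * of the slot.
 */
static inline bool binder_transaction_log_entry_read(
	struct binder_transaction_log_entry *e,
	struct binder_transaction_log_entry *out)
{
	int done = READ_ONCE(e->debug_id_done);

	/* pairs with smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	*out = *e;
	return done && done == READ_ONCE(e->debug_id_done);
}
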
252 enum binder_deferred_state {
253 	BINDER_DEFERRED_FLUSH        = 0x01,
254 	BINDER_DEFERRED_RELEASE      = 0x02,
255 };
256 
257 enum {
258 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
259 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
260 	BINDER_LOOPER_STATE_EXITED      = 0x04,
261 	BINDER_LOOPER_STATE_INVALID     = 0x08,
262 	BINDER_LOOPER_STATE_WAITING     = 0x10,
263 	BINDER_LOOPER_STATE_POLL        = 0x20,
264 };
265 
266 /**
267  * binder_proc_lock() - Acquire outer lock for given binder_proc
268  * @proc:         struct binder_proc to acquire
269  *
270  * Acquires proc->outer_lock. Used to protect binder_ref
271  * structures associated with the given proc.
272  */
273 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
274 static void
275 _binder_proc_lock(struct binder_proc *proc, int line)
276 	__acquires(&proc->outer_lock)
277 {
278 	binder_debug(BINDER_DEBUG_SPINLOCKS,
279 		     "%s: line=%d\n", __func__, line);
280 	spin_lock(&proc->outer_lock);
281 }
282 
283 /**
284  * binder_proc_unlock() - Release outer lock for given binder_proc
285  * @proc:                struct binder_proc to release
286  *
287  * Release lock acquired via binder_proc_lock()
288  */
289 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
290 static void
291 _binder_proc_unlock(struct binder_proc *proc, int line)
292 	__releases(&proc->outer_lock)
293 {
294 	binder_debug(BINDER_DEBUG_SPINLOCKS,
295 		     "%s: line=%d\n", __func__, line);
296 	spin_unlock(&proc->outer_lock);
297 }
298 
299 /**
300  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
301  * @proc:         struct binder_proc to acquire
302  *
303  * Acquires proc->inner_lock. Used to protect todo lists
304  */
305 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
306 static void
307 _binder_inner_proc_lock(struct binder_proc *proc, int line)
308 	__acquires(&proc->inner_lock)
309 {
310 	binder_debug(BINDER_DEBUG_SPINLOCKS,
311 		     "%s: line=%d\n", __func__, line);
312 	spin_lock(&proc->inner_lock);
313 }
314 
315 /**
316  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
317  * @proc:         struct binder_proc to release
318  *
319  * Release lock acquired via binder_inner_proc_lock()
320  */
321 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
322 static void
323 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
324 	__releases(&proc->inner_lock)
325 {
326 	binder_debug(BINDER_DEBUG_SPINLOCKS,
327 		     "%s: line=%d\n", __func__, line);
328 	spin_unlock(&proc->inner_lock);
329 }
330 
331 /**
332  * binder_node_lock() - Acquire spinlock for given binder_node
333  * @node:         struct binder_node to acquire
334  *
335  * Acquires node->lock. Used to protect binder_node fields
336  */
337 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
338 static void
339 _binder_node_lock(struct binder_node *node, int line)
340 	__acquires(&node->lock)
341 {
342 	binder_debug(BINDER_DEBUG_SPINLOCKS,
343 		     "%s: line=%d\n", __func__, line);
344 	spin_lock(&node->lock);
345 }
346 
347 /**
348  * binder_node_unlock() - Release spinlock for given binder_node
349  * @node:         struct binder_node to release
350  *
351  * Release lock acquired via binder_node_lock()
352  */
353 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
354 static void
355 _binder_node_unlock(struct binder_node *node, int line)
356 	__releases(&node->lock)
357 {
358 	binder_debug(BINDER_DEBUG_SPINLOCKS,
359 		     "%s: line=%d\n", __func__, line);
360 	spin_unlock(&node->lock);
361 }
362 
363 /**
364  * binder_node_inner_lock() - Acquire node and inner locks
365  * @node:         struct binder_node to acquire
366  *
367  * Acquires node->lock. If node->proc is non-NULL, also acquires
368  * node->proc->inner_lock. Used to protect binder_node fields
369  */
370 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
371 static void
372 _binder_node_inner_lock(struct binder_node *node, int line)
373 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
374 {
375 	binder_debug(BINDER_DEBUG_SPINLOCKS,
376 		     "%s: line=%d\n", __func__, line);
377 	spin_lock(&node->lock);
378 	if (node->proc)
379 		binder_inner_proc_lock(node->proc);
380 	else
381 		/* annotation for sparse */
382 		__acquire(&node->proc->inner_lock);
383 }
384 
385 /**
386  * binder_node_inner_unlock() - Release node and inner locks
387  * @node:         struct binder_node to release
388  *
389  * Release locks acquired via binder_node_inner_lock()
390  */
391 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
392 static void
393 _binder_node_inner_unlock(struct binder_node *node, int line)
394 	__releases(&node->lock) __releases(&node->proc->inner_lock)
395 {
396 	struct binder_proc *proc = node->proc;
397 
398 	binder_debug(BINDER_DEBUG_SPINLOCKS,
399 		     "%s: line=%d\n", __func__, line);
400 	if (proc)
401 		binder_inner_proc_unlock(proc);
402 	else
403 		/* annotation for sparse */
404 		__release(&node->proc->inner_lock);
405 	spin_unlock(&node->lock);
406 }
407 
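/*
 * Illustrative only: a hedged sketch (not driver code) showing the
 * helpers above taken in the order mandated by the "Locking overview"
 * at the top of this file: proc->outer_lock, then node->lock, then
 * proc->inner_lock.
 */
static inline void binder_lock_order_sketch(struct binder_proc *proc,
					    struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... touch refs, node fields and todo lists here ... */

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}
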
408 static bool binder_worklist_empty_ilocked(struct list_head *list)
409 {
410 	return list_empty(list);
411 }
412 
413 /**
414  * binder_worklist_empty() - Check if no items on the work list
415  * @proc:       binder_proc associated with list
416  * @list:	list to check
417  *
418  * Return: true if there are no items on list, else false
419  */
420 static bool binder_worklist_empty(struct binder_proc *proc,
421 				  struct list_head *list)
422 {
423 	bool ret;
424 
425 	binder_inner_proc_lock(proc);
426 	ret = binder_worklist_empty_ilocked(list);
427 	binder_inner_proc_unlock(proc);
428 	return ret;
429 }
430 
431 /**
432  * binder_enqueue_work_ilocked() - Add an item to the work list
433  * @work:         struct binder_work to add to list
434  * @target_list:  list to add work to
435  *
436  * Adds the work to the specified list. Asserts that work
437  * is not already on a list.
438  *
439  * Requires the proc->inner_lock to be held.
440  */
441 static void
442 binder_enqueue_work_ilocked(struct binder_work *work,
443 			   struct list_head *target_list)
444 {
445 	BUG_ON(target_list == NULL);
446 	BUG_ON(work->entry.next && !list_empty(&work->entry));
447 	list_add_tail(&work->entry, target_list);
448 }
449 
450 /**
451  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
452  * @thread:       thread to queue work to
453  * @work:         struct binder_work to add to list
454  *
455  * Adds the work to the todo list of the thread. Doesn't set the process_todo
456  * flag, which means that (if it wasn't already set) the thread will go to
457  * sleep without handling this work when it calls read.
458  *
459  * Requires the proc->inner_lock to be held.
460  */
461 static void
462 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
463 					    struct binder_work *work)
464 {
465 	WARN_ON(!list_empty(&thread->waiting_thread_node));
466 	binder_enqueue_work_ilocked(work, &thread->todo);
467 }
468 
469 /**
470  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
471  * @thread:       thread to queue work to
472  * @work:         struct binder_work to add to list
473  *
474  * Adds the work to the todo list of the thread, and enables processing
475  * of the todo queue.
476  *
477  * Requires the proc->inner_lock to be held.
478  */
479 static void
480 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
481 				   struct binder_work *work)
482 {
483 	WARN_ON(!list_empty(&thread->waiting_thread_node));
484 	binder_enqueue_work_ilocked(work, &thread->todo);
485 
486 	/* (e)poll-based threads require an explicit wakeup signal when
487 	 * queuing their own work; they rely on these events to consume
488 	 * messages without blocking on I/O. Without the wakeup, such
489 	 * threads risk waiting indefinitely without handling the work.
490 	 */
491 	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
492 	    thread->pid == current->pid && !thread->process_todo)
493 		wake_up_interruptible_sync(&thread->wait);
494 
495 	thread->process_todo = true;
496 }
497 
498 /**
499  * binder_enqueue_thread_work() - Add an item to the thread work list
500  * @thread:       thread to queue work to
501  * @work:         struct binder_work to add to list
502  *
503  * Adds the work to the todo list of the thread, and enables processing
504  * of the todo queue.
505  */
506 static void
507 binder_enqueue_thread_work(struct binder_thread *thread,
508 			   struct binder_work *work)
509 {
510 	binder_inner_proc_lock(thread->proc);
511 	binder_enqueue_thread_work_ilocked(thread, work);
512 	binder_inner_proc_unlock(thread->proc);
513 }
514 
515 static void
516 binder_dequeue_work_ilocked(struct binder_work *work)
517 {
518 	list_del_init(&work->entry);
519 }
520 
521 /**
522  * binder_dequeue_work() - Removes an item from the work list
523  * @proc:         binder_proc associated with list
524  * @work:         struct binder_work to remove from list
525  *
526  * Removes the specified work item from whatever list it is on.
527  * Can safely be called if work is not on any list.
528  */
529 static void
530 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
531 {
532 	binder_inner_proc_lock(proc);
533 	binder_dequeue_work_ilocked(work);
534 	binder_inner_proc_unlock(proc);
535 }
536 
537 static struct binder_work *binder_dequeue_work_head_ilocked(
538 					struct list_head *list)
539 {
540 	struct binder_work *w;
541 
542 	w = list_first_entry_or_null(list, struct binder_work, entry);
543 	if (w)
544 		list_del_init(&w->entry);
545 	return w;
546 }
547 
548 static void
549 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
550 static void binder_free_thread(struct binder_thread *thread);
551 static void binder_free_proc(struct binder_proc *proc);
552 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
553 
554 static bool binder_has_work_ilocked(struct binder_thread *thread,
555 				    bool do_proc_work)
556 {
557 	return thread->process_todo ||
558 		thread->looper_need_return ||
559 		(do_proc_work &&
560 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
561 }
562 
563 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
564 {
565 	bool has_work;
566 
567 	binder_inner_proc_lock(thread->proc);
568 	has_work = binder_has_work_ilocked(thread, do_proc_work);
569 	binder_inner_proc_unlock(thread->proc);
570 
571 	return has_work;
572 }
573 
574 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
575 {
576 	return !thread->transaction_stack &&
577 		binder_worklist_empty_ilocked(&thread->todo);
578 }
579 
580 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
581 					       bool sync)
582 {
583 	struct rb_node *n;
584 	struct binder_thread *thread;
585 
586 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
587 		thread = rb_entry(n, struct binder_thread, rb_node);
588 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
589 		    binder_available_for_proc_work_ilocked(thread)) {
590 			if (sync)
591 				wake_up_interruptible_sync(&thread->wait);
592 			else
593 				wake_up_interruptible(&thread->wait);
594 		}
595 	}
596 }
597 
598 /**
599  * binder_select_thread_ilocked() - selects a thread for doing proc work.
600  * @proc:	process to select a thread from
601  *
602  * Note that calling this function moves the thread off the waiting_threads
603  * list, so it can only be woken up by the caller of this function, or a
604  * signal. Therefore, callers *should* always wake up the thread this function
605  * returns.
606  *
607  * Return:	If there's a thread currently waiting for process work,
608  *		returns that thread. Otherwise returns NULL.
609  */
610 static struct binder_thread *
611 binder_select_thread_ilocked(struct binder_proc *proc)
612 {
613 	struct binder_thread *thread;
614 
615 	assert_spin_locked(&proc->inner_lock);
616 	thread = list_first_entry_or_null(&proc->waiting_threads,
617 					  struct binder_thread,
618 					  waiting_thread_node);
619 
620 	if (thread)
621 		list_del_init(&thread->waiting_thread_node);
622 
623 	return thread;
624 }
625 
626 /**
627  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
628  * @proc:	process to wake up a thread in
629  * @thread:	specific thread to wake-up (may be NULL)
630  * @sync:	whether to do a synchronous wake-up
631  *
632  * This function wakes up a thread in the @proc process.
633  * The caller may provide a specific thread to wake-up in
634  * the @thread parameter. If @thread is NULL, this function
635  * will wake up threads that have called poll().
636  *
637  * Note that for this function to work as expected, callers
638  * should first call binder_select_thread() to find a thread
639  * to handle the work (if they don't have a thread already),
640  * and pass the result into the @thread parameter.
641  */
642 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
643 					 struct binder_thread *thread,
644 					 bool sync)
645 {
646 	assert_spin_locked(&proc->inner_lock);
647 
648 	if (thread) {
649 		if (sync)
650 			wake_up_interruptible_sync(&thread->wait);
651 		else
652 			wake_up_interruptible(&thread->wait);
653 		return;
654 	}
655 
656 	/* Didn't find a thread waiting for proc work; this can happen
657 	 * in two scenarios:
658 	 * 1. All threads are busy handling transactions
659 	 *    In that case, one of those threads should call back into
660 	 *    the kernel driver soon and pick up this work.
661 	 * 2. Threads are using the (e)poll interface, in which case
662 	 *    they may be blocked on the waitqueue without having been
663 	 *    added to waiting_threads. For this case, we just iterate
664 	 *    over all threads not handling transaction work, and
665 	 *    wake them all up. We wake all because we don't know whether
666 	 *    a thread that called into (e)poll is handling non-binder
667 	 *    work currently.
668 	 */
669 	binder_wakeup_poll_threads_ilocked(proc, sync);
670 }
671 
672 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
673 {
674 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
675 
676 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
677 }
678 
679 static void binder_set_nice(long nice)
680 {
681 	long min_nice;
682 
683 	if (can_nice(current, nice)) {
684 		set_user_nice(current, nice);
685 		return;
686 	}
687 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
688 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
689 		     "%d: nice value %ld not allowed use %ld instead\n",
690 		      current->pid, nice, min_nice);
691 	set_user_nice(current, min_nice);
692 	if (min_nice <= MAX_NICE)
693 		return;
694 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
695 }
696 
697 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
698 						   binder_uintptr_t ptr)
699 {
700 	struct rb_node *n = proc->nodes.rb_node;
701 	struct binder_node *node;
702 
703 	assert_spin_locked(&proc->inner_lock);
704 
705 	while (n) {
706 		node = rb_entry(n, struct binder_node, rb_node);
707 
708 		if (ptr < node->ptr)
709 			n = n->rb_left;
710 		else if (ptr > node->ptr)
711 			n = n->rb_right;
712 		else {
713 			/*
714 			 * take an implicit weak reference
715 			 * to ensure node stays alive until
716 			 * call to binder_put_node()
717 			 */
718 			binder_inc_node_tmpref_ilocked(node);
719 			return node;
720 		}
721 	}
722 	return NULL;
723 }
724 
725 static struct binder_node *binder_get_node(struct binder_proc *proc,
726 					   binder_uintptr_t ptr)
727 {
728 	struct binder_node *node;
729 
730 	binder_inner_proc_lock(proc);
731 	node = binder_get_node_ilocked(proc, ptr);
732 	binder_inner_proc_unlock(proc);
733 	return node;
734 }
735 
736 static struct binder_node *binder_init_node_ilocked(
737 						struct binder_proc *proc,
738 						struct binder_node *new_node,
739 						struct flat_binder_object *fp)
740 {
741 	struct rb_node **p = &proc->nodes.rb_node;
742 	struct rb_node *parent = NULL;
743 	struct binder_node *node;
744 	binder_uintptr_t ptr = fp ? fp->binder : 0;
745 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
746 	__u32 flags = fp ? fp->flags : 0;
747 
748 	assert_spin_locked(&proc->inner_lock);
749 
750 	while (*p) {
751 
752 		parent = *p;
753 		node = rb_entry(parent, struct binder_node, rb_node);
754 
755 		if (ptr < node->ptr)
756 			p = &(*p)->rb_left;
757 		else if (ptr > node->ptr)
758 			p = &(*p)->rb_right;
759 		else {
760 			/*
761 			 * A matching node is already in
762 			 * the rb tree. Abandon the init
763 			 * and return it.
764 			 */
765 			binder_inc_node_tmpref_ilocked(node);
766 			return node;
767 		}
768 	}
769 	node = new_node;
770 	binder_stats_created(BINDER_STAT_NODE);
771 	node->tmp_refs++;
772 	rb_link_node(&node->rb_node, parent, p);
773 	rb_insert_color(&node->rb_node, &proc->nodes);
774 	node->debug_id = atomic_inc_return(&binder_last_id);
775 	node->proc = proc;
776 	node->ptr = ptr;
777 	node->cookie = cookie;
778 	node->work.type = BINDER_WORK_NODE;
779 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
780 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
781 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
782 	spin_lock_init(&node->lock);
783 	INIT_LIST_HEAD(&node->work.entry);
784 	INIT_LIST_HEAD(&node->async_todo);
785 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
786 		     "%d:%d node %d u%016llx c%016llx created\n",
787 		     proc->pid, current->pid, node->debug_id,
788 		     (u64)node->ptr, (u64)node->cookie);
789 
790 	return node;
791 }
792 
793 static struct binder_node *binder_new_node(struct binder_proc *proc,
794 					   struct flat_binder_object *fp)
795 {
796 	struct binder_node *node;
797 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
798 
799 	if (!new_node)
800 		return NULL;
801 	binder_inner_proc_lock(proc);
802 	node = binder_init_node_ilocked(proc, new_node, fp);
803 	binder_inner_proc_unlock(proc);
804 	if (node != new_node)
805 		/*
806 		 * The node was already added by another thread
807 		 */
808 		kfree(new_node);
809 
810 	return node;
811 }
812 
813 static void binder_free_node(struct binder_node *node)
814 {
815 	kfree(node);
816 	binder_stats_deleted(BINDER_STAT_NODE);
817 }
818 
819 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
820 				    int internal,
821 				    struct list_head *target_list)
822 {
823 	struct binder_proc *proc = node->proc;
824 
825 	assert_spin_locked(&node->lock);
826 	if (proc)
827 		assert_spin_locked(&proc->inner_lock);
828 	if (strong) {
829 		if (internal) {
830 			if (target_list == NULL &&
831 			    node->internal_strong_refs == 0 &&
832 			    !(node->proc &&
833 			      node == node->proc->context->binder_context_mgr_node &&
834 			      node->has_strong_ref)) {
835 				pr_err("invalid inc strong node for %d\n",
836 					node->debug_id);
837 				return -EINVAL;
838 			}
839 			node->internal_strong_refs++;
840 		} else
841 			node->local_strong_refs++;
842 		if (!node->has_strong_ref && target_list) {
843 			struct binder_thread *thread = container_of(target_list,
844 						    struct binder_thread, todo);
845 			binder_dequeue_work_ilocked(&node->work);
846 			BUG_ON(&thread->todo != target_list);
847 			binder_enqueue_deferred_thread_work_ilocked(thread,
848 								   &node->work);
849 		}
850 	} else {
851 		if (!internal)
852 			node->local_weak_refs++;
853 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
854 			if (target_list == NULL) {
855 				pr_err("invalid inc weak node for %d\n",
856 					node->debug_id);
857 				return -EINVAL;
858 			}
859 			/*
860 			 * See comment above
861 			 */
862 			binder_enqueue_work_ilocked(&node->work, target_list);
863 		}
864 	}
865 	return 0;
866 }
867 
868 static int binder_inc_node(struct binder_node *node, int strong, int internal,
869 			   struct list_head *target_list)
870 {
871 	int ret;
872 
873 	binder_node_inner_lock(node);
874 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
875 	binder_node_inner_unlock(node);
876 
877 	return ret;
878 }
879 
880 static bool binder_dec_node_nilocked(struct binder_node *node,
881 				     int strong, int internal)
882 {
883 	struct binder_proc *proc = node->proc;
884 
885 	assert_spin_locked(&node->lock);
886 	if (proc)
887 		assert_spin_locked(&proc->inner_lock);
888 	if (strong) {
889 		if (internal)
890 			node->internal_strong_refs--;
891 		else
892 			node->local_strong_refs--;
893 		if (node->local_strong_refs || node->internal_strong_refs)
894 			return false;
895 	} else {
896 		if (!internal)
897 			node->local_weak_refs--;
898 		if (node->local_weak_refs || node->tmp_refs ||
899 				!hlist_empty(&node->refs))
900 			return false;
901 	}
902 
903 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
904 		if (list_empty(&node->work.entry)) {
905 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
906 			binder_wakeup_proc_ilocked(proc);
907 		}
908 	} else {
909 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
910 		    !node->local_weak_refs && !node->tmp_refs) {
911 			if (proc) {
912 				binder_dequeue_work_ilocked(&node->work);
913 				rb_erase(&node->rb_node, &proc->nodes);
914 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
915 					     "refless node %d deleted\n",
916 					     node->debug_id);
917 			} else {
918 				BUG_ON(!list_empty(&node->work.entry));
919 				spin_lock(&binder_dead_nodes_lock);
920 				/*
921 				 * tmp_refs could have changed so
922 				 * check it again
923 				 */
924 				if (node->tmp_refs) {
925 					spin_unlock(&binder_dead_nodes_lock);
926 					return false;
927 				}
928 				hlist_del(&node->dead_node);
929 				spin_unlock(&binder_dead_nodes_lock);
930 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
931 					     "dead node %d deleted\n",
932 					     node->debug_id);
933 			}
934 			return true;
935 		}
936 	}
937 	return false;
938 }
939 
940 static void binder_dec_node(struct binder_node *node, int strong, int internal)
941 {
942 	bool free_node;
943 
944 	binder_node_inner_lock(node);
945 	free_node = binder_dec_node_nilocked(node, strong, internal);
946 	binder_node_inner_unlock(node);
947 	if (free_node)
948 		binder_free_node(node);
949 }
950 
951 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
952 {
953 	/*
954 	 * No call to binder_inc_node() is needed since we
955 	 * don't need to inform userspace of any changes to
956 	 * tmp_refs
957 	 */
958 	node->tmp_refs++;
959 }
960 
961 /**
962  * binder_inc_node_tmpref() - take a temporary reference on node
963  * @node:	node to reference
964  *
965  * Take reference on node to prevent the node from being freed
966  * while referenced only by a local variable. The inner lock is
967  * needed to serialize with the node work on the queue (which
968  * isn't needed after the node is dead). If the node is dead
969  * (node->proc is NULL), use binder_dead_nodes_lock to protect
970  * node->tmp_refs against dead-node-only cases where the node
971  * lock cannot be acquired (e.g. traversing the dead node list to
972  * print nodes)
973  */
974 static void binder_inc_node_tmpref(struct binder_node *node)
975 {
976 	binder_node_lock(node);
977 	if (node->proc)
978 		binder_inner_proc_lock(node->proc);
979 	else
980 		spin_lock(&binder_dead_nodes_lock);
981 	binder_inc_node_tmpref_ilocked(node);
982 	if (node->proc)
983 		binder_inner_proc_unlock(node->proc);
984 	else
985 		spin_unlock(&binder_dead_nodes_lock);
986 	binder_node_unlock(node);
987 }
988 
989 /**
990  * binder_dec_node_tmpref() - remove a temporary reference on node
991  * @node:	node to drop the temporary reference on
992  *
993  * Release temporary reference on node taken via binder_inc_node_tmpref()
994  */
995 static void binder_dec_node_tmpref(struct binder_node *node)
996 {
997 	bool free_node;
998 
999 	binder_node_inner_lock(node);
1000 	if (!node->proc)
1001 		spin_lock(&binder_dead_nodes_lock);
1002 	else
1003 		__acquire(&binder_dead_nodes_lock);
1004 	node->tmp_refs--;
1005 	BUG_ON(node->tmp_refs < 0);
1006 	if (!node->proc)
1007 		spin_unlock(&binder_dead_nodes_lock);
1008 	else
1009 		__release(&binder_dead_nodes_lock);
1010 	/*
1011 	 * Call binder_dec_node() to check if all refcounts are 0
1012 	 * and cleanup is needed. Calling with strong=0 and internal=1
1013 	 * causes no actual reference to be released in binder_dec_node().
1014 	 * If that changes, a change is needed here too.
1015 	 */
1016 	free_node = binder_dec_node_nilocked(node, 0, 1);
1017 	binder_node_inner_unlock(node);
1018 	if (free_node)
1019 		binder_free_node(node);
1020 }
1021 
1022 static void binder_put_node(struct binder_node *node)
1023 {
1024 	binder_dec_node_tmpref(node);
1025 }
1026 
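/*
 * Illustrative only: a hedged sketch of the lookup/put pattern the
 * tmpref helpers above support. binder_get_node() returns with an
 * implicit tmp_ref held, keeping the node alive until the matching
 * binder_put_node().
 */
static inline void binder_node_lookup_sketch(struct binder_proc *proc,
					     binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return;
	/* ... node cannot be freed while the tmp_ref is held ... */
	binder_put_node(node);
}
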
1027 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1028 						 u32 desc, bool need_strong_ref)
1029 {
1030 	struct rb_node *n = proc->refs_by_desc.rb_node;
1031 	struct binder_ref *ref;
1032 
1033 	while (n) {
1034 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1035 
1036 		if (desc < ref->data.desc) {
1037 			n = n->rb_left;
1038 		} else if (desc > ref->data.desc) {
1039 			n = n->rb_right;
1040 		} else if (need_strong_ref && !ref->data.strong) {
1041 			binder_user_error("tried to use weak ref as strong ref\n");
1042 			return NULL;
1043 		} else {
1044 			return ref;
1045 		}
1046 	}
1047 	return NULL;
1048 }
1049 
1050 /* Find the smallest unused descriptor the "slow way" */
1051 static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
1052 {
1053 	struct binder_ref *ref;
1054 	struct rb_node *n;
1055 	u32 desc;
1056 
1057 	desc = offset;
1058 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1059 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1060 		if (ref->data.desc > desc)
1061 			break;
1062 		desc = ref->data.desc + 1;
1063 	}
1064 
1065 	return desc;
1066 }
1067 
1068 /*
1069  * Find an available reference descriptor ID. The proc->outer_lock might
1070  * be released in the process, in which case -EAGAIN is returned and the
1071  * @desc should be considered invalid.
1072  */
1073 static int get_ref_desc_olocked(struct binder_proc *proc,
1074 				struct binder_node *node,
1075 				u32 *desc)
1076 {
1077 	struct dbitmap *dmap = &proc->dmap;
1078 	unsigned int nbits, offset;
1079 	unsigned long *new, bit;
1080 
1081 	/* 0 is reserved for the context manager */
1082 	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
1083 
1084 	if (!dbitmap_enabled(dmap)) {
1085 		*desc = slow_desc_lookup_olocked(proc, offset);
1086 		return 0;
1087 	}
1088 
1089 	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
1090 		*desc = bit;
1091 		return 0;
1092 	}
1093 
1094 	/*
1095 	 * The dbitmap is full and needs to grow. The proc->outer_lock
1096 	 * is briefly released to allocate the new bitmap safely.
1097 	 */
1098 	nbits = dbitmap_grow_nbits(dmap);
1099 	binder_proc_unlock(proc);
1100 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1101 	binder_proc_lock(proc);
1102 	dbitmap_grow(dmap, new, nbits);
1103 
1104 	return -EAGAIN;
1105 }
1106 
1107 /**
1108  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1109  * @proc:	binder_proc that owns the ref
1110  * @node:	binder_node of target
1111  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1112  *
1113  * Look up the ref for the given node and return it if it exists
1114  *
1115  * If it doesn't exist and the caller provides a newly allocated
1116  * ref, initialize the fields of the newly allocated ref and insert
1117  * into the given proc rb_trees and node refs list.
1118  *
1119  * Return:	the ref for node. It is possible that another thread
1120  *		allocated/initialized the ref first in which case the
1121  *		returned ref would be different from the passed-in
1122  *		new_ref. new_ref must be kfree'd by the caller in
1123  *		this case.
1124  */
1125 static struct binder_ref *binder_get_ref_for_node_olocked(
1126 					struct binder_proc *proc,
1127 					struct binder_node *node,
1128 					struct binder_ref *new_ref)
1129 {
1130 	struct binder_ref *ref;
1131 	struct rb_node *parent;
1132 	struct rb_node **p;
1133 	u32 desc;
1134 
1135 retry:
1136 	p = &proc->refs_by_node.rb_node;
1137 	parent = NULL;
1138 	while (*p) {
1139 		parent = *p;
1140 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1141 
1142 		if (node < ref->node)
1143 			p = &(*p)->rb_left;
1144 		else if (node > ref->node)
1145 			p = &(*p)->rb_right;
1146 		else
1147 			return ref;
1148 	}
1149 	if (!new_ref)
1150 		return NULL;
1151 
1152 	/* might release the proc->outer_lock */
1153 	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1154 		goto retry;
1155 
1156 	binder_stats_created(BINDER_STAT_REF);
1157 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1158 	new_ref->proc = proc;
1159 	new_ref->node = node;
1160 	rb_link_node(&new_ref->rb_node_node, parent, p);
1161 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1162 
1163 	new_ref->data.desc = desc;
1164 	p = &proc->refs_by_desc.rb_node;
1165 	while (*p) {
1166 		parent = *p;
1167 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1168 
1169 		if (new_ref->data.desc < ref->data.desc)
1170 			p = &(*p)->rb_left;
1171 		else if (new_ref->data.desc > ref->data.desc)
1172 			p = &(*p)->rb_right;
1173 		else
1174 			BUG();
1175 	}
1176 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1177 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1178 
1179 	binder_node_lock(node);
1180 	hlist_add_head(&new_ref->node_entry, &node->refs);
1181 
1182 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1183 		     "%d new ref %d desc %d for node %d\n",
1184 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1185 		      node->debug_id);
1186 	binder_node_unlock(node);
1187 	return new_ref;
1188 }
1189 
1190 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1191 {
1192 	struct dbitmap *dmap = &ref->proc->dmap;
1193 	bool delete_node = false;
1194 
1195 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1196 		     "%d delete ref %d desc %d for node %d\n",
1197 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1198 		      ref->node->debug_id);
1199 
1200 	if (dbitmap_enabled(dmap))
1201 		dbitmap_clear_bit(dmap, ref->data.desc);
1202 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1203 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1204 
1205 	binder_node_inner_lock(ref->node);
1206 	if (ref->data.strong)
1207 		binder_dec_node_nilocked(ref->node, 1, 1);
1208 
1209 	hlist_del(&ref->node_entry);
1210 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1211 	binder_node_inner_unlock(ref->node);
1212 	/*
1213 	 * Clear ref->node unless we want the caller to free the node
1214 	 */
1215 	if (!delete_node) {
1216 		/*
1217 		 * The caller uses ref->node to determine
1218 		 * whether the node needs to be freed. Clear
1219 		 * it since the node is still alive.
1220 		 */
1221 		ref->node = NULL;
1222 	}
1223 
1224 	if (ref->death) {
1225 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1226 			     "%d delete ref %d desc %d has death notification\n",
1227 			      ref->proc->pid, ref->data.debug_id,
1228 			      ref->data.desc);
1229 		binder_dequeue_work(ref->proc, &ref->death->work);
1230 		binder_stats_deleted(BINDER_STAT_DEATH);
1231 	}
1232 
1233 	if (ref->freeze) {
1234 		binder_dequeue_work(ref->proc, &ref->freeze->work);
1235 		binder_stats_deleted(BINDER_STAT_FREEZE);
1236 	}
1237 
1238 	binder_stats_deleted(BINDER_STAT_REF);
1239 }
1240 
1241 /**
1242  * binder_inc_ref_olocked() - increment the ref for given handle
1243  * @ref:         ref to be incremented
1244  * @strong:      if true, strong increment, else weak
1245  * @target_list: list to queue node work on
1246  *
1247  * Increment the ref. @ref->proc->outer_lock must be held on entry
1248  *
1249  * Return: 0, if successful, else errno
1250  */
1251 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1252 				  struct list_head *target_list)
1253 {
1254 	int ret;
1255 
1256 	if (strong) {
1257 		if (ref->data.strong == 0) {
1258 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1259 			if (ret)
1260 				return ret;
1261 		}
1262 		ref->data.strong++;
1263 	} else {
1264 		if (ref->data.weak == 0) {
1265 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1266 			if (ret)
1267 				return ret;
1268 		}
1269 		ref->data.weak++;
1270 	}
1271 	return 0;
1272 }
1273 
1274 /**
1275  * binder_dec_ref_olocked() - dec the ref for given handle
1276  * @ref:	ref to be decremented
1277  * @strong:	if true, strong decrement, else weak
1278  *
1279  * Decrement the ref.
1280  *
1281  * Return: %true if ref is cleaned up and ready to be freed.
1282  */
1283 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1284 {
1285 	if (strong) {
1286 		if (ref->data.strong == 0) {
1287 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1288 					  ref->proc->pid, ref->data.debug_id,
1289 					  ref->data.desc, ref->data.strong,
1290 					  ref->data.weak);
1291 			return false;
1292 		}
1293 		ref->data.strong--;
1294 		if (ref->data.strong == 0)
1295 			binder_dec_node(ref->node, strong, 1);
1296 	} else {
1297 		if (ref->data.weak == 0) {
1298 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1299 					  ref->proc->pid, ref->data.debug_id,
1300 					  ref->data.desc, ref->data.strong,
1301 					  ref->data.weak);
1302 			return false;
1303 		}
1304 		ref->data.weak--;
1305 	}
1306 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1307 		binder_cleanup_ref_olocked(ref);
1308 		return true;
1309 	}
1310 	return false;
1311 }
1312 
1313 /**
1314  * binder_get_node_from_ref() - get the node from the given proc/desc
1315  * @proc:	proc containing the ref
1316  * @desc:	the handle associated with the ref
1317  * @need_strong_ref: if true, only return node if ref is strong
1318  * @rdata:	the id/refcount data for the ref
1319  *
1320  * Given a proc and ref handle, return the associated binder_node
1321  *
1322  * Return: a binder_node, or NULL if no ref was found or the ref is weak when a strong ref is required
1323  */
1324 static struct binder_node *binder_get_node_from_ref(
1325 		struct binder_proc *proc,
1326 		u32 desc, bool need_strong_ref,
1327 		struct binder_ref_data *rdata)
1328 {
1329 	struct binder_node *node;
1330 	struct binder_ref *ref;
1331 
1332 	binder_proc_lock(proc);
1333 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1334 	if (!ref)
1335 		goto err_no_ref;
1336 	node = ref->node;
1337 	/*
1338 	 * Take an implicit reference on the node to ensure
1339 	 * it stays alive until the call to binder_put_node()
1340 	 */
1341 	binder_inc_node_tmpref(node);
1342 	if (rdata)
1343 		*rdata = ref->data;
1344 	binder_proc_unlock(proc);
1345 
1346 	return node;
1347 
1348 err_no_ref:
1349 	binder_proc_unlock(proc);
1350 	return NULL;
1351 }
1352 
1353 /**
1354  * binder_free_ref() - free the binder_ref
1355  * @ref:	ref to free
1356  *
1357  * Free the binder_ref. Free the binder_node indicated by ref->node
1358  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1359  */
1360 static void binder_free_ref(struct binder_ref *ref)
1361 {
1362 	if (ref->node)
1363 		binder_free_node(ref->node);
1364 	kfree(ref->death);
1365 	kfree(ref->freeze);
1366 	kfree(ref);
1367 }
1368 
1369 /* shrink descriptor bitmap if needed */
1370 static void try_shrink_dmap(struct binder_proc *proc)
1371 {
1372 	unsigned long *new;
1373 	int nbits;
1374 
1375 	binder_proc_lock(proc);
1376 	nbits = dbitmap_shrink_nbits(&proc->dmap);
1377 	binder_proc_unlock(proc);
1378 
1379 	if (!nbits)
1380 		return;
1381 
1382 	new = bitmap_zalloc(nbits, GFP_KERNEL);
1383 	binder_proc_lock(proc);
1384 	dbitmap_shrink(&proc->dmap, new, nbits);
1385 	binder_proc_unlock(proc);
1386 }
1387 
1388 /**
1389  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1390  * @proc:	proc containing the ref
1391  * @desc:	the handle associated with the ref
1392  * @increment:	true=inc reference, false=dec reference
1393  * @strong:	true=strong reference, false=weak reference
1394  * @rdata:	the id/refcount data for the ref
1395  *
1396  * Given a proc and ref handle, increment or decrement the ref
1397  * according to "increment" arg.
1398  *
1399  * Return: 0 if successful, else errno
1400  */
1401 static int binder_update_ref_for_handle(struct binder_proc *proc,
1402 		uint32_t desc, bool increment, bool strong,
1403 		struct binder_ref_data *rdata)
1404 {
1405 	int ret = 0;
1406 	struct binder_ref *ref;
1407 	bool delete_ref = false;
1408 
1409 	binder_proc_lock(proc);
1410 	ref = binder_get_ref_olocked(proc, desc, strong);
1411 	if (!ref) {
1412 		ret = -EINVAL;
1413 		goto err_no_ref;
1414 	}
1415 	if (increment)
1416 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1417 	else
1418 		delete_ref = binder_dec_ref_olocked(ref, strong);
1419 
1420 	if (rdata)
1421 		*rdata = ref->data;
1422 	binder_proc_unlock(proc);
1423 
1424 	if (delete_ref) {
1425 		binder_free_ref(ref);
1426 		try_shrink_dmap(proc);
1427 	}
1428 	return ret;
1429 
1430 err_no_ref:
1431 	binder_proc_unlock(proc);
1432 	return ret;
1433 }
1434 
1435 /**
1436  * binder_dec_ref_for_handle() - dec the ref for given handle
1437  * @proc:	proc containing the ref
1438  * @desc:	the handle associated with the ref
1439  * @strong:	true=strong reference, false=weak reference
1440  * @rdata:	the id/refcount data for the ref
1441  *
1442  * Just calls binder_update_ref_for_handle() to decrement the ref.
1443  *
1444  * Return: 0 if successful, else errno
1445  */
1446 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1447 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1448 {
1449 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1450 }
1451 
1452 
1453 /**
1454  * binder_inc_ref_for_node() - increment the ref for given proc/node
1455  * @proc:	 proc containing the ref
1456  * @node:	 target node
1457  * @strong:	 true=strong reference, false=weak reference
1458  * @target_list: worklist to use if node is incremented
1459  * @rdata:	 the id/refcount data for the ref
1460  *
1461  * Given a proc and node, increment the ref. Create the ref if it
1462  * doesn't already exist
1463  *
1464  * Return: 0 if successful, else errno
1465  */
1466 static int binder_inc_ref_for_node(struct binder_proc *proc,
1467 			struct binder_node *node,
1468 			bool strong,
1469 			struct list_head *target_list,
1470 			struct binder_ref_data *rdata)
1471 {
1472 	struct binder_ref *ref;
1473 	struct binder_ref *new_ref = NULL;
1474 	int ret = 0;
1475 
1476 	binder_proc_lock(proc);
1477 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1478 	if (!ref) {
1479 		binder_proc_unlock(proc);
1480 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1481 		if (!new_ref)
1482 			return -ENOMEM;
1483 		binder_proc_lock(proc);
1484 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1485 	}
1486 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1487 	*rdata = ref->data;
1488 	if (ret && ref == new_ref) {
1489 		/*
1490 		 * Cleanup the failed reference here as the target
1491 		 * could now be dead and have already released its
1492 		 * references by now. Calling the cleanup on the new
1493 		 * reference with strong == 0 and a tmp_ref held will not
1494 		 * decrement the node. The new_ref gets kfree'd below.
1495 		 */
1496 		binder_cleanup_ref_olocked(new_ref);
1497 		ref = NULL;
1498 	}
1499 
1500 	binder_proc_unlock(proc);
1501 	if (new_ref && ref != new_ref)
1502 		/*
1503 		 * Another thread created the ref first so
1504 		 * free the one we allocated
1505 		 */
1506 		kfree(new_ref);
1507 	return ret;
1508 }
1509 
1510 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1511 					   struct binder_transaction *t)
1512 {
1513 	BUG_ON(!target_thread);
1514 	assert_spin_locked(&target_thread->proc->inner_lock);
1515 	BUG_ON(target_thread->transaction_stack != t);
1516 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1517 	target_thread->transaction_stack =
1518 		target_thread->transaction_stack->from_parent;
1519 	t->from = NULL;
1520 }
1521 
1522 /**
1523  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1524  * @thread:	thread to decrement
1525  *
1526  * A thread needs to be kept alive while being used to create or
1527  * handle a transaction. binder_get_txn_from() is used to safely
1528  * extract t->from from a binder_transaction and keep the thread
1529  * indicated by t->from from being freed. When done with that
1530  * binder_thread, this function is called to decrement the
1531  * tmp_ref and free if appropriate (thread has been released
1532  * and no transaction being processed by the driver)
1533  */
1534 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1535 {
1536 	/*
1537 	 * An atomic is used because tmp_ref can be incremented
1538 	 * without holding the inner lock (see binder_get_txn_from())
1539 	 */
1540 	binder_inner_proc_lock(thread->proc);
1541 	atomic_dec(&thread->tmp_ref);
1542 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1543 		binder_inner_proc_unlock(thread->proc);
1544 		binder_free_thread(thread);
1545 		return;
1546 	}
1547 	binder_inner_proc_unlock(thread->proc);
1548 }
1549 
1550 /**
1551  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1552  * @proc:	proc to decrement
1553  *
1554  * A binder_proc needs to be kept alive while being used to create or
1555  * handle a transaction. proc->tmp_ref is incremented when
1556  * creating a new transaction or the binder_proc is currently in-use
1557  * by threads that are being released. When done with the binder_proc,
1558  * this function is called to decrement the counter and free the
1559  * proc if appropriate (proc has been released, all threads have
1560  * been released and not currently in-use to process a transaction).
1561  */
1562 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1563 {
1564 	binder_inner_proc_lock(proc);
1565 	proc->tmp_ref--;
1566 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1567 			!proc->tmp_ref) {
1568 		binder_inner_proc_unlock(proc);
1569 		binder_free_proc(proc);
1570 		return;
1571 	}
1572 	binder_inner_proc_unlock(proc);
1573 }
1574 
1575 /**
1576  * binder_get_txn_from() - safely extract the "from" thread in transaction
1577  * @t:	binder transaction for t->from
1578  *
1579  * Atomically return the "from" thread and increment the tmp_ref
1580  * count for the thread to ensure it stays alive until
1581  * binder_thread_dec_tmpref() is called.
1582  *
1583  * Return: the value of t->from
1584  */
1585 static struct binder_thread *binder_get_txn_from(
1586 		struct binder_transaction *t)
1587 {
1588 	struct binder_thread *from;
1589 
1590 	guard(spinlock)(&t->lock);
1591 	from = t->from;
1592 	if (from)
1593 		atomic_inc(&from->tmp_ref);
1594 	return from;
1595 }
1596 
1597 /**
1598  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1599  * @t:	binder transaction for t->from
1600  *
1601  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1602  * to guarantee that the thread cannot be released while operating on it.
1603  * The caller must call binder_inner_proc_unlock() to release the inner lock
1604  * as well as call binder_dec_thread_txn() to release the reference.
1605  *
1606  * Return: the value of t->from
1607  */
1608 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1609 		struct binder_transaction *t)
1610 	__acquires(&t->from->proc->inner_lock)
1611 {
1612 	struct binder_thread *from;
1613 
1614 	from = binder_get_txn_from(t);
1615 	if (!from) {
1616 		__acquire(&from->proc->inner_lock);
1617 		return NULL;
1618 	}
1619 	binder_inner_proc_lock(from->proc);
1620 	if (t->from) {
1621 		BUG_ON(from != t->from);
1622 		return from;
1623 	}
1624 	binder_inner_proc_unlock(from->proc);
1625 	__acquire(&from->proc->inner_lock);
1626 	binder_thread_dec_tmpref(from);
1627 	return NULL;
1628 }
1629 
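/*
 * Illustrative only: a hedged sketch of the calling contract for
 * binder_get_txn_from_and_acq_inner() documented above. On success
 * the caller holds from->proc->inner_lock plus a tmp_ref on the
 * thread, and must drop both when done.
 */
static inline void binder_txn_from_sketch(struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);

	if (!from)
		return;
	/* ... "from" cannot be released while the inner lock is held ... */
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}
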
1630 /**
1631  * binder_free_txn_fixups() - free unprocessed fd fixups
1632  * @t:	binder transaction whose fd fixups are to be freed
1633  *
1634  * If the transaction is being torn down prior to being
1635  * processed by the target process, free all of the
1636  * fd fixups and fput the file structs. It is safe to
1637  * call this function after the fixups have been
1638  * processed -- in that case, the list will be empty.
1639  */
1640 static void binder_free_txn_fixups(struct binder_transaction *t)
1641 {
1642 	struct binder_txn_fd_fixup *fixup, *tmp;
1643 
1644 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1645 		fput(fixup->file);
1646 		if (fixup->target_fd >= 0)
1647 			put_unused_fd(fixup->target_fd);
1648 		list_del(&fixup->fixup_entry);
1649 		kfree(fixup);
1650 	}
1651 }
1652 
1653 static void binder_txn_latency_free(struct binder_transaction *t)
1654 {
1655 	int from_proc, from_thread, to_proc, to_thread;
1656 
1657 	spin_lock(&t->lock);
1658 	from_proc = t->from ? t->from->proc->pid : 0;
1659 	from_thread = t->from ? t->from->pid : 0;
1660 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1661 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1662 	spin_unlock(&t->lock);
1663 
1664 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1665 }
1666 
1667 static void binder_free_transaction(struct binder_transaction *t)
1668 {
1669 	struct binder_proc *target_proc = t->to_proc;
1670 
1671 	if (target_proc) {
1672 		binder_inner_proc_lock(target_proc);
1673 		target_proc->outstanding_txns--;
1674 		if (target_proc->outstanding_txns < 0)
1675 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1676 				__func__, target_proc->outstanding_txns);
1677 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1678 			wake_up_interruptible_all(&target_proc->freeze_wait);
1679 		if (t->buffer)
1680 			t->buffer->transaction = NULL;
1681 		binder_inner_proc_unlock(target_proc);
1682 	}
1683 	if (trace_binder_txn_latency_free_enabled())
1684 		binder_txn_latency_free(t);
1685 	/*
1686 	 * If the transaction has no target_proc, then
1687 	 * t->buffer->transaction has already been cleared.
1688 	 */
1689 	binder_free_txn_fixups(t);
1690 	kfree(t);
1691 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1692 }
1693 
1694 static void binder_send_failed_reply(struct binder_transaction *t,
1695 				     uint32_t error_code)
1696 {
1697 	struct binder_thread *target_thread;
1698 	struct binder_transaction *next;
1699 
1700 	BUG_ON(t->flags & TF_ONE_WAY);
1701 	while (1) {
1702 		target_thread = binder_get_txn_from_and_acq_inner(t);
1703 		if (target_thread) {
1704 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1705 				     "send failed reply for transaction %d to %d:%d\n",
1706 				      t->debug_id,
1707 				      target_thread->proc->pid,
1708 				      target_thread->pid);
1709 
1710 			binder_pop_transaction_ilocked(target_thread, t);
1711 			if (target_thread->reply_error.cmd == BR_OK) {
1712 				target_thread->reply_error.cmd = error_code;
1713 				binder_enqueue_thread_work_ilocked(
1714 					target_thread,
1715 					&target_thread->reply_error.work);
1716 				wake_up_interruptible(&target_thread->wait);
1717 			} else {
1718 				/*
1719 				 * Cannot get here for normal operation, but
1720 				 * we can if multiple synchronous transactions
1721 				 * are sent without blocking for responses.
1722 				 * Just ignore the 2nd error in this case.
1723 				 */
1724 				pr_warn("Unexpected reply error: %u\n",
1725 					target_thread->reply_error.cmd);
1726 			}
1727 			binder_inner_proc_unlock(target_thread->proc);
1728 			binder_thread_dec_tmpref(target_thread);
1729 			binder_free_transaction(t);
1730 			return;
1731 		}
1732 		__release(&target_thread->proc->inner_lock);
1733 		next = t->from_parent;
1734 
1735 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1736 			     "send failed reply for transaction %d, target dead\n",
1737 			     t->debug_id);
1738 
1739 		binder_free_transaction(t);
1740 		if (next == NULL) {
1741 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1742 				     "reply failed, no target thread at root\n");
1743 			return;
1744 		}
1745 		t = next;
1746 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1747 			     "reply failed, no target thread -- retry %d\n",
1748 			      t->debug_id);
1749 	}
1750 }
1751 
1752 /**
1753  * binder_cleanup_transaction() - cleans up undelivered transaction
1754  * @t:		transaction that needs to be cleaned up
1755  * @reason:	reason the transaction wasn't delivered
1756  * @error_code:	error to return to caller (if synchronous call)
1757  */
1758 static void binder_cleanup_transaction(struct binder_transaction *t,
1759 				       const char *reason,
1760 				       uint32_t error_code)
1761 {
1762 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1763 		binder_send_failed_reply(t, error_code);
1764 	} else {
1765 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1766 			"undelivered transaction %d, %s\n",
1767 			t->debug_id, reason);
1768 		binder_free_transaction(t);
1769 	}
1770 }
1771 
1772 /**
1773  * binder_get_object() - gets object and checks for valid metadata
1774  * @proc:	binder_proc owning the buffer
1775  * @u:		sender's user pointer to base of buffer
1776  * @buffer:	binder_buffer that we're parsing.
1777  * @offset:	offset in the @buffer at which to validate an object.
1778  * @object:	struct binder_object to read into
1779  *
1780  * Copy the binder object at the given offset into @object. If @u is
1781  * provided then the copy is from the sender's buffer. If not, then
1782  * it is copied from the target's @buffer.
1783  *
1784  * Return:	If there's a valid metadata object at @offset, the
1785  *		size of that object. Otherwise, it returns zero. The object
1786  *		is read into the struct binder_object pointed to by @object.
1787  */
1788 static size_t binder_get_object(struct binder_proc *proc,
1789 				const void __user *u,
1790 				struct binder_buffer *buffer,
1791 				unsigned long offset,
1792 				struct binder_object *object)
1793 {
1794 	size_t read_size;
1795 	struct binder_object_header *hdr;
1796 	size_t object_size = 0;
1797 
1798 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1799 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1800 	    !IS_ALIGNED(offset, sizeof(u32)))
1801 		return 0;
1802 
1803 	if (u) {
1804 		if (copy_from_user(object, u + offset, read_size))
1805 			return 0;
1806 	} else {
1807 		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1808 						  offset, read_size))
1809 			return 0;
1810 	}
1811 
1812 	/* Ok, now see if we read a complete object. */
1813 	hdr = &object->hdr;
1814 	switch (hdr->type) {
1815 	case BINDER_TYPE_BINDER:
1816 	case BINDER_TYPE_WEAK_BINDER:
1817 	case BINDER_TYPE_HANDLE:
1818 	case BINDER_TYPE_WEAK_HANDLE:
1819 		object_size = sizeof(struct flat_binder_object);
1820 		break;
1821 	case BINDER_TYPE_FD:
1822 		object_size = sizeof(struct binder_fd_object);
1823 		break;
1824 	case BINDER_TYPE_PTR:
1825 		object_size = sizeof(struct binder_buffer_object);
1826 		break;
1827 	case BINDER_TYPE_FDA:
1828 		object_size = sizeof(struct binder_fd_array_object);
1829 		break;
1830 	default:
1831 		return 0;
1832 	}
1833 	if (offset <= buffer->data_size - object_size &&
1834 	    buffer->data_size >= object_size)
1835 		return object_size;
1836 	else
1837 		return 0;
1838 }
1839 
1840 /**
1841  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1842  * @proc:	binder_proc owning the buffer
1843  * @b:		binder_buffer containing the object
1844  * @object:	struct binder_object to read into
1845  * @index:	index in offset array at which the binder_buffer_object is
1846  *		located
1847  * @start_offset: points to the start of the offset array
1848  * @object_offsetp: offset of @object read from @b
1849  * @num_valid:	the number of valid offsets in the offset array
1850  *
1851  * Return:	If @index is within the valid range of the offset array
1852  *		described by @start_offset and @num_valid, and if there's a valid
1853  *		binder_buffer_object at the offset found in index @index
1854  *		of the offset array, that object is returned. Otherwise,
1855  *		%NULL is returned.
1856  *		Note that the offset found in index @index itself is not
1857  *		verified; this function assumes that @num_valid elements
1858  *		from @start_offset were previously verified to have valid offsets.
1859  *		If @object_offsetp is non-NULL, then the offset within
1860  *		@b is written to it.
1861  */
1862 static struct binder_buffer_object *binder_validate_ptr(
1863 						struct binder_proc *proc,
1864 						struct binder_buffer *b,
1865 						struct binder_object *object,
1866 						binder_size_t index,
1867 						binder_size_t start_offset,
1868 						binder_size_t *object_offsetp,
1869 						binder_size_t num_valid)
1870 {
1871 	size_t object_size;
1872 	binder_size_t object_offset;
1873 	unsigned long buffer_offset;
1874 
1875 	if (index >= num_valid)
1876 		return NULL;
1877 
1878 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1879 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1880 					  b, buffer_offset,
1881 					  sizeof(object_offset)))
1882 		return NULL;
1883 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1884 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1885 		return NULL;
1886 	if (object_offsetp)
1887 		*object_offsetp = object_offset;
1888 
1889 	return &object->bbo;
1890 }
1891 
1892 /**
1893  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1894  * @proc:		binder_proc owning the buffer
1895  * @b:			transaction buffer
1896  * @objects_start_offset: offset to start of objects buffer
1897  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1898  * @fixup_offset:	start offset in @buffer to fix up
1899  * @last_obj_offset:	offset to last binder_buffer_object that we fixed up
1900  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1901  *
1902  * Return:		%true if a fixup at @fixup_offset in buffer @b is
1903  *			allowed.
1904  *
1905  * For safety reasons, we only allow fixups inside a buffer to happen
1906  * at increasing offsets; additionally, we only allow fixup on the last
1907  * buffer object that was verified, or one of its parents.
1908  *
1909  * Example of what is allowed:
1910  *
1911  * A
1912  *   B (parent = A, offset = 0)
1913  *   C (parent = A, offset = 16)
1914  *     D (parent = C, offset = 0)
1915  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1916  *
1917  * Examples of what is not allowed:
1918  *
1919  * Decreasing offsets within the same parent:
1920  * A
1921  *   C (parent = A, offset = 16)
1922  *   B (parent = A, offset = 0) // decreasing offset within A
1923  *
1924  * Referring to a parent that wasn't the last object or any of its parents:
1925  * A
1926  *   B (parent = A, offset = 0)
1927  *   C (parent = A, offset = 0)
1928  *   C (parent = A, offset = 16)
1929  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1930  */
1931 static bool binder_validate_fixup(struct binder_proc *proc,
1932 				  struct binder_buffer *b,
1933 				  binder_size_t objects_start_offset,
1934 				  binder_size_t buffer_obj_offset,
1935 				  binder_size_t fixup_offset,
1936 				  binder_size_t last_obj_offset,
1937 				  binder_size_t last_min_offset)
1938 {
1939 	if (!last_obj_offset) {
1940 		/* Nothing to fix up */
1941 		return false;
1942 	}
1943 
1944 	while (last_obj_offset != buffer_obj_offset) {
1945 		unsigned long buffer_offset;
1946 		struct binder_object last_object;
1947 		struct binder_buffer_object *last_bbo;
1948 		size_t object_size = binder_get_object(proc, NULL, b,
1949 						       last_obj_offset,
1950 						       &last_object);
1951 		if (object_size != sizeof(*last_bbo))
1952 			return false;
1953 
1954 		last_bbo = &last_object.bbo;
1955 		/*
1956 		 * Safe to retrieve the parent of last_obj, since it
1957 		 * was already previously verified by the driver.
1958 		 */
1959 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1960 			return false;
1961 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1962 		buffer_offset = objects_start_offset +
1963 			sizeof(binder_size_t) * last_bbo->parent;
1964 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1965 						  &last_obj_offset,
1966 						  b, buffer_offset,
1967 						  sizeof(last_obj_offset)))
1968 			return false;
1969 	}
1970 	return (fixup_offset >= last_min_offset);
1971 }
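
/*
 * Illustrative sketch, not part of binder.c: how a caller threads the
 * ordering state through successive validations while walking the
 * offset array. "check_one_ptr_fixup" and its parameters are
 * hypothetical; the call matches how binder_fixup_parent() uses the
 * helper, and the state update matches what binder_transaction() does
 * after each BINDER_TYPE_PTR object.
 */
static int check_one_ptr_fixup(struct binder_proc *proc,
			       struct binder_buffer *b,
			       binder_size_t off_start_offset,
			       binder_size_t parent_offset,
			       struct binder_buffer_object *bp,
			       binder_size_t object_offset,
			       binder_size_t *last_fixup_obj_off,
			       binder_size_t *last_fixup_min_off)
{
	/* reject decreasing offsets and parents that are out of scope */
	if (!binder_validate_fixup(proc, b, off_start_offset, parent_offset,
				   bp->parent_offset, *last_fixup_obj_off,
				   *last_fixup_min_off))
		return -EINVAL;
	/* this object becomes the new "last" verified buffer object */
	*last_fixup_obj_off = object_offset;
	*last_fixup_min_off = 0;
	return 0;
}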
1972 
1973 /**
1974  * struct binder_task_work_cb - for deferred close
1975  *
1976  * @twork:                callback_head for task work
1977  * @file:                 file to close
1978  *
1979  * Structure to pass task work to be handled after
1980  * returning from binder_ioctl() via task_work_add().
1981  */
1982 struct binder_task_work_cb {
1983 	struct callback_head twork;
1984 	struct file *file;
1985 };
1986 
1987 /**
1988  * binder_do_fd_close() - close list of file descriptors
1989  * @twork:	callback head for task work
1990  *
1991  * It is not safe to call ksys_close() during the binder_ioctl()
1992  * function if there is a chance that binder's own file descriptor
1993  * might be closed. This is to meet the requirements for using
1994  * fdget() (see comments for __fget_light()). Therefore use
1995  * task_work_add() to schedule the close operation once we have
1996  * returned from binder_ioctl(). This function is a callback
1997  * for that mechanism and does the actual ksys_close() on the
1998  * given file descriptor.
1999  */
2000 static void binder_do_fd_close(struct callback_head *twork)
2001 {
2002 	struct binder_task_work_cb *twcb = container_of(twork,
2003 			struct binder_task_work_cb, twork);
2004 
2005 	fput(twcb->file);
2006 	kfree(twcb);
2007 }
2008 
2009 /**
2010  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2011  * @fd:		file-descriptor to close
2012  *
2013  * See comments in binder_do_fd_close(). This function is used to schedule
2014  * a file-descriptor to be closed after returning from binder_ioctl().
2015  */
2016 static void binder_deferred_fd_close(int fd)
2017 {
2018 	struct binder_task_work_cb *twcb;
2019 
2020 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2021 	if (!twcb)
2022 		return;
2023 	init_task_work(&twcb->twork, binder_do_fd_close);
2024 	twcb->file = file_close_fd(fd);
2025 	if (twcb->file) {
2026 		// pin it until binder_do_fd_close(); see comments there
2027 		get_file(twcb->file);
2028 		filp_close(twcb->file, current->files);
2029 		task_work_add(current, &twcb->twork, TWA_RESUME);
2030 	} else {
2031 		kfree(twcb);
2032 	}
2033 }
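
/*
 * Illustrative sketch, not part of binder.c: the generic task_work
 * deferral pattern used above, reduced to its essentials. "my_deferred"
 * and "defer_until_return_sketch" are hypothetical.
 */
struct my_deferred {
	struct callback_head twork;
	/* resources to release once back in user space */
};

static void my_deferred_fn(struct callback_head *twork)
{
	struct my_deferred *d = container_of(twork, struct my_deferred, twork);

	/* runs in process context when the task returns to user space */
	kfree(d);
}

static int defer_until_return_sketch(void)
{
	struct my_deferred *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	init_task_work(&d->twork, my_deferred_fn);
	return task_work_add(current, &d->twork, TWA_RESUME);
}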
2034 
2035 static void binder_transaction_buffer_release(struct binder_proc *proc,
2036 					      struct binder_thread *thread,
2037 					      struct binder_buffer *buffer,
2038 					      binder_size_t off_end_offset,
2039 					      bool is_failure)
2040 {
2041 	int debug_id = buffer->debug_id;
2042 	binder_size_t off_start_offset, buffer_offset;
2043 
2044 	binder_debug(BINDER_DEBUG_TRANSACTION,
2045 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2046 		     proc->pid, buffer->debug_id,
2047 		     buffer->data_size, buffer->offsets_size,
2048 		     (unsigned long long)off_end_offset);
2049 
2050 	if (buffer->target_node)
2051 		binder_dec_node(buffer->target_node, 1, 0);
2052 
2053 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2054 
2055 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2056 	     buffer_offset += sizeof(binder_size_t)) {
2057 		struct binder_object_header *hdr;
2058 		size_t object_size = 0;
2059 		struct binder_object object;
2060 		binder_size_t object_offset;
2061 
2062 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2063 						   buffer, buffer_offset,
2064 						   sizeof(object_offset)))
2065 			object_size = binder_get_object(proc, NULL, buffer,
2066 							object_offset, &object);
2067 		if (object_size == 0) {
2068 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2069 			       debug_id, (u64)object_offset, buffer->data_size);
2070 			continue;
2071 		}
2072 		hdr = &object.hdr;
2073 		switch (hdr->type) {
2074 		case BINDER_TYPE_BINDER:
2075 		case BINDER_TYPE_WEAK_BINDER: {
2076 			struct flat_binder_object *fp;
2077 			struct binder_node *node;
2078 
2079 			fp = to_flat_binder_object(hdr);
2080 			node = binder_get_node(proc, fp->binder);
2081 			if (node == NULL) {
2082 				pr_err("transaction release %d bad node %016llx\n",
2083 				       debug_id, (u64)fp->binder);
2084 				break;
2085 			}
2086 			binder_debug(BINDER_DEBUG_TRANSACTION,
2087 				     "        node %d u%016llx\n",
2088 				     node->debug_id, (u64)node->ptr);
2089 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2090 					0);
2091 			binder_put_node(node);
2092 		} break;
2093 		case BINDER_TYPE_HANDLE:
2094 		case BINDER_TYPE_WEAK_HANDLE: {
2095 			struct flat_binder_object *fp;
2096 			struct binder_ref_data rdata;
2097 			int ret;
2098 
2099 			fp = to_flat_binder_object(hdr);
2100 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2101 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2102 
2103 			if (ret) {
2104 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2105 				 debug_id, fp->handle, ret);
2106 				break;
2107 			}
2108 			binder_debug(BINDER_DEBUG_TRANSACTION,
2109 				     "        ref %d desc %d\n",
2110 				     rdata.debug_id, rdata.desc);
2111 		} break;
2112 
2113 		case BINDER_TYPE_FD: {
2114 			/*
2115 			 * No need to close the file here since user-space
2116 			 * closes it for successfully delivered
2117 			 * transactions. For transactions that weren't
2118 			 * delivered, the new fd was never allocated so
2119 			 * there is no need to close and the fput on the
2120 			 * file is done when the transaction is torn
2121 			 * down.
2122 			 */
2123 		} break;
2124 		case BINDER_TYPE_PTR:
2125 			/*
2126 			 * Nothing to do here, this will get cleaned up when the
2127 			 * transaction buffer gets freed
2128 			 */
2129 			break;
2130 		case BINDER_TYPE_FDA: {
2131 			struct binder_fd_array_object *fda;
2132 			struct binder_buffer_object *parent;
2133 			struct binder_object ptr_object;
2134 			binder_size_t fda_offset;
2135 			size_t fd_index;
2136 			binder_size_t fd_buf_size;
2137 			binder_size_t num_valid;
2138 
2139 			if (is_failure) {
2140 				/*
2141 				 * The fd fixups have not been applied so no
2142 				 * fds need to be closed.
2143 				 */
2144 				continue;
2145 			}
2146 
2147 			num_valid = (buffer_offset - off_start_offset) /
2148 						sizeof(binder_size_t);
2149 			fda = to_binder_fd_array_object(hdr);
2150 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2151 						     fda->parent,
2152 						     off_start_offset,
2153 						     NULL,
2154 						     num_valid);
2155 			if (!parent) {
2156 				pr_err("transaction release %d bad parent offset\n",
2157 				       debug_id);
2158 				continue;
2159 			}
2160 			fd_buf_size = sizeof(u32) * fda->num_fds;
2161 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2162 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2163 				       debug_id, (u64)fda->num_fds);
2164 				continue;
2165 			}
2166 			if (fd_buf_size > parent->length ||
2167 			    fda->parent_offset > parent->length - fd_buf_size) {
2168 				/* No space for all file descriptors here. */
2169 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2170 				       debug_id, (u64)fda->num_fds);
2171 				continue;
2172 			}
2173 			/*
2174 			 * the source data for binder_buffer_object is visible
2175 			 * to user-space and the @buffer element is the user
2176 			 * pointer to the buffer_object containing the fd_array.
2177 			 * Convert the address to an offset relative to
2178 			 * the base of the transaction buffer.
2179 			 */
2180 			fda_offset = parent->buffer - buffer->user_data +
2181 				fda->parent_offset;
2182 			for (fd_index = 0; fd_index < fda->num_fds;
2183 			     fd_index++) {
2184 				u32 fd;
2185 				int err;
2186 				binder_size_t offset = fda_offset +
2187 					fd_index * sizeof(fd);
2188 
2189 				err = binder_alloc_copy_from_buffer(
2190 						&proc->alloc, &fd, buffer,
2191 						offset, sizeof(fd));
2192 				WARN_ON(err);
2193 				if (!err) {
2194 					binder_deferred_fd_close(fd);
2195 					/*
2196 					 * Need to make sure the thread goes
2197 					 * back to userspace to complete the
2198 					 * deferred close
2199 					 */
2200 					if (thread)
2201 						thread->looper_need_return = true;
2202 				}
2203 			}
2204 		} break;
2205 		default:
2206 			pr_err("transaction release %d bad object type %x\n",
2207 				debug_id, hdr->type);
2208 			break;
2209 		}
2210 	}
2211 }
2212 
2213 /* Clean up all the objects in the buffer */
2214 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2215 						struct binder_thread *thread,
2216 						struct binder_buffer *buffer,
2217 						bool is_failure)
2218 {
2219 	binder_size_t off_end_offset;
2220 
2221 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2222 	off_end_offset += buffer->offsets_size;
2223 
2224 	binder_transaction_buffer_release(proc, thread, buffer,
2225 					  off_end_offset, is_failure);
2226 }
2227 
2228 static int binder_translate_binder(struct flat_binder_object *fp,
2229 				   struct binder_transaction *t,
2230 				   struct binder_thread *thread)
2231 {
2232 	struct binder_node *node;
2233 	struct binder_proc *proc = thread->proc;
2234 	struct binder_proc *target_proc = t->to_proc;
2235 	struct binder_ref_data rdata;
2236 	int ret = 0;
2237 
2238 	node = binder_get_node(proc, fp->binder);
2239 	if (!node) {
2240 		node = binder_new_node(proc, fp);
2241 		if (!node)
2242 			return -ENOMEM;
2243 	}
2244 	if (fp->cookie != node->cookie) {
2245 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2246 				  proc->pid, thread->pid, (u64)fp->binder,
2247 				  node->debug_id, (u64)fp->cookie,
2248 				  (u64)node->cookie);
2249 		ret = -EINVAL;
2250 		goto done;
2251 	}
2252 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2253 		ret = -EPERM;
2254 		goto done;
2255 	}
2256 
2257 	ret = binder_inc_ref_for_node(target_proc, node,
2258 			fp->hdr.type == BINDER_TYPE_BINDER,
2259 			&thread->todo, &rdata);
2260 	if (ret)
2261 		goto done;
2262 
2263 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2264 		fp->hdr.type = BINDER_TYPE_HANDLE;
2265 	else
2266 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2267 	fp->binder = 0;
2268 	fp->handle = rdata.desc;
2269 	fp->cookie = 0;
2270 
2271 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2272 	binder_debug(BINDER_DEBUG_TRANSACTION,
2273 		     "        node %d u%016llx -> ref %d desc %d\n",
2274 		     node->debug_id, (u64)node->ptr,
2275 		     rdata.debug_id, rdata.desc);
2276 done:
2277 	binder_put_node(node);
2278 	return ret;
2279 }
2280 
2281 static int binder_translate_handle(struct flat_binder_object *fp,
2282 				   struct binder_transaction *t,
2283 				   struct binder_thread *thread)
2284 {
2285 	struct binder_proc *proc = thread->proc;
2286 	struct binder_proc *target_proc = t->to_proc;
2287 	struct binder_node *node;
2288 	struct binder_ref_data src_rdata;
2289 	int ret = 0;
2290 
2291 	node = binder_get_node_from_ref(proc, fp->handle,
2292 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2293 	if (!node) {
2294 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2295 				  proc->pid, thread->pid, fp->handle);
2296 		return -EINVAL;
2297 	}
2298 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2299 		ret = -EPERM;
2300 		goto done;
2301 	}
2302 
2303 	binder_node_lock(node);
2304 	if (node->proc == target_proc) {
2305 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2306 			fp->hdr.type = BINDER_TYPE_BINDER;
2307 		else
2308 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2309 		fp->binder = node->ptr;
2310 		fp->cookie = node->cookie;
2311 		if (node->proc)
2312 			binder_inner_proc_lock(node->proc);
2313 		else
2314 			__acquire(&node->proc->inner_lock);
2315 		binder_inc_node_nilocked(node,
2316 					 fp->hdr.type == BINDER_TYPE_BINDER,
2317 					 0, NULL);
2318 		if (node->proc)
2319 			binder_inner_proc_unlock(node->proc);
2320 		else
2321 			__release(&node->proc->inner_lock);
2322 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2323 		binder_debug(BINDER_DEBUG_TRANSACTION,
2324 			     "        ref %d desc %d -> node %d u%016llx\n",
2325 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2326 			     (u64)node->ptr);
2327 		binder_node_unlock(node);
2328 	} else {
2329 		struct binder_ref_data dest_rdata;
2330 
2331 		binder_node_unlock(node);
2332 		ret = binder_inc_ref_for_node(target_proc, node,
2333 				fp->hdr.type == BINDER_TYPE_HANDLE,
2334 				NULL, &dest_rdata);
2335 		if (ret)
2336 			goto done;
2337 
2338 		fp->binder = 0;
2339 		fp->handle = dest_rdata.desc;
2340 		fp->cookie = 0;
2341 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2342 						    &dest_rdata);
2343 		binder_debug(BINDER_DEBUG_TRANSACTION,
2344 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2345 			     src_rdata.debug_id, src_rdata.desc,
2346 			     dest_rdata.debug_id, dest_rdata.desc,
2347 			     node->debug_id);
2348 	}
2349 done:
2350 	binder_put_node(node);
2351 	return ret;
2352 }
2353 
2354 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2355 			       struct binder_transaction *t,
2356 			       struct binder_thread *thread,
2357 			       struct binder_transaction *in_reply_to)
2358 {
2359 	struct binder_proc *proc = thread->proc;
2360 	struct binder_proc *target_proc = t->to_proc;
2361 	struct binder_txn_fd_fixup *fixup;
2362 	struct file *file;
2363 	int ret = 0;
2364 	bool target_allows_fd;
2365 
2366 	if (in_reply_to)
2367 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2368 	else
2369 		target_allows_fd = t->buffer->target_node->accept_fds;
2370 	if (!target_allows_fd) {
2371 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2372 				  proc->pid, thread->pid,
2373 				  in_reply_to ? "reply" : "transaction",
2374 				  fd);
2375 		ret = -EPERM;
2376 		goto err_fd_not_accepted;
2377 	}
2378 
2379 	file = fget(fd);
2380 	if (!file) {
2381 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2382 				  proc->pid, thread->pid, fd);
2383 		ret = -EBADF;
2384 		goto err_fget;
2385 	}
2386 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2387 	if (ret < 0) {
2388 		ret = -EPERM;
2389 		goto err_security;
2390 	}
2391 
2392 	/*
2393 	 * Add fixup record for this transaction. The allocation
2394 	 * of the fd in the target needs to be done from a
2395 	 * target thread.
2396 	 */
2397 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2398 	if (!fixup) {
2399 		ret = -ENOMEM;
2400 		goto err_alloc;
2401 	}
2402 	fixup->file = file;
2403 	fixup->offset = fd_offset;
2404 	fixup->target_fd = -1;
2405 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2406 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2407 
2408 	return ret;
2409 
2410 err_alloc:
2411 err_security:
2412 	fput(file);
2413 err_fget:
2414 err_fd_not_accepted:
2415 	return ret;
2416 }
2417 
2418 /**
2419  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2420  * @offset:	offset in target buffer to fix up
2421  * @skip_size:	bytes to skip in copy (fixup will be written later)
2422  * @fixup_data:	data to write at fixup offset
2423  * @node:	list node
2424  *
2425  * This is used for the pointer fixup list (pf) which is created and consumed
2426  * during binder_transaction() and is only accessed locally. No
2427  * locking is necessary.
2428  *
2429  * The list is ordered by @offset.
2430  */
2431 struct binder_ptr_fixup {
2432 	binder_size_t offset;
2433 	size_t skip_size;
2434 	binder_uintptr_t fixup_data;
2435 	struct list_head node;
2436 };
2437 
2438 /**
2439  * struct binder_sg_copy - scatter-gather data to be copied
2440  * @offset:		offset in target buffer
2441  * @sender_uaddr:	user address in source buffer
2442  * @length:		bytes to copy
2443  * @node:		list node
2444  *
2445  * This is used for the sg copy list (sgc) which is created and consumed
2446  * during binder_transaction() and is only accessed locally. No
2447  * locking is necessary.
2448  *
2449  * The list is ordered by @offset.
2450  */
2451 struct binder_sg_copy {
2452 	binder_size_t offset;
2453 	const void __user *sender_uaddr;
2454 	size_t length;
2455 	struct list_head node;
2456 };
2457 
2458 /**
2459  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2460  * @alloc:	binder_alloc associated with @buffer
2461  * @buffer:	binder buffer in target process
2462  * @sgc_head:	list_head of scatter-gather copy list
2463  * @pf_head:	list_head of pointer fixup list
2464  *
2465  * Processes all elements of @sgc_head, applying fixups from @pf_head
2466  * and copying the scatter-gather data from the source process' user
2467  * buffer to the target's buffer. It is expected that the list creation
2468  * and processing all occurs during binder_transaction() so these lists
2469  * are only accessed in local context.
2470  *
2471  * Return: 0=success, else -errno
2472  */
2473 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2474 					 struct binder_buffer *buffer,
2475 					 struct list_head *sgc_head,
2476 					 struct list_head *pf_head)
2477 {
2478 	int ret = 0;
2479 	struct binder_sg_copy *sgc, *tmpsgc;
2480 	struct binder_ptr_fixup *tmppf;
2481 	struct binder_ptr_fixup *pf =
2482 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2483 					 node);
2484 
2485 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2486 		size_t bytes_copied = 0;
2487 
2488 		while (bytes_copied < sgc->length) {
2489 			size_t copy_size;
2490 			size_t bytes_left = sgc->length - bytes_copied;
2491 			size_t offset = sgc->offset + bytes_copied;
2492 
2493 			/*
2494 			 * We copy up to the fixup (pointed to by pf)
2495 			 */
2496 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2497 				       : bytes_left;
2498 			if (!ret && copy_size)
2499 				ret = binder_alloc_copy_user_to_buffer(
2500 						alloc, buffer,
2501 						offset,
2502 						sgc->sender_uaddr + bytes_copied,
2503 						copy_size);
2504 			bytes_copied += copy_size;
2505 			if (copy_size != bytes_left) {
2506 				BUG_ON(!pf);
2507 				/* we stopped at a fixup offset */
2508 				if (pf->skip_size) {
2509 					/*
2510 					 * we are just skipping. This is for
2511 					 * BINDER_TYPE_FDA where the translated
2512 					 * fds will be fixed up when we get
2513 					 * to target context.
2514 					 */
2515 					bytes_copied += pf->skip_size;
2516 				} else {
2517 					/* apply the fixup indicated by pf */
2518 					if (!ret)
2519 						ret = binder_alloc_copy_to_buffer(
2520 							alloc, buffer,
2521 							pf->offset,
2522 							&pf->fixup_data,
2523 							sizeof(pf->fixup_data));
2524 					bytes_copied += sizeof(pf->fixup_data);
2525 				}
2526 				list_del(&pf->node);
2527 				kfree(pf);
2528 				pf = list_first_entry_or_null(pf_head,
2529 						struct binder_ptr_fixup, node);
2530 			}
2531 		}
2532 		list_del(&sgc->node);
2533 		kfree(sgc);
2534 	}
2535 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2536 		BUG_ON(pf->skip_size == 0);
2537 		list_del(&pf->node);
2538 		kfree(pf);
2539 	}
2540 	BUG_ON(!list_empty(sgc_head));
2541 
2542 	return ret > 0 ? -EINVAL : ret;
2543 }
2544 
2545 /**
2546  * binder_cleanup_deferred_txn_lists() - free specified lists
2547  * @sgc_head:	list_head of scatter-gather copy list
2548  * @pf_head:	list_head of pointer fixup list
2549  *
2550  * Called to clean up @sgc_head and @pf_head if there is an
2551  * error.
2552  */
2553 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2554 					      struct list_head *pf_head)
2555 {
2556 	struct binder_sg_copy *sgc, *tmpsgc;
2557 	struct binder_ptr_fixup *pf, *tmppf;
2558 
2559 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2560 		list_del(&sgc->node);
2561 		kfree(sgc);
2562 	}
2563 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2564 		list_del(&pf->node);
2565 		kfree(pf);
2566 	}
2567 }
2568 
2569 /**
2570  * binder_defer_copy() - queue a scatter-gather buffer for copy
2571  * @sgc_head:		list_head of scatter-gather copy list
2572  * @offset:		binder buffer offset in target process
2573  * @sender_uaddr:	user address in source process
2574  * @length:		bytes to copy
2575  *
2576  * Specify a scatter-gather block to be copied. The actual copy must
2577  * be deferred until all the needed fixups are identified and queued.
2578  * Then the copy and fixups are done together so un-translated values
2579  * from the source are never visible in the target buffer.
2580  *
2581  * We are guaranteed that repeated calls to this function will have
2582  * monotonically increasing @offset values so the list will naturally
2583  * be ordered.
2584  *
2585  * Return: 0=success, else -errno
2586  */
2587 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2588 			     const void __user *sender_uaddr, size_t length)
2589 {
2590 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2591 
2592 	if (!bc)
2593 		return -ENOMEM;
2594 
2595 	bc->offset = offset;
2596 	bc->sender_uaddr = sender_uaddr;
2597 	bc->length = length;
2598 	INIT_LIST_HEAD(&bc->node);
2599 
2600 	/*
2601 	 * We are guaranteed that the deferred copies are in-order
2602 	 * so just add to the tail.
2603 	 */
2604 	list_add_tail(&bc->node, sgc_head);
2605 
2606 	return 0;
2607 }
2608 
2609 /**
2610  * binder_add_fixup() - queue a fixup to be applied to sg copy
2611  * @pf_head:	list_head of binder ptr fixup list
2612  * @offset:	binder buffer offset in target process
2613  * @fixup:	bytes to be copied for fixup
2614  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2615  *
2616  * Add the specified fixup to a list ordered by @offset. When copying
2617  * the scatter-gather buffers, the fixup will be copied instead of
2618  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2619  * will be applied later (in target process context), so we just skip
2620  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2621  * value in @fixup.
2622  *
2623  * This function is called *mostly* in @offset order, but there are
2624  * exceptions. Since out-of-order inserts are relatively uncommon,
2625  * we insert the new element by searching backward from the tail of
2626  * the list.
2627  *
2628  * Return: 0=success, else -errno
2629  */
2630 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2631 			    binder_uintptr_t fixup, size_t skip_size)
2632 {
2633 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2634 	struct binder_ptr_fixup *tmppf;
2635 
2636 	if (!pf)
2637 		return -ENOMEM;
2638 
2639 	pf->offset = offset;
2640 	pf->fixup_data = fixup;
2641 	pf->skip_size = skip_size;
2642 	INIT_LIST_HEAD(&pf->node);
2643 
2644 	/* Fixups are *mostly* added in-order, but there are some
2645 	 * exceptions. Look backwards through list for insertion point.
2646 	 */
2647 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2648 		if (tmppf->offset < pf->offset) {
2649 			list_add(&pf->node, &tmppf->node);
2650 			return 0;
2651 		}
2652 	}
2653 	/*
2654 	 * if we get here, then the new offset is the lowest so
2655 	 * insert at the head
2656 	 */
2657 	list_add(&pf->node, pf_head);
2658 	return 0;
2659 }
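
/*
 * Illustrative sketch, not part of binder.c: how the three helpers
 * above combine. A scatter-gather block is queued, a fixup is recorded
 * inside it, and the deferred copy then writes the fixed-up value in
 * place of the sender's data. "sg_copy_sketch" and the offsets/values
 * are hypothetical.
 */
static int sg_copy_sketch(struct binder_alloc *alloc,
			  struct binder_buffer *buffer,
			  const void __user *sender_uaddr)
{
	LIST_HEAD(sgc_head);	/* deferred copies, in offset order */
	LIST_HEAD(pf_head);	/* pointer fixups, in offset order */
	int ret;

	/* queue a 128-byte block for copy at target buffer offset 0 */
	ret = binder_defer_copy(&sgc_head, 0, sender_uaddr, 128);
	if (ret)
		goto err;
	/* overwrite the pointer at offset 64 with a translated value */
	ret = binder_add_fixup(&pf_head, 64, (binder_uintptr_t)0x1000, 0);
	if (ret)
		goto err;
	/* perform the copies, substituting the fixup where it applies */
	return binder_do_deferred_txn_copies(alloc, buffer,
					     &sgc_head, &pf_head);
err:
	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
	return ret;
}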
2660 
2661 static int binder_translate_fd_array(struct list_head *pf_head,
2662 				     struct binder_fd_array_object *fda,
2663 				     const void __user *sender_ubuffer,
2664 				     struct binder_buffer_object *parent,
2665 				     struct binder_buffer_object *sender_uparent,
2666 				     struct binder_transaction *t,
2667 				     struct binder_thread *thread,
2668 				     struct binder_transaction *in_reply_to)
2669 {
2670 	binder_size_t fdi, fd_buf_size;
2671 	binder_size_t fda_offset;
2672 	const void __user *sender_ufda_base;
2673 	struct binder_proc *proc = thread->proc;
2674 	int ret;
2675 
2676 	if (fda->num_fds == 0)
2677 		return 0;
2678 
2679 	fd_buf_size = sizeof(u32) * fda->num_fds;
2680 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2681 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2682 				  proc->pid, thread->pid, (u64)fda->num_fds);
2683 		return -EINVAL;
2684 	}
2685 	if (fd_buf_size > parent->length ||
2686 	    fda->parent_offset > parent->length - fd_buf_size) {
2687 		/* No space for all file descriptors here. */
2688 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2689 				  proc->pid, thread->pid, (u64)fda->num_fds);
2690 		return -EINVAL;
2691 	}
2692 	/*
2693 	 * the source data for binder_buffer_object is visible
2694 	 * to user-space and the @buffer element is the user
2695 	 * pointer to the buffer_object containing the fd_array.
2696 	 * Convert the address to an offset relative to
2697 	 * the base of the transaction buffer.
2698 	 */
2699 	fda_offset = parent->buffer - t->buffer->user_data +
2700 		fda->parent_offset;
2701 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2702 				fda->parent_offset;
2703 
2704 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2705 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2706 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2707 				  proc->pid, thread->pid);
2708 		return -EINVAL;
2709 	}
2710 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2711 	if (ret)
2712 		return ret;
2713 
2714 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2715 		u32 fd;
2716 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2717 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2718 
2719 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2720 		if (!ret)
2721 			ret = binder_translate_fd(fd, offset, t, thread,
2722 						  in_reply_to);
2723 		if (ret)
2724 			return ret > 0 ? -EINVAL : ret;
2725 	}
2726 	return 0;
2727 }
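
/*
 * Illustrative worked example, not part of binder.c: the address-to-
 * offset conversion performed above for the fd array. All numbers are
 * hypothetical.
 *
 *   t->buffer->user_data = 0x7f0000001000  (base of target buffer)
 *   parent->buffer       = 0x7f0000001040  (parent object's payload)
 *   fda->parent_offset   = 0x10            (fd array within the parent)
 *
 *   fda_offset = 0x7f0000001040 - 0x7f0000001000 + 0x10 = 0x50
 *
 * so the fds live 0x50 bytes into the transaction buffer, and the
 * binder_add_fixup() call above tells the deferred copy to skip
 * num_fds * sizeof(u32) bytes there for later per-fd translation.
 */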
2728 
2729 static int binder_fixup_parent(struct list_head *pf_head,
2730 			       struct binder_transaction *t,
2731 			       struct binder_thread *thread,
2732 			       struct binder_buffer_object *bp,
2733 			       binder_size_t off_start_offset,
2734 			       binder_size_t num_valid,
2735 			       binder_size_t last_fixup_obj_off,
2736 			       binder_size_t last_fixup_min_off)
2737 {
2738 	struct binder_buffer_object *parent;
2739 	struct binder_buffer *b = t->buffer;
2740 	struct binder_proc *proc = thread->proc;
2741 	struct binder_proc *target_proc = t->to_proc;
2742 	struct binder_object object;
2743 	binder_size_t buffer_offset;
2744 	binder_size_t parent_offset;
2745 
2746 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2747 		return 0;
2748 
2749 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2750 				     off_start_offset, &parent_offset,
2751 				     num_valid);
2752 	if (!parent) {
2753 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2754 				  proc->pid, thread->pid);
2755 		return -EINVAL;
2756 	}
2757 
2758 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2759 				   parent_offset, bp->parent_offset,
2760 				   last_fixup_obj_off,
2761 				   last_fixup_min_off)) {
2762 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2763 				  proc->pid, thread->pid);
2764 		return -EINVAL;
2765 	}
2766 
2767 	if (parent->length < sizeof(binder_uintptr_t) ||
2768 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2769 		/* No space for a pointer here! */
2770 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2771 				  proc->pid, thread->pid);
2772 		return -EINVAL;
2773 	}
2774 
2775 	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2776 
2777 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2778 }
2779 
2780 /**
2781  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2782  * @t1: the pending async txn in the frozen process
2783  * @t2: the new async txn to supersede the outdated pending one
2784  *
2785  * Return:  true if t2 can supersede t1
2786  *          false if t2 can not supersede t1
2787  *          false if t2 cannot supersede t1
2788 static bool binder_can_update_transaction(struct binder_transaction *t1,
2789 					  struct binder_transaction *t2)
2790 {
2791 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2792 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2793 		return false;
2794 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2795 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2796 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2797 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2798 		return true;
2799 	return false;
2800 }
2801 
2802 /**
2803  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2804  * @t:		 new async transaction
2805  * @target_list: list to find outdated transaction
2806  *
2807  * Return: the outdated transaction if found
2808  *         NULL if no outdated transaction can be found
2809  *
2810  * Requires the proc->inner_lock to be held.
2811  */
2812 static struct binder_transaction *
2813 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2814 					 struct list_head *target_list)
2815 {
2816 	struct binder_work *w;
2817 
2818 	list_for_each_entry(w, target_list, entry) {
2819 		struct binder_transaction *t_queued;
2820 
2821 		if (w->type != BINDER_WORK_TRANSACTION)
2822 			continue;
2823 		t_queued = container_of(w, struct binder_transaction, work);
2824 		if (binder_can_update_transaction(t_queued, t))
2825 			return t_queued;
2826 	}
2827 	return NULL;
2828 }
2829 
2830 /**
2831  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2832  * @t:		transaction to send
2833  * @proc:	process to send the transaction to
2834  * @thread:	thread in @proc to send the transaction to (may be NULL)
2835  *
2836  * This function queues a transaction to the specified process. It will try
2837  * to find a thread in the target process to handle the transaction and
2838  * wake it up. If no thread is found, the work is queued to the proc
2839  * waitqueue.
2840  *
2841  * If the @thread parameter is not NULL, the transaction is always queued
2842  * to the waitlist of that specific thread.
2843  *
2844  * Return:	0 if the transaction was successfully queued
2845  *		BR_DEAD_REPLY if the target process or thread is dead
2846  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2847  *			the sync transaction was rejected
2848  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2849  *		and the async transaction was successfully queued
2850  */
2851 static int binder_proc_transaction(struct binder_transaction *t,
2852 				    struct binder_proc *proc,
2853 				    struct binder_thread *thread)
2854 {
2855 	struct binder_node *node = t->buffer->target_node;
2856 	bool oneway = !!(t->flags & TF_ONE_WAY);
2857 	bool pending_async = false;
2858 	struct binder_transaction *t_outdated = NULL;
2859 	bool frozen = false;
2860 
2861 	BUG_ON(!node);
2862 	binder_node_lock(node);
2863 	if (oneway) {
2864 		BUG_ON(thread);
2865 		if (node->has_async_transaction)
2866 			pending_async = true;
2867 		else
2868 			node->has_async_transaction = true;
2869 	}
2870 
2871 	binder_inner_proc_lock(proc);
2872 	if (proc->is_frozen) {
2873 		frozen = true;
2874 		proc->sync_recv |= !oneway;
2875 		proc->async_recv |= oneway;
2876 	}
2877 
2878 	if ((frozen && !oneway) || proc->is_dead ||
2879 			(thread && thread->is_dead)) {
2880 		binder_inner_proc_unlock(proc);
2881 		binder_node_unlock(node);
2882 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2883 	}
2884 
2885 	if (!thread && !pending_async)
2886 		thread = binder_select_thread_ilocked(proc);
2887 
2888 	if (thread) {
2889 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2890 	} else if (!pending_async) {
2891 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2892 	} else {
2893 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2894 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2895 									      &node->async_todo);
2896 			if (t_outdated) {
2897 				binder_debug(BINDER_DEBUG_TRANSACTION,
2898 					     "txn %d supersedes %d\n",
2899 					     t->debug_id, t_outdated->debug_id);
2900 				list_del_init(&t_outdated->work.entry);
2901 				proc->outstanding_txns--;
2902 			}
2903 		}
2904 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2905 	}
2906 
2907 	if (!pending_async)
2908 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2909 
2910 	proc->outstanding_txns++;
2911 	binder_inner_proc_unlock(proc);
2912 	binder_node_unlock(node);
2913 
2914 	/*
2915 	 * To reduce potential contention, free the outdated transaction and
2916 	 * buffer after releasing the locks.
2917 	 */
2918 	if (t_outdated) {
2919 		struct binder_buffer *buffer = t_outdated->buffer;
2920 
2921 		t_outdated->buffer = NULL;
2922 		buffer->transaction = NULL;
2923 		trace_binder_transaction_update_buffer_release(buffer);
2924 		binder_release_entire_buffer(proc, NULL, buffer, false);
2925 		binder_alloc_free_buf(&proc->alloc, buffer);
2926 		kfree(t_outdated);
2927 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2928 	}
2929 
2930 	if (oneway && frozen)
2931 		return BR_TRANSACTION_PENDING_FROZEN;
2932 
2933 	return 0;
2934 }
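
/*
 * Illustrative sketch, not part of binder.c: how a caller maps the
 * binder_proc_transaction() return codes. "queue_txn_sketch" is
 * hypothetical; the interpretation of the BR_* values follows the
 * kernel-doc above.
 */
static void queue_txn_sketch(struct binder_transaction *t,
			     struct binder_proc *target_proc)
{
	switch (binder_proc_transaction(t, target_proc, NULL)) {
	case 0:
		/* queued, and a target thread was woken if needed */
		break;
	case BR_TRANSACTION_PENDING_FROZEN:
		/* async txn queued to a frozen proc; delivery pending */
		break;
	case BR_FROZEN_REPLY:
		/* sync txn rejected because the target is frozen */
		break;
	case BR_DEAD_REPLY:
		/* target process or thread is dead; fail the txn */
		break;
	}
}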
2935 
2936 /**
2937  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2938  * @node:         struct binder_node for which to get refs
2939  * @procp:        returns @node->proc if valid
2940  * @error:        if no @procp then returns BR_DEAD_REPLY
2941  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2942  * User-space normally keeps the node alive when creating a transaction
2943  * since it has a reference to the target. The local strong ref keeps it
2944  * alive if the sending process dies before the target process processes
2945  * the transaction. If the source process is malicious or has a reference
2946  * counting bug, relying on the local strong ref can fail.
2947  *
2948  * Since user-space can cause the local strong ref to go away, we also take
2949  * a tmpref on the node to ensure it survives while we are constructing
2950  * the transaction. We also need a tmpref on the proc while we are
2951  * constructing the transaction, so we take that here as well.
2952  *
2953  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2954  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2955  * target proc has died, @error is set to BR_DEAD_REPLY.
2956  */
2957 static struct binder_node *binder_get_node_refs_for_txn(
2958 		struct binder_node *node,
2959 		struct binder_proc **procp,
2960 		uint32_t *error)
2961 {
2962 	struct binder_node *target_node = NULL;
2963 
2964 	binder_node_inner_lock(node);
2965 	if (node->proc) {
2966 		target_node = node;
2967 		binder_inc_node_nilocked(node, 1, 0, NULL);
2968 		binder_inc_node_tmpref_ilocked(node);
2969 		node->proc->tmp_ref++;
2970 		*procp = node->proc;
2971 	} else
2972 		*error = BR_DEAD_REPLY;
2973 	binder_node_inner_unlock(node);
2974 
2975 	return target_node;
2976 }
2977 
2978 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2979 				      uint32_t command, int32_t param)
2980 {
2981 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2982 
2983 	if (!from) {
2984 		/* annotation for sparse */
2985 		__release(&from->proc->inner_lock);
2986 		return;
2987 	}
2988 
2989 	/* don't override existing errors */
2990 	if (from->ee.command == BR_OK)
2991 		binder_set_extended_error(&from->ee, id, command, param);
2992 	binder_inner_proc_unlock(from->proc);
2993 	binder_thread_dec_tmpref(from);
2994 }
2995 
2996 static void binder_transaction(struct binder_proc *proc,
2997 			       struct binder_thread *thread,
2998 			       struct binder_transaction_data *tr, int reply,
2999 			       binder_size_t extra_buffers_size)
3000 {
3001 	int ret;
3002 	struct binder_transaction *t;
3003 	struct binder_work *w;
3004 	struct binder_work *tcomplete;
3005 	binder_size_t buffer_offset = 0;
3006 	binder_size_t off_start_offset, off_end_offset;
3007 	binder_size_t off_min;
3008 	binder_size_t sg_buf_offset, sg_buf_end_offset;
3009 	binder_size_t user_offset = 0;
3010 	struct binder_proc *target_proc = NULL;
3011 	struct binder_thread *target_thread = NULL;
3012 	struct binder_node *target_node = NULL;
3013 	struct binder_transaction *in_reply_to = NULL;
3014 	struct binder_transaction_log_entry *e;
3015 	uint32_t return_error = 0;
3016 	uint32_t return_error_param = 0;
3017 	uint32_t return_error_line = 0;
3018 	binder_size_t last_fixup_obj_off = 0;
3019 	binder_size_t last_fixup_min_off = 0;
3020 	struct binder_context *context = proc->context;
3021 	int t_debug_id = atomic_inc_return(&binder_last_id);
3022 	ktime_t t_start_time = ktime_get();
3023 	struct lsm_context lsmctx = { };
3024 	struct list_head sgc_head;
3025 	struct list_head pf_head;
3026 	const void __user *user_buffer = (const void __user *)
3027 				(uintptr_t)tr->data.ptr.buffer;
3028 	INIT_LIST_HEAD(&sgc_head);
3029 	INIT_LIST_HEAD(&pf_head);
3030 
3031 	e = binder_transaction_log_add(&binder_transaction_log);
3032 	e->debug_id = t_debug_id;
3033 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3034 	e->from_proc = proc->pid;
3035 	e->from_thread = thread->pid;
3036 	e->target_handle = tr->target.handle;
3037 	e->data_size = tr->data_size;
3038 	e->offsets_size = tr->offsets_size;
3039 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3040 
3041 	binder_inner_proc_lock(proc);
3042 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3043 	binder_inner_proc_unlock(proc);
3044 
3045 	if (reply) {
3046 		binder_inner_proc_lock(proc);
3047 		in_reply_to = thread->transaction_stack;
3048 		if (in_reply_to == NULL) {
3049 			binder_inner_proc_unlock(proc);
3050 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3051 					  proc->pid, thread->pid);
3052 			return_error = BR_FAILED_REPLY;
3053 			return_error_param = -EPROTO;
3054 			return_error_line = __LINE__;
3055 			goto err_empty_call_stack;
3056 		}
3057 		if (in_reply_to->to_thread != thread) {
3058 			spin_lock(&in_reply_to->lock);
3059 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3060 				proc->pid, thread->pid, in_reply_to->debug_id,
3061 				in_reply_to->to_proc ?
3062 				in_reply_to->to_proc->pid : 0,
3063 				in_reply_to->to_thread ?
3064 				in_reply_to->to_thread->pid : 0);
3065 			spin_unlock(&in_reply_to->lock);
3066 			binder_inner_proc_unlock(proc);
3067 			return_error = BR_FAILED_REPLY;
3068 			return_error_param = -EPROTO;
3069 			return_error_line = __LINE__;
3070 			in_reply_to = NULL;
3071 			goto err_bad_call_stack;
3072 		}
3073 		thread->transaction_stack = in_reply_to->to_parent;
3074 		binder_inner_proc_unlock(proc);
3075 		binder_set_nice(in_reply_to->saved_priority);
3076 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3077 		if (target_thread == NULL) {
3078 			/* annotation for sparse */
3079 			__release(&target_thread->proc->inner_lock);
3080 			binder_txn_error("%d:%d reply target not found\n",
3081 				thread->pid, proc->pid);
3082 			return_error = BR_DEAD_REPLY;
3083 			return_error_line = __LINE__;
3084 			goto err_dead_binder;
3085 		}
3086 		if (target_thread->transaction_stack != in_reply_to) {
3087 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3088 				proc->pid, thread->pid,
3089 				target_thread->transaction_stack ?
3090 				target_thread->transaction_stack->debug_id : 0,
3091 				in_reply_to->debug_id);
3092 			binder_inner_proc_unlock(target_thread->proc);
3093 			return_error = BR_FAILED_REPLY;
3094 			return_error_param = -EPROTO;
3095 			return_error_line = __LINE__;
3096 			in_reply_to = NULL;
3097 			target_thread = NULL;
3098 			goto err_dead_binder;
3099 		}
3100 		target_proc = target_thread->proc;
3101 		target_proc->tmp_ref++;
3102 		binder_inner_proc_unlock(target_thread->proc);
3103 	} else {
3104 		if (tr->target.handle) {
3105 			struct binder_ref *ref;
3106 
3107 			/*
3108 			 * There must already be a strong ref
3109 			 * on this node. If so, do a strong
3110 			 * increment on the node to ensure it
3111 			 * stays alive until the transaction is
3112 			 * done.
3113 			 */
3114 			binder_proc_lock(proc);
3115 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3116 						     true);
3117 			if (ref) {
3118 				target_node = binder_get_node_refs_for_txn(
3119 						ref->node, &target_proc,
3120 						&return_error);
3121 			} else {
3122 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3123 						  proc->pid, thread->pid, tr->target.handle);
3124 				return_error = BR_FAILED_REPLY;
3125 			}
3126 			binder_proc_unlock(proc);
3127 		} else {
3128 			mutex_lock(&context->context_mgr_node_lock);
3129 			target_node = context->binder_context_mgr_node;
3130 			if (target_node)
3131 				target_node = binder_get_node_refs_for_txn(
3132 						target_node, &target_proc,
3133 						&return_error);
3134 			else
3135 				return_error = BR_DEAD_REPLY;
3136 			mutex_unlock(&context->context_mgr_node_lock);
3137 			if (target_node && target_proc->pid == proc->pid) {
3138 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3139 						  proc->pid, thread->pid);
3140 				return_error = BR_FAILED_REPLY;
3141 				return_error_param = -EINVAL;
3142 				return_error_line = __LINE__;
3143 				goto err_invalid_target_handle;
3144 			}
3145 		}
3146 		if (!target_node) {
3147 			binder_txn_error("%d:%d cannot find target node\n",
3148 					 proc->pid, thread->pid);
3149 			/* return_error is set above */
3150 			return_error_param = -EINVAL;
3151 			return_error_line = __LINE__;
3152 			goto err_dead_binder;
3153 		}
3154 		e->to_node = target_node->debug_id;
3155 		if (WARN_ON(proc == target_proc)) {
3156 			binder_txn_error("%d:%d self transactions not allowed\n",
3157 				thread->pid, proc->pid);
3158 			return_error = BR_FAILED_REPLY;
3159 			return_error_param = -EINVAL;
3160 			return_error_line = __LINE__;
3161 			goto err_invalid_target_handle;
3162 		}
3163 		if (security_binder_transaction(proc->cred,
3164 						target_proc->cred) < 0) {
3165 			binder_txn_error("%d:%d transaction credentials failed\n",
3166 				thread->pid, proc->pid);
3167 			return_error = BR_FAILED_REPLY;
3168 			return_error_param = -EPERM;
3169 			return_error_line = __LINE__;
3170 			goto err_invalid_target_handle;
3171 		}
3172 		binder_inner_proc_lock(proc);
3173 
3174 		w = list_first_entry_or_null(&thread->todo,
3175 					     struct binder_work, entry);
3176 		if (!(tr->flags & TF_ONE_WAY) && w &&
3177 		    w->type == BINDER_WORK_TRANSACTION) {
3178 			/*
3179 			 * Do not allow new outgoing transaction from a
3180 			 * thread that has a transaction at the head of
3181 			 * its todo list. Only need to check the head
3182 			 * because binder_select_thread_ilocked picks a
3183 			 * thread from proc->waiting_threads to enqueue
3184 			 * the transaction, and nothing is queued to the
3185 			 * todo list while the thread is on waiting_threads.
3186 			 */
3187 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3188 					  proc->pid, thread->pid);
3189 			binder_inner_proc_unlock(proc);
3190 			return_error = BR_FAILED_REPLY;
3191 			return_error_param = -EPROTO;
3192 			return_error_line = __LINE__;
3193 			goto err_bad_todo_list;
3194 		}
3195 
3196 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3197 			struct binder_transaction *tmp;
3198 
3199 			tmp = thread->transaction_stack;
3200 			if (tmp->to_thread != thread) {
3201 				spin_lock(&tmp->lock);
3202 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3203 					proc->pid, thread->pid, tmp->debug_id,
3204 					tmp->to_proc ? tmp->to_proc->pid : 0,
3205 					tmp->to_thread ?
3206 					tmp->to_thread->pid : 0);
3207 				spin_unlock(&tmp->lock);
3208 				binder_inner_proc_unlock(proc);
3209 				return_error = BR_FAILED_REPLY;
3210 				return_error_param = -EPROTO;
3211 				return_error_line = __LINE__;
3212 				goto err_bad_call_stack;
3213 			}
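			/*
			 * Walk the caller's transaction stack: if some
			 * transaction in this call chain came from the
			 * target process, direct the new transaction to
			 * that thread. It is blocked waiting for a reply
			 * on this stack, so it can service the nested
			 * call without waking another target thread.
			 */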
3214 			while (tmp) {
3215 				struct binder_thread *from;
3216 
3217 				spin_lock(&tmp->lock);
3218 				from = tmp->from;
3219 				if (from && from->proc == target_proc) {
3220 					atomic_inc(&from->tmp_ref);
3221 					target_thread = from;
3222 					spin_unlock(&tmp->lock);
3223 					break;
3224 				}
3225 				spin_unlock(&tmp->lock);
3226 				tmp = tmp->from_parent;
3227 			}
3228 		}
3229 		binder_inner_proc_unlock(proc);
3230 	}
3231 	if (target_thread)
3232 		e->to_thread = target_thread->pid;
3233 	e->to_proc = target_proc->pid;
3234 
3235 	/* TODO: reuse incoming transaction for reply */
3236 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3237 	if (t == NULL) {
3238 		binder_txn_error("%d:%d cannot allocate transaction\n",
3239 			thread->pid, proc->pid);
3240 		return_error = BR_FAILED_REPLY;
3241 		return_error_param = -ENOMEM;
3242 		return_error_line = __LINE__;
3243 		goto err_alloc_t_failed;
3244 	}
3245 	INIT_LIST_HEAD(&t->fd_fixups);
3246 	binder_stats_created(BINDER_STAT_TRANSACTION);
3247 	spin_lock_init(&t->lock);
3248 
3249 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3250 	if (tcomplete == NULL) {
3251 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3252 			thread->pid, proc->pid);
3253 		return_error = BR_FAILED_REPLY;
3254 		return_error_param = -ENOMEM;
3255 		return_error_line = __LINE__;
3256 		goto err_alloc_tcomplete_failed;
3257 	}
3258 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3259 
3260 	t->debug_id = t_debug_id;
3261 	t->start_time = t_start_time;
3262 
3263 	if (reply)
3264 		binder_debug(BINDER_DEBUG_TRANSACTION,
3265 			     "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3266 			     proc->pid, thread->pid, t->debug_id,
3267 			     target_proc->pid, target_thread->pid,
3268 			     (u64)tr->data_size, (u64)tr->offsets_size,
3269 			     (u64)extra_buffers_size);
3270 	else
3271 		binder_debug(BINDER_DEBUG_TRANSACTION,
3272 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3273 			     proc->pid, thread->pid, t->debug_id,
3274 			     target_proc->pid, target_node->debug_id,
3275 			     (u64)tr->data_size, (u64)tr->offsets_size,
3276 			     (u64)extra_buffers_size);
3277 
3278 	if (!reply && !(tr->flags & TF_ONE_WAY))
3279 		t->from = thread;
3280 	else
3281 		t->from = NULL;
3282 	t->from_pid = proc->pid;
3283 	t->from_tid = thread->pid;
3284 	t->sender_euid = task_euid(proc->tsk);
3285 	t->to_proc = target_proc;
3286 	t->to_thread = target_thread;
3287 	t->code = tr->code;
3288 	t->flags = tr->flags;
3289 	t->priority = task_nice(current);
3290 
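	/*
	 * If the target node asked for security contexts, append the
	 * sender's LSM context to the extra objects area of the buffer,
	 * padded to a u64 boundary, and check that growing
	 * extra_buffers_size does not overflow.
	 */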
3291 	if (target_node && target_node->txn_security_ctx) {
3292 		u32 secid;
3293 		size_t added_size;
3294 
3295 		security_cred_getsecid(proc->cred, &secid);
3296 		ret = security_secid_to_secctx(secid, &lsmctx);
3297 		if (ret < 0) {
3298 			binder_txn_error("%d:%d failed to get security context\n",
3299 				thread->pid, proc->pid);
3300 			return_error = BR_FAILED_REPLY;
3301 			return_error_param = ret;
3302 			return_error_line = __LINE__;
3303 			goto err_get_secctx_failed;
3304 		}
3305 		added_size = ALIGN(lsmctx.len, sizeof(u64));
3306 		extra_buffers_size += added_size;
3307 		if (extra_buffers_size < added_size) {
3308 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3309 				thread->pid, proc->pid);
3310 			return_error = BR_FAILED_REPLY;
3311 			return_error_param = -EINVAL;
3312 			return_error_line = __LINE__;
3313 			goto err_bad_extra_size;
3314 		}
3315 	}
3316 
3317 	trace_binder_transaction(reply, t, target_node);
3318 
3319 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3320 		tr->offsets_size, extra_buffers_size,
3321 		!reply && (t->flags & TF_ONE_WAY));
3322 	if (IS_ERR(t->buffer)) {
3323 		char *s;
3324 
3325 		ret = PTR_ERR(t->buffer);
3326 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3327 			: (ret == -ENOSPC) ? ": no space left"
3328 			: (ret == -ENOMEM) ? ": memory allocation failed"
3329 			: "";
3330 		binder_txn_error("cannot allocate buffer%s", s);
3331 
3332 		return_error_param = PTR_ERR(t->buffer);
3333 		return_error = return_error_param == -ESRCH ?
3334 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3335 		return_error_line = __LINE__;
3336 		t->buffer = NULL;
3337 		goto err_binder_alloc_buf_failed;
3338 	}
3339 	if (lsmctx.context) {
3340 		int err;
3341 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3342 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3343 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3344 				    ALIGN(lsmctx.len, sizeof(u64));
3345 
3346 		t->security_ctx = t->buffer->user_data + buf_offset;
3347 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3348 						  t->buffer, buf_offset,
3349 						  lsmctx.context, lsmctx.len);
3350 		if (err) {
3351 			t->security_ctx = 0;
3352 			WARN_ON(1);
3353 		}
3354 		security_release_secctx(&lsmctx);
3355 		lsmctx.context = NULL;
3356 	}
3357 	t->buffer->debug_id = t->debug_id;
3358 	t->buffer->transaction = t;
3359 	t->buffer->target_node = target_node;
3360 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3361 	trace_binder_transaction_alloc_buf(t->buffer);
3362 
3363 	if (binder_alloc_copy_user_to_buffer(
3364 				&target_proc->alloc,
3365 				t->buffer,
3366 				ALIGN(tr->data_size, sizeof(void *)),
3367 				(const void __user *)
3368 					(uintptr_t)tr->data.ptr.offsets,
3369 				tr->offsets_size)) {
3370 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3371 				proc->pid, thread->pid);
3372 		return_error = BR_FAILED_REPLY;
3373 		return_error_param = -EFAULT;
3374 		return_error_line = __LINE__;
3375 		goto err_copy_data_failed;
3376 	}
3377 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3378 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3379 				proc->pid, thread->pid, (u64)tr->offsets_size);
3380 		return_error = BR_FAILED_REPLY;
3381 		return_error_param = -EINVAL;
3382 		return_error_line = __LINE__;
3383 		goto err_bad_offset;
3384 	}
3385 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3386 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3387 				  proc->pid, thread->pid,
3388 				  (u64)extra_buffers_size);
3389 		return_error = BR_FAILED_REPLY;
3390 		return_error_param = -EINVAL;
3391 		return_error_line = __LINE__;
3392 		goto err_bad_offset;
3393 	}
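	/*
	 * Buffer layout: transaction data, then the offsets array, then
	 * scatter-gather buffers, with the security context (if any) at
	 * the very end. All regions are pointer- or u64-aligned.
	 */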
3394 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3395 	buffer_offset = off_start_offset;
3396 	off_end_offset = off_start_offset + tr->offsets_size;
3397 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3398 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3399 		ALIGN(lsmctx.len, sizeof(u64));
3400 	off_min = 0;
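	/*
	 * Translate each object named by the offsets array. User data is
	 * copied into the target buffer lazily, one fragment at a time,
	 * so objects are patched before the bytes around them land in
	 * the target. off_min rejects overlapping or out-of-order
	 * object offsets.
	 */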
3401 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3402 	     buffer_offset += sizeof(binder_size_t)) {
3403 		struct binder_object_header *hdr;
3404 		size_t object_size;
3405 		struct binder_object object;
3406 		binder_size_t object_offset;
3407 		binder_size_t copy_size;
3408 
3409 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3410 						  &object_offset,
3411 						  t->buffer,
3412 						  buffer_offset,
3413 						  sizeof(object_offset))) {
3414 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3415 				thread->pid, proc->pid);
3416 			return_error = BR_FAILED_REPLY;
3417 			return_error_param = -EINVAL;
3418 			return_error_line = __LINE__;
3419 			goto err_bad_offset;
3420 		}
3421 
3422 		/*
3423 		 * Copy the source user buffer up to the next object
3424 		 * that will be processed.
3425 		 */
3426 		copy_size = object_offset - user_offset;
3427 		if (copy_size && (user_offset > object_offset ||
3428 				object_offset > tr->data_size ||
3429 				binder_alloc_copy_user_to_buffer(
3430 					&target_proc->alloc,
3431 					t->buffer, user_offset,
3432 					user_buffer + user_offset,
3433 					copy_size))) {
3434 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3435 					proc->pid, thread->pid);
3436 			return_error = BR_FAILED_REPLY;
3437 			return_error_param = -EFAULT;
3438 			return_error_line = __LINE__;
3439 			goto err_copy_data_failed;
3440 		}
3441 		object_size = binder_get_object(target_proc, user_buffer,
3442 				t->buffer, object_offset, &object);
3443 		if (object_size == 0 || object_offset < off_min) {
3444 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3445 					  proc->pid, thread->pid,
3446 					  (u64)object_offset,
3447 					  (u64)off_min,
3448 					  (u64)t->buffer->data_size);
3449 			return_error = BR_FAILED_REPLY;
3450 			return_error_param = -EINVAL;
3451 			return_error_line = __LINE__;
3452 			goto err_bad_offset;
3453 		}
3454 		/*
3455 		 * Set offset to the next buffer fragment to be
3456 		 * copied
3457 		 */
3458 		user_offset = object_offset + object_size;
3459 
3460 		hdr = &object.hdr;
3461 		off_min = object_offset + object_size;
3462 		switch (hdr->type) {
3463 		case BINDER_TYPE_BINDER:
3464 		case BINDER_TYPE_WEAK_BINDER: {
3465 			struct flat_binder_object *fp;
3466 
3467 			fp = to_flat_binder_object(hdr);
3468 			ret = binder_translate_binder(fp, t, thread);
3469 
3470 			if (ret < 0 ||
3471 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3472 							t->buffer,
3473 							object_offset,
3474 							fp, sizeof(*fp))) {
3475 				binder_txn_error("%d:%d translate binder failed\n",
3476 					thread->pid, proc->pid);
3477 				return_error = BR_FAILED_REPLY;
3478 				return_error_param = ret;
3479 				return_error_line = __LINE__;
3480 				goto err_translate_failed;
3481 			}
3482 		} break;
3483 		case BINDER_TYPE_HANDLE:
3484 		case BINDER_TYPE_WEAK_HANDLE: {
3485 			struct flat_binder_object *fp;
3486 
3487 			fp = to_flat_binder_object(hdr);
3488 			ret = binder_translate_handle(fp, t, thread);
3489 			if (ret < 0 ||
3490 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3491 							t->buffer,
3492 							object_offset,
3493 							fp, sizeof(*fp))) {
3494 				binder_txn_error("%d:%d translate handle failed\n",
3495 					thread->pid, proc->pid);
3496 				return_error = BR_FAILED_REPLY;
3497 				return_error_param = ret;
3498 				return_error_line = __LINE__;
3499 				goto err_translate_failed;
3500 			}
3501 		} break;
3502 
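		/*
		 * fd_offset below is object_offset plus the offset of the
		 * fd field within the object; the translated fd is written
		 * back there once it is installed in the target process
		 * (see binder_apply_fd_fixups()).
		 */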
3503 		case BINDER_TYPE_FD: {
3504 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3505 			binder_size_t fd_offset = object_offset +
3506 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3507 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3508 						      thread, in_reply_to);
3509 
3510 			fp->pad_binder = 0;
3511 			if (ret < 0 ||
3512 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3513 							t->buffer,
3514 							object_offset,
3515 							fp, sizeof(*fp))) {
3516 				binder_txn_error("%d:%d translate fd failed\n",
3517 					thread->pid, proc->pid);
3518 				return_error = BR_FAILED_REPLY;
3519 				return_error_param = ret;
3520 				return_error_line = __LINE__;
3521 				goto err_translate_failed;
3522 			}
3523 		} break;
3524 		case BINDER_TYPE_FDA: {
3525 			struct binder_object ptr_object;
3526 			binder_size_t parent_offset;
3527 			struct binder_object user_object;
3528 			size_t user_parent_size;
3529 			struct binder_fd_array_object *fda =
3530 				to_binder_fd_array_object(hdr);
3531 			size_t num_valid = (buffer_offset - off_start_offset) /
3532 						sizeof(binder_size_t);
3533 			struct binder_buffer_object *parent =
3534 				binder_validate_ptr(target_proc, t->buffer,
3535 						    &ptr_object, fda->parent,
3536 						    off_start_offset,
3537 						    &parent_offset,
3538 						    num_valid);
3539 			if (!parent) {
3540 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3541 						  proc->pid, thread->pid);
3542 				return_error = BR_FAILED_REPLY;
3543 				return_error_param = -EINVAL;
3544 				return_error_line = __LINE__;
3545 				goto err_bad_parent;
3546 			}
3547 			if (!binder_validate_fixup(target_proc, t->buffer,
3548 						   off_start_offset,
3549 						   parent_offset,
3550 						   fda->parent_offset,
3551 						   last_fixup_obj_off,
3552 						   last_fixup_min_off)) {
3553 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3554 						  proc->pid, thread->pid);
3555 				return_error = BR_FAILED_REPLY;
3556 				return_error_param = -EINVAL;
3557 				return_error_line = __LINE__;
3558 				goto err_bad_parent;
3559 			}
3560 			/*
3561 			 * We need to read the user version of the parent
3562 			 * object to get the original user offset
3563 			 */
3564 			user_parent_size =
3565 				binder_get_object(proc, user_buffer, t->buffer,
3566 						  parent_offset, &user_object);
3567 			if (user_parent_size != sizeof(user_object.bbo)) {
3568 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3569 						  proc->pid, thread->pid,
3570 						  user_parent_size,
3571 						  sizeof(user_object.bbo));
3572 				return_error = BR_FAILED_REPLY;
3573 				return_error_param = -EINVAL;
3574 				return_error_line = __LINE__;
3575 				goto err_bad_parent;
3576 			}
3577 			ret = binder_translate_fd_array(&pf_head, fda,
3578 							user_buffer, parent,
3579 							&user_object.bbo, t,
3580 							thread, in_reply_to);
3581 			if (!ret)
3582 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3583 								  t->buffer,
3584 								  object_offset,
3585 								  fda, sizeof(*fda));
3586 			if (ret) {
3587 				binder_txn_error("%d:%d translate fd array failed\n",
3588 					thread->pid, proc->pid);
3589 				return_error = BR_FAILED_REPLY;
3590 				return_error_param = ret > 0 ? -EINVAL : ret;
3591 				return_error_line = __LINE__;
3592 				goto err_translate_failed;
3593 			}
3594 			last_fixup_obj_off = parent_offset;
3595 			last_fixup_min_off =
3596 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3597 		} break;
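		/*
		 * BINDER_TYPE_PTR carries a user buffer sent by
		 * scatter-gather. The actual copy is deferred via
		 * binder_defer_copy() so that parent fixups applied later
		 * are not overwritten by the raw user data (see
		 * binder_do_deferred_txn_copies() below).
		 */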
3598 		case BINDER_TYPE_PTR: {
3599 			struct binder_buffer_object *bp =
3600 				to_binder_buffer_object(hdr);
3601 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3602 			size_t num_valid;
3603 
3604 			if (bp->length > buf_left) {
3605 				binder_user_error("%d:%d got transaction with too large buffer\n",
3606 						  proc->pid, thread->pid);
3607 				return_error = BR_FAILED_REPLY;
3608 				return_error_param = -EINVAL;
3609 				return_error_line = __LINE__;
3610 				goto err_bad_offset;
3611 			}
3612 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3613 				(const void __user *)(uintptr_t)bp->buffer,
3614 				bp->length);
3615 			if (ret) {
3616 				binder_txn_error("%d:%d deferred copy failed\n",
3617 					thread->pid, proc->pid);
3618 				return_error = BR_FAILED_REPLY;
3619 				return_error_param = ret;
3620 				return_error_line = __LINE__;
3621 				goto err_translate_failed;
3622 			}
3623 			/* Fixup buffer pointer to target proc address space */
3624 			bp->buffer = t->buffer->user_data + sg_buf_offset;
3625 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3626 
3627 			num_valid = (buffer_offset - off_start_offset) /
3628 					sizeof(binder_size_t);
3629 			ret = binder_fixup_parent(&pf_head, t,
3630 						  thread, bp,
3631 						  off_start_offset,
3632 						  num_valid,
3633 						  last_fixup_obj_off,
3634 						  last_fixup_min_off);
3635 			if (ret < 0 ||
3636 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3637 							t->buffer,
3638 							object_offset,
3639 							bp, sizeof(*bp))) {
3640 				binder_txn_error("%d:%d failed to fixup parent\n",
3641 					thread->pid, proc->pid);
3642 				return_error = BR_FAILED_REPLY;
3643 				return_error_param = ret;
3644 				return_error_line = __LINE__;
3645 				goto err_translate_failed;
3646 			}
3647 			last_fixup_obj_off = object_offset;
3648 			last_fixup_min_off = 0;
3649 		} break;
3650 		default:
3651 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3652 				proc->pid, thread->pid, hdr->type);
3653 			return_error = BR_FAILED_REPLY;
3654 			return_error_param = -EINVAL;
3655 			return_error_line = __LINE__;
3656 			goto err_bad_object_type;
3657 		}
3658 	}
3659 	/* Done processing objects, copy the rest of the buffer */
3660 	if (binder_alloc_copy_user_to_buffer(
3661 				&target_proc->alloc,
3662 				t->buffer, user_offset,
3663 				user_buffer + user_offset,
3664 				tr->data_size - user_offset)) {
3665 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3666 				proc->pid, thread->pid);
3667 		return_error = BR_FAILED_REPLY;
3668 		return_error_param = -EFAULT;
3669 		return_error_line = __LINE__;
3670 		goto err_copy_data_failed;
3671 	}
3672 
3673 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3674 					    &sgc_head, &pf_head);
3675 	if (ret) {
3676 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3677 				  proc->pid, thread->pid);
3678 		return_error = BR_FAILED_REPLY;
3679 		return_error_param = ret;
3680 		return_error_line = __LINE__;
3681 		goto err_copy_data_failed;
3682 	}
3683 	if (t->buffer->oneway_spam_suspect)
3684 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3685 	else
3686 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3687 	t->work.type = BINDER_WORK_TRANSACTION;
3688 
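	/*
	 * Deliver the transaction. Three cases: a reply is popped from
	 * the target thread's stack and handed straight to it; a
	 * synchronous call is pushed onto the caller's stack and routed
	 * by binder_proc_transaction(); a one-way call goes through
	 * binder_proc_transaction() with no target thread and may be
	 * queued on the node's async_todo.
	 */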
3689 	if (reply) {
3690 		binder_enqueue_thread_work(thread, tcomplete);
3691 		binder_inner_proc_lock(target_proc);
3692 		if (target_thread->is_dead) {
3693 			return_error = BR_DEAD_REPLY;
3694 			binder_inner_proc_unlock(target_proc);
3695 			goto err_dead_proc_or_thread;
3696 		}
3697 		BUG_ON(t->buffer->async_transaction != 0);
3698 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3699 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3700 		target_proc->outstanding_txns++;
3701 		binder_inner_proc_unlock(target_proc);
3702 		wake_up_interruptible_sync(&target_thread->wait);
3703 		binder_free_transaction(in_reply_to);
3704 	} else if (!(t->flags & TF_ONE_WAY)) {
3705 		BUG_ON(t->buffer->async_transaction != 0);
3706 		binder_inner_proc_lock(proc);
3707 		/*
3708 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3709 		 * userspace immediately; this allows the target process to
3710 		 * start processing this transaction right away, reducing
3711 		 * latency. We will then return the TRANSACTION_COMPLETE when
3712 		 * the target replies (or there is an error).
3713 		 */
3714 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3715 		t->need_reply = 1;
3716 		t->from_parent = thread->transaction_stack;
3717 		thread->transaction_stack = t;
3718 		binder_inner_proc_unlock(proc);
3719 		return_error = binder_proc_transaction(t,
3720 				target_proc, target_thread);
3721 		if (return_error) {
3722 			binder_inner_proc_lock(proc);
3723 			binder_pop_transaction_ilocked(thread, t);
3724 			binder_inner_proc_unlock(proc);
3725 			goto err_dead_proc_or_thread;
3726 		}
3727 	} else {
3728 		BUG_ON(target_node == NULL);
3729 		BUG_ON(t->buffer->async_transaction != 1);
3730 		return_error = binder_proc_transaction(t, target_proc, NULL);
3731 		/*
3732 		 * Let the caller know when async transaction reaches a frozen
3733 		 * process and is put in a pending queue, waiting for the target
3734 		 * process to be unfrozen.
3735 		 */
3736 		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3737 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3738 		binder_enqueue_thread_work(thread, tcomplete);
3739 		if (return_error &&
3740 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3741 			goto err_dead_proc_or_thread;
3742 	}
3743 	if (target_thread)
3744 		binder_thread_dec_tmpref(target_thread);
3745 	binder_proc_dec_tmpref(target_proc);
3746 	if (target_node)
3747 		binder_dec_node_tmpref(target_node);
3748 	/*
3749 	 * write barrier to synchronize with initialization
3750 	 * of log entry
3751 	 */
3752 	smp_wmb();
3753 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3754 	return;
3755 
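/*
 * Error unwinding: the labels run from the most recent successful step
 * back to the first, releasing only what was set up before the failure
 * point.
 */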
3756 err_dead_proc_or_thread:
3757 	binder_txn_error("%d:%d dead process or thread\n",
3758 		thread->pid, proc->pid);
3759 	return_error_line = __LINE__;
3760 	binder_dequeue_work(proc, tcomplete);
3761 err_translate_failed:
3762 err_bad_object_type:
3763 err_bad_offset:
3764 err_bad_parent:
3765 err_copy_data_failed:
3766 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3767 	binder_free_txn_fixups(t);
3768 	trace_binder_transaction_failed_buffer_release(t->buffer);
3769 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3770 					  buffer_offset, true);
3771 	if (target_node)
3772 		binder_dec_node_tmpref(target_node);
3773 	target_node = NULL;
3774 	t->buffer->transaction = NULL;
3775 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3776 err_binder_alloc_buf_failed:
3777 err_bad_extra_size:
3778 	if (lsmctx.context)
3779 		security_release_secctx(&lsmctx);
3780 err_get_secctx_failed:
3781 	kfree(tcomplete);
3782 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3783 err_alloc_tcomplete_failed:
3784 	if (trace_binder_txn_latency_free_enabled())
3785 		binder_txn_latency_free(t);
3786 	kfree(t);
3787 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3788 err_alloc_t_failed:
3789 err_bad_todo_list:
3790 err_bad_call_stack:
3791 err_empty_call_stack:
3792 err_dead_binder:
3793 err_invalid_target_handle:
3794 	if (target_node) {
3795 		binder_dec_node(target_node, 1, 0);
3796 		binder_dec_node_tmpref(target_node);
3797 	}
3798 
3799 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3800 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3801 		     proc->pid, thread->pid, reply ? "reply" :
3802 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3803 		     target_proc ? target_proc->pid : 0,
3804 		     target_thread ? target_thread->pid : 0,
3805 		     t_debug_id, return_error, return_error_param,
3806 		     tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3807 		     return_error_line);
3808 
3809 	if (target_thread)
3810 		binder_thread_dec_tmpref(target_thread);
3811 	if (target_proc)
3812 		binder_proc_dec_tmpref(target_proc);
3813 
3814 	{
3815 		struct binder_transaction_log_entry *fe;
3816 
3817 		e->return_error = return_error;
3818 		e->return_error_param = return_error_param;
3819 		e->return_error_line = return_error_line;
3820 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3821 		*fe = *e;
3822 		/*
3823 		 * write barrier to synchronize with initialization
3824 		 * of log entry
3825 		 */
3826 		smp_wmb();
3827 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3828 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3829 	}
3830 
3831 	BUG_ON(thread->return_error.cmd != BR_OK);
3832 	if (in_reply_to) {
3833 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3834 				return_error, return_error_param);
3835 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3836 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3837 		binder_send_failed_reply(in_reply_to, return_error);
3838 	} else {
3839 		binder_inner_proc_lock(proc);
3840 		binder_set_extended_error(&thread->ee, t_debug_id,
3841 				return_error, return_error_param);
3842 		binder_inner_proc_unlock(proc);
3843 		thread->return_error.cmd = return_error;
3844 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3845 	}
3846 }
3847 
3848 static int
3849 binder_request_freeze_notification(struct binder_proc *proc,
3850 				   struct binder_thread *thread,
3851 				   struct binder_handle_cookie *handle_cookie)
3852 {
3853 	struct binder_ref_freeze *freeze;
3854 	struct binder_ref *ref;
3855 
3856 	freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3857 	if (!freeze)
3858 		return -ENOMEM;
3859 	binder_proc_lock(proc);
3860 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3861 	if (!ref) {
3862 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3863 				  proc->pid, thread->pid, handle_cookie->handle);
3864 		binder_proc_unlock(proc);
3865 		kfree(freeze);
3866 		return -EINVAL;
3867 	}
3868 
3869 	binder_node_lock(ref->node);
3870 	if (ref->freeze) {
3871 		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3872 				  proc->pid, thread->pid);
3873 		binder_node_unlock(ref->node);
3874 		binder_proc_unlock(proc);
3875 		kfree(freeze);
3876 		return -EINVAL;
3877 	}
3878 
3879 	binder_stats_created(BINDER_STAT_FREEZE);
3880 	INIT_LIST_HEAD(&freeze->work.entry);
3881 	freeze->cookie = handle_cookie->cookie;
3882 	freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3883 	ref->freeze = freeze;
3884 
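	/*
	 * If the node's owning process is still alive, snapshot its
	 * current frozen state and queue an initial notification so the
	 * listener learns the state at registration time.
	 */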
3885 	if (ref->node->proc) {
3886 		binder_inner_proc_lock(ref->node->proc);
3887 		freeze->is_frozen = ref->node->proc->is_frozen;
3888 		binder_inner_proc_unlock(ref->node->proc);
3889 
3890 		binder_inner_proc_lock(proc);
3891 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3892 		binder_wakeup_proc_ilocked(proc);
3893 		binder_inner_proc_unlock(proc);
3894 	}
3895 
3896 	binder_node_unlock(ref->node);
3897 	binder_proc_unlock(proc);
3898 	return 0;
3899 }
3900 
3901 static int
3902 binder_clear_freeze_notification(struct binder_proc *proc,
3903 				 struct binder_thread *thread,
3904 				 struct binder_handle_cookie *handle_cookie)
3905 {
3906 	struct binder_ref_freeze *freeze;
3907 	struct binder_ref *ref;
3908 
3909 	binder_proc_lock(proc);
3910 	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3911 	if (!ref) {
3912 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3913 				  proc->pid, thread->pid, handle_cookie->handle);
3914 		binder_proc_unlock(proc);
3915 		return -EINVAL;
3916 	}
3917 
3918 	binder_node_lock(ref->node);
3919 
3920 	if (!ref->freeze) {
3921 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3922 				  proc->pid, thread->pid);
3923 		binder_node_unlock(ref->node);
3924 		binder_proc_unlock(proc);
3925 		return -EINVAL;
3926 	}
3927 	freeze = ref->freeze;
3928 	binder_inner_proc_lock(proc);
3929 	if (freeze->cookie != handle_cookie->cookie) {
3930 		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3931 				  proc->pid, thread->pid, (u64)freeze->cookie,
3932 				  (u64)handle_cookie->cookie);
3933 		binder_inner_proc_unlock(proc);
3934 		binder_node_unlock(ref->node);
3935 		binder_proc_unlock(proc);
3936 		return -EINVAL;
3937 	}
3938 	ref->freeze = NULL;
3939 	/*
3940 	 * Take the existing freeze object and overwrite its work type. There are three cases here:
3941 	 * 1. No pending notification. In this case just add the work to the queue.
3942 	 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
3943 	 *    should resend with the new work type.
3944 	 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
3945 	 *    needs to be done here.
3946 	 */
3947 	freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
3948 	if (list_empty(&freeze->work.entry)) {
3949 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3950 		binder_wakeup_proc_ilocked(proc);
3951 	} else if (freeze->sent) {
3952 		freeze->resend = true;
3953 	}
3954 	binder_inner_proc_unlock(proc);
3955 	binder_node_unlock(ref->node);
3956 	binder_proc_unlock(proc);
3957 	return 0;
3958 }
3959 
3960 static int
3961 binder_freeze_notification_done(struct binder_proc *proc,
3962 				struct binder_thread *thread,
3963 				binder_uintptr_t cookie)
3964 {
3965 	struct binder_ref_freeze *freeze = NULL;
3966 	struct binder_work *w;
3967 
3968 	binder_inner_proc_lock(proc);
3969 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
3970 		struct binder_ref_freeze *tmp_freeze =
3971 			container_of(w, struct binder_ref_freeze, work);
3972 
3973 		if (tmp_freeze->cookie == cookie) {
3974 			freeze = tmp_freeze;
3975 			break;
3976 		}
3977 	}
3978 	if (!freeze) {
3979 		binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
3980 				  proc->pid, thread->pid, (u64)cookie);
3981 		binder_inner_proc_unlock(proc);
3982 		return -EINVAL;
3983 	}
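	/*
	 * The ack consumes the delivered notification. If a state change
	 * was recorded while this one was outstanding, requeue the work
	 * so the new state is delivered.
	 */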
3984 	binder_dequeue_work_ilocked(&freeze->work);
3985 	freeze->sent = false;
3986 	if (freeze->resend) {
3987 		freeze->resend = false;
3988 		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3989 		binder_wakeup_proc_ilocked(proc);
3990 	}
3991 	binder_inner_proc_unlock(proc);
3992 	return 0;
3993 }
3994 
3995 /**
3996  * binder_free_buf() - free the specified buffer
3997  * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
3998  * @buffer:	buffer to be freed
3999  * @is_failure:	failed to send transaction
4000  *
4001  * If the buffer is for an async transaction, enqueue the next async
4002  * transaction from the node.
4003  *
4004  * Clean up the buffer and free it.
4005  */
4006 static void
4007 binder_free_buf(struct binder_proc *proc,
4008 		struct binder_thread *thread,
4009 		struct binder_buffer *buffer, bool is_failure)
4010 {
4011 	binder_inner_proc_lock(proc);
4012 	if (buffer->transaction) {
4013 		buffer->transaction->buffer = NULL;
4014 		buffer->transaction = NULL;
4015 	}
4016 	binder_inner_proc_unlock(proc);
4017 	if (buffer->async_transaction && buffer->target_node) {
4018 		struct binder_node *buf_node;
4019 		struct binder_work *w;
4020 
4021 		buf_node = buffer->target_node;
4022 		binder_node_inner_lock(buf_node);
4023 		BUG_ON(!buf_node->has_async_transaction);
4024 		BUG_ON(buf_node->proc != proc);
4025 		w = binder_dequeue_work_head_ilocked(
4026 				&buf_node->async_todo);
4027 		if (!w) {
4028 			buf_node->has_async_transaction = false;
4029 		} else {
4030 			binder_enqueue_work_ilocked(
4031 					w, &proc->todo);
4032 			binder_wakeup_proc_ilocked(proc);
4033 		}
4034 		binder_node_inner_unlock(buf_node);
4035 	}
4036 	trace_binder_transaction_buffer_release(buffer);
4037 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
4038 	binder_alloc_free_buf(&proc->alloc, buffer);
4039 }
4040 
4041 static int binder_thread_write(struct binder_proc *proc,
4042 			struct binder_thread *thread,
4043 			binder_uintptr_t binder_buffer, size_t size,
4044 			binder_size_t *consumed)
4045 {
4046 	uint32_t cmd;
4047 	struct binder_context *context = proc->context;
4048 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4049 	void __user *ptr = buffer + *consumed;
4050 	void __user *end = buffer + size;
4051 
4052 	while (ptr < end && thread->return_error.cmd == BR_OK) {
4053 		int ret;
4054 
4055 		if (get_user(cmd, (uint32_t __user *)ptr))
4056 			return -EFAULT;
4057 		ptr += sizeof(uint32_t);
4058 		trace_binder_command(cmd);
4059 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4060 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4061 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4062 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4063 		}
4064 		switch (cmd) {
4065 		case BC_INCREFS:
4066 		case BC_ACQUIRE:
4067 		case BC_RELEASE:
4068 		case BC_DECREFS: {
4069 			uint32_t target;
4070 			const char *debug_string;
4071 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4072 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4073 			struct binder_ref_data rdata;
4074 
4075 			if (get_user(target, (uint32_t __user *)ptr))
4076 				return -EFAULT;
4077 
4078 			ptr += sizeof(uint32_t);
4079 			ret = -1;
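			/*
			 * Handle 0 refers to the context manager node; an
			 * increment there may need to create the ref, so it
			 * goes through binder_inc_ref_for_node(). Any other
			 * handle (or a failure here, ret != 0) falls back to
			 * binder_update_ref_for_handle().
			 */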
4080 			if (increment && !target) {
4081 				struct binder_node *ctx_mgr_node;
4082 
4083 				mutex_lock(&context->context_mgr_node_lock);
4084 				ctx_mgr_node = context->binder_context_mgr_node;
4085 				if (ctx_mgr_node) {
4086 					if (ctx_mgr_node->proc == proc) {
4087 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4088 								  proc->pid, thread->pid);
4089 						mutex_unlock(&context->context_mgr_node_lock);
4090 						return -EINVAL;
4091 					}
4092 					ret = binder_inc_ref_for_node(
4093 							proc, ctx_mgr_node,
4094 							strong, NULL, &rdata);
4095 				}
4096 				mutex_unlock(&context->context_mgr_node_lock);
4097 			}
4098 			if (ret)
4099 				ret = binder_update_ref_for_handle(
4100 						proc, target, increment, strong,
4101 						&rdata);
4102 			if (!ret && rdata.desc != target) {
4103 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4104 					proc->pid, thread->pid,
4105 					target, rdata.desc);
4106 			}
4107 			switch (cmd) {
4108 			case BC_INCREFS:
4109 				debug_string = "IncRefs";
4110 				break;
4111 			case BC_ACQUIRE:
4112 				debug_string = "Acquire";
4113 				break;
4114 			case BC_RELEASE:
4115 				debug_string = "Release";
4116 				break;
4117 			case BC_DECREFS:
4118 			default:
4119 				debug_string = "DecRefs";
4120 				break;
4121 			}
4122 			if (ret) {
4123 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4124 					proc->pid, thread->pid, debug_string,
4125 					strong, target, ret);
4126 				break;
4127 			}
4128 			binder_debug(BINDER_DEBUG_USER_REFS,
4129 				     "%d:%d %s ref %d desc %d s %d w %d\n",
4130 				     proc->pid, thread->pid, debug_string,
4131 				     rdata.debug_id, rdata.desc, rdata.strong,
4132 				     rdata.weak);
4133 			break;
4134 		}
4135 		case BC_INCREFS_DONE:
4136 		case BC_ACQUIRE_DONE: {
4137 			binder_uintptr_t node_ptr;
4138 			binder_uintptr_t cookie;
4139 			struct binder_node *node;
4140 			bool free_node;
4141 
4142 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4143 				return -EFAULT;
4144 			ptr += sizeof(binder_uintptr_t);
4145 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4146 				return -EFAULT;
4147 			ptr += sizeof(binder_uintptr_t);
4148 			node = binder_get_node(proc, node_ptr);
4149 			if (node == NULL) {
4150 				binder_user_error("%d:%d %s u%016llx no match\n",
4151 					proc->pid, thread->pid,
4152 					cmd == BC_INCREFS_DONE ?
4153 					"BC_INCREFS_DONE" :
4154 					"BC_ACQUIRE_DONE",
4155 					(u64)node_ptr);
4156 				break;
4157 			}
4158 			if (cookie != node->cookie) {
4159 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4160 					proc->pid, thread->pid,
4161 					cmd == BC_INCREFS_DONE ?
4162 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4163 					(u64)node_ptr, node->debug_id,
4164 					(u64)cookie, (u64)node->cookie);
4165 				binder_put_node(node);
4166 				break;
4167 			}
4168 			binder_node_inner_lock(node);
4169 			if (cmd == BC_ACQUIRE_DONE) {
4170 				if (node->pending_strong_ref == 0) {
4171 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4172 						proc->pid, thread->pid,
4173 						node->debug_id);
4174 					binder_node_inner_unlock(node);
4175 					binder_put_node(node);
4176 					break;
4177 				}
4178 				node->pending_strong_ref = 0;
4179 			} else {
4180 				if (node->pending_weak_ref == 0) {
4181 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4182 						proc->pid, thread->pid,
4183 						node->debug_id);
4184 					binder_node_inner_unlock(node);
4185 					binder_put_node(node);
4186 					break;
4187 				}
4188 				node->pending_weak_ref = 0;
4189 			}
4190 			free_node = binder_dec_node_nilocked(node,
4191 					cmd == BC_ACQUIRE_DONE, 0);
4192 			WARN_ON(free_node);
4193 			binder_debug(BINDER_DEBUG_USER_REFS,
4194 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
4195 				     proc->pid, thread->pid,
4196 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4197 				     node->debug_id, node->local_strong_refs,
4198 				     node->local_weak_refs, node->tmp_refs);
4199 			binder_node_inner_unlock(node);
4200 			binder_put_node(node);
4201 			break;
4202 		}
4203 		case BC_ATTEMPT_ACQUIRE:
4204 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4205 			return -EINVAL;
4206 		case BC_ACQUIRE_RESULT:
4207 			pr_err("BC_ACQUIRE_RESULT not supported\n");
4208 			return -EINVAL;
4209 
4210 		case BC_FREE_BUFFER: {
4211 			binder_uintptr_t data_ptr;
4212 			struct binder_buffer *buffer;
4213 
4214 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4215 				return -EFAULT;
4216 			ptr += sizeof(binder_uintptr_t);
4217 
4218 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
4219 							      data_ptr);
4220 			if (IS_ERR_OR_NULL(buffer)) {
4221 				if (PTR_ERR(buffer) == -EPERM) {
4222 					binder_user_error(
4223 						"%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4224 						proc->pid, thread->pid,
4225 						(unsigned long)data_ptr - proc->alloc.vm_start);
4226 				} else {
4227 					binder_user_error(
4228 						"%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4229 						proc->pid, thread->pid,
4230 						(unsigned long)data_ptr - proc->alloc.vm_start);
4231 				}
4232 				break;
4233 			}
4234 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4235 				     "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4236 				     proc->pid, thread->pid,
4237 				     (unsigned long)data_ptr - proc->alloc.vm_start,
4238 				     buffer->debug_id,
4239 				     buffer->transaction ? "active" : "finished");
4240 			binder_free_buf(proc, thread, buffer, false);
4241 			break;
4242 		}
4243 
4244 		case BC_TRANSACTION_SG:
4245 		case BC_REPLY_SG: {
4246 			struct binder_transaction_data_sg tr;
4247 
4248 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4249 				return -EFAULT;
4250 			ptr += sizeof(tr);
4251 			binder_transaction(proc, thread, &tr.transaction_data,
4252 					   cmd == BC_REPLY_SG, tr.buffers_size);
4253 			break;
4254 		}
4255 		case BC_TRANSACTION:
4256 		case BC_REPLY: {
4257 			struct binder_transaction_data tr;
4258 
4259 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4260 				return -EFAULT;
4261 			ptr += sizeof(tr);
4262 			binder_transaction(proc, thread, &tr,
4263 					   cmd == BC_REPLY, 0);
4264 			break;
4265 		}
4266 
4267 		case BC_REGISTER_LOOPER:
4268 			binder_debug(BINDER_DEBUG_THREADS,
4269 				     "%d:%d BC_REGISTER_LOOPER\n",
4270 				     proc->pid, thread->pid);
4271 			binder_inner_proc_lock(proc);
4272 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4273 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4274 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4275 					proc->pid, thread->pid);
4276 			} else if (proc->requested_threads == 0) {
4277 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4278 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4279 					proc->pid, thread->pid);
4280 			} else {
4281 				proc->requested_threads--;
4282 				proc->requested_threads_started++;
4283 			}
4284 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4285 			binder_inner_proc_unlock(proc);
4286 			break;
4287 		case BC_ENTER_LOOPER:
4288 			binder_debug(BINDER_DEBUG_THREADS,
4289 				     "%d:%d BC_ENTER_LOOPER\n",
4290 				     proc->pid, thread->pid);
4291 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4292 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4293 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4294 					proc->pid, thread->pid);
4295 			}
4296 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4297 			break;
4298 		case BC_EXIT_LOOPER:
4299 			binder_debug(BINDER_DEBUG_THREADS,
4300 				     "%d:%d BC_EXIT_LOOPER\n",
4301 				     proc->pid, thread->pid);
4302 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4303 			break;
4304 
4305 		case BC_REQUEST_DEATH_NOTIFICATION:
4306 		case BC_CLEAR_DEATH_NOTIFICATION: {
4307 			uint32_t target;
4308 			binder_uintptr_t cookie;
4309 			struct binder_ref *ref;
4310 			struct binder_ref_death *death = NULL;
4311 
4312 			if (get_user(target, (uint32_t __user *)ptr))
4313 				return -EFAULT;
4314 			ptr += sizeof(uint32_t);
4315 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4316 				return -EFAULT;
4317 			ptr += sizeof(binder_uintptr_t);
4318 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4319 				/*
4320 				 * Allocate memory for death notification
4321 				 * before taking lock
4322 				 */
4323 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4324 				if (death == NULL) {
4325 					WARN_ON(thread->return_error.cmd !=
4326 						BR_OK);
4327 					thread->return_error.cmd = BR_ERROR;
4328 					binder_enqueue_thread_work(
4329 						thread,
4330 						&thread->return_error.work);
4331 					binder_debug(
4332 						BINDER_DEBUG_FAILED_TRANSACTION,
4333 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4334 						proc->pid, thread->pid);
4335 					break;
4336 				}
4337 			}
4338 			binder_proc_lock(proc);
4339 			ref = binder_get_ref_olocked(proc, target, false);
4340 			if (ref == NULL) {
4341 				binder_user_error("%d:%d %s invalid ref %d\n",
4342 					proc->pid, thread->pid,
4343 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4344 					"BC_REQUEST_DEATH_NOTIFICATION" :
4345 					"BC_CLEAR_DEATH_NOTIFICATION",
4346 					target);
4347 				binder_proc_unlock(proc);
4348 				kfree(death);
4349 				break;
4350 			}
4351 
4352 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4353 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4354 				     proc->pid, thread->pid,
4355 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4356 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4357 				     "BC_CLEAR_DEATH_NOTIFICATION",
4358 				     (u64)cookie, ref->data.debug_id,
4359 				     ref->data.desc, ref->data.strong,
4360 				     ref->data.weak, ref->node->debug_id);
4361 
4362 			binder_node_lock(ref->node);
4363 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4364 				if (ref->death) {
4365 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4366 						proc->pid, thread->pid);
4367 					binder_node_unlock(ref->node);
4368 					binder_proc_unlock(proc);
4369 					kfree(death);
4370 					break;
4371 				}
4372 				binder_stats_created(BINDER_STAT_DEATH);
4373 				INIT_LIST_HEAD(&death->work.entry);
4374 				death->cookie = cookie;
4375 				ref->death = death;
4376 				if (ref->node->proc == NULL) {
4377 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4378 
4379 					binder_inner_proc_lock(proc);
4380 					binder_enqueue_work_ilocked(
4381 						&ref->death->work, &proc->todo);
4382 					binder_wakeup_proc_ilocked(proc);
4383 					binder_inner_proc_unlock(proc);
4384 				}
4385 			} else {
4386 				if (ref->death == NULL) {
4387 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4388 						proc->pid, thread->pid);
4389 					binder_node_unlock(ref->node);
4390 					binder_proc_unlock(proc);
4391 					break;
4392 				}
4393 				death = ref->death;
4394 				if (death->cookie != cookie) {
4395 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4396 						proc->pid, thread->pid,
4397 						(u64)death->cookie,
4398 						(u64)cookie);
4399 					binder_node_unlock(ref->node);
4400 					binder_proc_unlock(proc);
4401 					break;
4402 				}
4403 				ref->death = NULL;
4404 				binder_inner_proc_lock(proc);
4405 				if (list_empty(&death->work.entry)) {
4406 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4407 					if (thread->looper &
4408 					    (BINDER_LOOPER_STATE_REGISTERED |
4409 					     BINDER_LOOPER_STATE_ENTERED))
4410 						binder_enqueue_thread_work_ilocked(
4411 								thread,
4412 								&death->work);
4413 					else {
4414 						binder_enqueue_work_ilocked(
4415 								&death->work,
4416 								&proc->todo);
4417 						binder_wakeup_proc_ilocked(
4418 								proc);
4419 					}
4420 				} else {
4421 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4422 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4423 				}
4424 				binder_inner_proc_unlock(proc);
4425 			}
4426 			binder_node_unlock(ref->node);
4427 			binder_proc_unlock(proc);
4428 		} break;
4429 		case BC_DEAD_BINDER_DONE: {
4430 			struct binder_work *w;
4431 			binder_uintptr_t cookie;
4432 			struct binder_ref_death *death = NULL;
4433 
4434 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4435 				return -EFAULT;
4436 
4437 			ptr += sizeof(cookie);
4438 			binder_inner_proc_lock(proc);
4439 			list_for_each_entry(w, &proc->delivered_death,
4440 					    entry) {
4441 				struct binder_ref_death *tmp_death =
4442 					container_of(w,
4443 						     struct binder_ref_death,
4444 						     work);
4445 
4446 				if (tmp_death->cookie == cookie) {
4447 					death = tmp_death;
4448 					break;
4449 				}
4450 			}
4451 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4452 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4453 				     proc->pid, thread->pid, (u64)cookie,
4454 				     death);
4455 			if (death == NULL) {
4456 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4457 					proc->pid, thread->pid, (u64)cookie);
4458 				binder_inner_proc_unlock(proc);
4459 				break;
4460 			}
4461 			binder_dequeue_work_ilocked(&death->work);
4462 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4463 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4464 				if (thread->looper &
4465 					(BINDER_LOOPER_STATE_REGISTERED |
4466 					 BINDER_LOOPER_STATE_ENTERED))
4467 					binder_enqueue_thread_work_ilocked(
4468 						thread, &death->work);
4469 				else {
4470 					binder_enqueue_work_ilocked(
4471 							&death->work,
4472 							&proc->todo);
4473 					binder_wakeup_proc_ilocked(proc);
4474 				}
4475 			}
4476 			binder_inner_proc_unlock(proc);
4477 		} break;
4478 
4479 		case BC_REQUEST_FREEZE_NOTIFICATION: {
4480 			struct binder_handle_cookie handle_cookie;
4481 			int error;
4482 
4483 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4484 				return -EFAULT;
4485 			ptr += sizeof(handle_cookie);
4486 			error = binder_request_freeze_notification(proc, thread,
4487 								   &handle_cookie);
4488 			if (error)
4489 				return error;
4490 		} break;
4491 
4492 		case BC_CLEAR_FREEZE_NOTIFICATION: {
4493 			struct binder_handle_cookie handle_cookie;
4494 			int error;
4495 
4496 			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4497 				return -EFAULT;
4498 			ptr += sizeof(handle_cookie);
4499 			error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4500 			if (error)
4501 				return error;
4502 		} break;
4503 
4504 		case BC_FREEZE_NOTIFICATION_DONE: {
4505 			binder_uintptr_t cookie;
4506 			int error;
4507 
4508 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4509 				return -EFAULT;
4510 
4511 			ptr += sizeof(cookie);
4512 			error = binder_freeze_notification_done(proc, thread, cookie);
4513 			if (error)
4514 				return error;
4515 		} break;
4516 
4517 		default:
4518 			pr_err("%d:%d unknown command %u\n",
4519 			       proc->pid, thread->pid, cmd);
4520 			return -EINVAL;
4521 		}
4522 		*consumed = ptr - buffer;
4523 	}
4524 	return 0;
4525 }
4526 
4527 static void binder_stat_br(struct binder_proc *proc,
4528 			   struct binder_thread *thread, uint32_t cmd)
4529 {
4530 	trace_binder_return(cmd);
4531 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4532 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4533 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4534 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4535 	}
4536 }
4537 
4538 static int binder_put_node_cmd(struct binder_proc *proc,
4539 			       struct binder_thread *thread,
4540 			       void __user **ptrp,
4541 			       binder_uintptr_t node_ptr,
4542 			       binder_uintptr_t node_cookie,
4543 			       int node_debug_id,
4544 			       uint32_t cmd, const char *cmd_name)
4545 {
4546 	void __user *ptr = *ptrp;
4547 
4548 	if (put_user(cmd, (uint32_t __user *)ptr))
4549 		return -EFAULT;
4550 	ptr += sizeof(uint32_t);
4551 
4552 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4553 		return -EFAULT;
4554 	ptr += sizeof(binder_uintptr_t);
4555 
4556 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4557 		return -EFAULT;
4558 	ptr += sizeof(binder_uintptr_t);
4559 
4560 	binder_stat_br(proc, thread, cmd);
4561 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4562 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4563 		     (u64)node_ptr, (u64)node_cookie);
4564 
4565 	*ptrp = ptr;
4566 	return 0;
4567 }
4568 
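/*
 * Block until this thread has work. While waiting, a thread that may
 * handle process-wide work parks on proc->waiting_threads so that
 * binder_select_thread_ilocked() can hand it a transaction directly.
 */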
4569 static int binder_wait_for_work(struct binder_thread *thread,
4570 				bool do_proc_work)
4571 {
4572 	DEFINE_WAIT(wait);
4573 	struct binder_proc *proc = thread->proc;
4574 	int ret = 0;
4575 
4576 	binder_inner_proc_lock(proc);
4577 	for (;;) {
4578 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4579 		if (binder_has_work_ilocked(thread, do_proc_work))
4580 			break;
4581 		if (do_proc_work)
4582 			list_add(&thread->waiting_thread_node,
4583 				 &proc->waiting_threads);
4584 		binder_inner_proc_unlock(proc);
4585 		schedule();
4586 		binder_inner_proc_lock(proc);
4587 		list_del_init(&thread->waiting_thread_node);
4588 		if (signal_pending(current)) {
4589 			ret = -EINTR;
4590 			break;
4591 		}
4592 	}
4593 	finish_wait(&thread->wait, &wait);
4594 	binder_inner_proc_unlock(proc);
4595 
4596 	return ret;
4597 }
4598 
4599 /**
4600  * binder_apply_fd_fixups() - finish fd translation
4601  * @proc:	binder_proc associated with @t->buffer
4602  * @t:		binder transaction with list of fd fixups
4603  *
4604  * Now that we are in the context of the transaction target
4605  * process, we can allocate and install fds. Process the
4606  * list of fds to translate and fix up the buffer with the
4607  * new fds first and only then install the files.
4608  *
4609  * If we fail to allocate an fd, skip the install and release
4610  * any fds that have already been allocated.
4611  */
4612 static int binder_apply_fd_fixups(struct binder_proc *proc,
4613 				  struct binder_transaction *t)
4614 {
4615 	struct binder_txn_fd_fixup *fixup, *tmp;
4616 	int ret = 0;
4617 
4618 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4619 		int fd = get_unused_fd_flags(O_CLOEXEC);
4620 
4621 		if (fd < 0) {
4622 			binder_debug(BINDER_DEBUG_TRANSACTION,
4623 				     "failed fd fixup txn %d fd %d\n",
4624 				     t->debug_id, fd);
4625 			ret = -ENOMEM;
4626 			goto err;
4627 		}
4628 		binder_debug(BINDER_DEBUG_TRANSACTION,
4629 			     "fd fixup txn %d fd %d\n",
4630 			     t->debug_id, fd);
4631 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4632 		fixup->target_fd = fd;
4633 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4634 						fixup->offset, &fd,
4635 						sizeof(u32))) {
4636 			ret = -EINVAL;
4637 			goto err;
4638 		}
4639 	}
4640 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4641 		fd_install(fixup->target_fd, fixup->file);
4642 		list_del(&fixup->fixup_entry);
4643 		kfree(fixup);
4644 	}
4645 
4646 	return ret;
4647 
4648 err:
4649 	binder_free_txn_fixups(t);
4650 	return ret;
4651 }
4652 
4653 static int binder_thread_read(struct binder_proc *proc,
4654 			      struct binder_thread *thread,
4655 			      binder_uintptr_t binder_buffer, size_t size,
4656 			      binder_size_t *consumed, int non_block)
4657 {
4658 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4659 	void __user *ptr = buffer + *consumed;
4660 	void __user *end = buffer + size;
4661 
4662 	int ret = 0;
4663 	int wait_for_proc_work;
4664 
4665 	if (*consumed == 0) {
4666 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4667 			return -EFAULT;
4668 		ptr += sizeof(uint32_t);
4669 	}
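	/*
	 * Every read begins with a BR_NOOP, so the "no data added" check
	 * below treats a buffer holding only those 4 bytes as empty and
	 * retries the wait.
	 */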
4670 
4671 retry:
4672 	binder_inner_proc_lock(proc);
4673 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4674 	binder_inner_proc_unlock(proc);
4675 
4676 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4677 
4678 	trace_binder_wait_for_work(wait_for_proc_work,
4679 				   !!thread->transaction_stack,
4680 				   !binder_worklist_empty(proc, &thread->todo));
4681 	if (wait_for_proc_work) {
4682 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4683 					BINDER_LOOPER_STATE_ENTERED))) {
4684 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4685 				proc->pid, thread->pid, thread->looper);
4686 			wait_event_interruptible(binder_user_error_wait,
4687 						 binder_stop_on_user_error < 2);
4688 		}
4689 		binder_set_nice(proc->default_priority);
4690 	}
4691 
4692 	if (non_block) {
4693 		if (!binder_has_work(thread, wait_for_proc_work))
4694 			ret = -EAGAIN;
4695 	} else {
4696 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4697 	}
4698 
4699 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4700 
4701 	if (ret)
4702 		return ret;
4703 
4704 	while (1) {
4705 		uint32_t cmd;
4706 		struct binder_transaction_data_secctx tr;
4707 		struct binder_transaction_data *trd = &tr.transaction_data;
4708 		struct binder_work *w = NULL;
4709 		struct list_head *list = NULL;
4710 		struct binder_transaction *t = NULL;
4711 		struct binder_thread *t_from;
4712 		size_t trsize = sizeof(*trd);
4713 
4714 		binder_inner_proc_lock(proc);
4715 		if (!binder_worklist_empty_ilocked(&thread->todo))
4716 			list = &thread->todo;
4717 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4718 			   wait_for_proc_work)
4719 			list = &proc->todo;
4720 		else {
4721 			binder_inner_proc_unlock(proc);
4722 
4723 			/* no data added */
4724 			if (ptr - buffer == 4 && !thread->looper_need_return)
4725 				goto retry;
4726 			break;
4727 		}
4728 
4729 		if (end - ptr < sizeof(tr) + 4) {
4730 			binder_inner_proc_unlock(proc);
4731 			break;
4732 		}
4733 		w = binder_dequeue_work_head_ilocked(list);
4734 		if (binder_worklist_empty_ilocked(&thread->todo))
4735 			thread->process_todo = false;
4736 
4737 		switch (w->type) {
4738 		case BINDER_WORK_TRANSACTION: {
4739 			binder_inner_proc_unlock(proc);
4740 			t = container_of(w, struct binder_transaction, work);
4741 		} break;
4742 		case BINDER_WORK_RETURN_ERROR: {
4743 			struct binder_error *e = container_of(
4744 					w, struct binder_error, work);
4745 
4746 			WARN_ON(e->cmd == BR_OK);
4747 			binder_inner_proc_unlock(proc);
4748 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4749 				return -EFAULT;
4750 			cmd = e->cmd;
4751 			e->cmd = BR_OK;
4752 			ptr += sizeof(uint32_t);
4753 
4754 			binder_stat_br(proc, thread, cmd);
4755 		} break;
4756 		case BINDER_WORK_TRANSACTION_COMPLETE:
4757 		case BINDER_WORK_TRANSACTION_PENDING:
4758 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4759 			if (proc->oneway_spam_detection_enabled &&
4760 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4761 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4762 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4763 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4764 			else
4765 				cmd = BR_TRANSACTION_COMPLETE;
4766 			binder_inner_proc_unlock(proc);
4767 			kfree(w);
4768 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4769 			if (put_user(cmd, (uint32_t __user *)ptr))
4770 				return -EFAULT;
4771 			ptr += sizeof(uint32_t);
4772 
4773 			binder_stat_br(proc, thread, cmd);
4774 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4775 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4776 				     proc->pid, thread->pid);
4777 		} break;
4778 		case BINDER_WORK_NODE: {
4779 			struct binder_node *node = container_of(w, struct binder_node, work);
4780 			int strong, weak;
4781 			binder_uintptr_t node_ptr = node->ptr;
4782 			binder_uintptr_t node_cookie = node->cookie;
4783 			int node_debug_id = node->debug_id;
4784 			int has_weak_ref;
4785 			int has_strong_ref;
4786 			void __user *orig_ptr = ptr;
4787 
4788 			BUG_ON(proc != node->proc);
4789 			strong = node->internal_strong_refs ||
4790 					node->local_strong_refs;
4791 			weak = !hlist_empty(&node->refs) ||
4792 					node->local_weak_refs ||
4793 					node->tmp_refs || strong;
4794 			has_strong_ref = node->has_strong_ref;
4795 			has_weak_ref = node->has_weak_ref;
4796 
4797 			if (weak && !has_weak_ref) {
4798 				node->has_weak_ref = 1;
4799 				node->pending_weak_ref = 1;
4800 				node->local_weak_refs++;
4801 			}
4802 			if (strong && !has_strong_ref) {
4803 				node->has_strong_ref = 1;
4804 				node->pending_strong_ref = 1;
4805 				node->local_strong_refs++;
4806 			}
4807 			if (!strong && has_strong_ref)
4808 				node->has_strong_ref = 0;
4809 			if (!weak && has_weak_ref)
4810 				node->has_weak_ref = 0;
4811 			if (!weak && !strong) {
4812 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4813 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4814 					     proc->pid, thread->pid,
4815 					     node_debug_id,
4816 					     (u64)node_ptr,
4817 					     (u64)node_cookie);
4818 				rb_erase(&node->rb_node, &proc->nodes);
4819 				binder_inner_proc_unlock(proc);
4820 				binder_node_lock(node);
4821 				/*
4822 				 * Acquire the node lock before freeing the
4823 				 * node to serialize with other threads that
4824 				 * may have been holding the node lock while
4825 				 * decrementing this node (avoids race where
4826 				 * this thread frees while the other thread
4827 				 * is unlocking the node after the final
4828 				 * decrement)
4829 				 */
4830 				binder_node_unlock(node);
4831 				binder_free_node(node);
4832 			} else
4833 				binder_inner_proc_unlock(proc);
4834 
4835 			if (weak && !has_weak_ref)
4836 				ret = binder_put_node_cmd(
4837 						proc, thread, &ptr, node_ptr,
4838 						node_cookie, node_debug_id,
4839 						BR_INCREFS, "BR_INCREFS");
4840 			if (!ret && strong && !has_strong_ref)
4841 				ret = binder_put_node_cmd(
4842 						proc, thread, &ptr, node_ptr,
4843 						node_cookie, node_debug_id,
4844 						BR_ACQUIRE, "BR_ACQUIRE");
4845 			if (!ret && !strong && has_strong_ref)
4846 				ret = binder_put_node_cmd(
4847 						proc, thread, &ptr, node_ptr,
4848 						node_cookie, node_debug_id,
4849 						BR_RELEASE, "BR_RELEASE");
4850 			if (!ret && !weak && has_weak_ref)
4851 				ret = binder_put_node_cmd(
4852 						proc, thread, &ptr, node_ptr,
4853 						node_cookie, node_debug_id,
4854 						BR_DECREFS, "BR_DECREFS");
4855 			if (orig_ptr == ptr)
4856 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4857 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4858 					     proc->pid, thread->pid,
4859 					     node_debug_id,
4860 					     (u64)node_ptr,
4861 					     (u64)node_cookie);
4862 			if (ret)
4863 				return ret;
4864 		} break;
4865 		case BINDER_WORK_DEAD_BINDER:
4866 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4867 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4868 			struct binder_ref_death *death;
4869 			uint32_t cmd;
4870 			binder_uintptr_t cookie;
4871 
4872 			death = container_of(w, struct binder_ref_death, work);
4873 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4874 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4875 			else
4876 				cmd = BR_DEAD_BINDER;
4877 			cookie = death->cookie;
4878 
4879 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4880 				     "%d:%d %s %016llx\n",
4881 				      proc->pid, thread->pid,
4882 				      cmd == BR_DEAD_BINDER ?
4883 				      "BR_DEAD_BINDER" :
4884 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4885 				      (u64)cookie);
4886 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4887 				binder_inner_proc_unlock(proc);
4888 				kfree(death);
4889 				binder_stats_deleted(BINDER_STAT_DEATH);
4890 			} else {
4891 				binder_enqueue_work_ilocked(
4892 						w, &proc->delivered_death);
4893 				binder_inner_proc_unlock(proc);
4894 			}
4895 			if (put_user(cmd, (uint32_t __user *)ptr))
4896 				return -EFAULT;
4897 			ptr += sizeof(uint32_t);
4898 			if (put_user(cookie,
4899 				     (binder_uintptr_t __user *)ptr))
4900 				return -EFAULT;
4901 			ptr += sizeof(binder_uintptr_t);
4902 			binder_stat_br(proc, thread, cmd);
4903 			if (cmd == BR_DEAD_BINDER)
4904 				goto done; /* DEAD_BINDER notifications can cause transactions */
4905 		} break;
4906 
4907 		case BINDER_WORK_FROZEN_BINDER: {
4908 			struct binder_ref_freeze *freeze;
4909 			struct binder_frozen_state_info info;
4910 
4911 			memset(&info, 0, sizeof(info));
4912 			freeze = container_of(w, struct binder_ref_freeze, work);
4913 			info.is_frozen = freeze->is_frozen;
4914 			info.cookie = freeze->cookie;
4915 			freeze->sent = true;
4916 			binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4917 			binder_inner_proc_unlock(proc);
4918 
4919 			if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4920 				return -EFAULT;
4921 			ptr += sizeof(uint32_t);
4922 			if (copy_to_user(ptr, &info, sizeof(info)))
4923 				return -EFAULT;
4924 			ptr += sizeof(info);
4925 			binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4926 			goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4927 		} break;
4928 
4929 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4930 			struct binder_ref_freeze *freeze =
4931 			    container_of(w, struct binder_ref_freeze, work);
4932 			binder_uintptr_t cookie = freeze->cookie;
4933 
4934 			binder_inner_proc_unlock(proc);
4935 			kfree(freeze);
4936 			binder_stats_deleted(BINDER_STAT_FREEZE);
4937 			if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
4938 				return -EFAULT;
4939 			ptr += sizeof(uint32_t);
4940 			if (put_user(cookie, (binder_uintptr_t __user *)ptr))
4941 				return -EFAULT;
4942 			ptr += sizeof(binder_uintptr_t);
4943 			binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
4944 		} break;
4945 
4946 		default:
4947 			binder_inner_proc_unlock(proc);
4948 			pr_err("%d:%d: bad work type %d\n",
4949 			       proc->pid, thread->pid, w->type);
4950 			break;
4951 		}
4952 
4953 		if (!t)
4954 			continue;
4955 
4956 		BUG_ON(t->buffer == NULL);
4957 		if (t->buffer->target_node) {
4958 			struct binder_node *target_node = t->buffer->target_node;
4959 
4960 			trd->target.ptr = target_node->ptr;
4961 			trd->cookie = target_node->cookie;
4962 			t->saved_priority = task_nice(current);
4963 			if (t->priority < target_node->min_priority &&
4964 			    !(t->flags & TF_ONE_WAY))
4965 				binder_set_nice(t->priority);
4966 			else if (!(t->flags & TF_ONE_WAY) ||
4967 				 t->saved_priority > target_node->min_priority)
4968 				binder_set_nice(target_node->min_priority);
4969 			cmd = BR_TRANSACTION;
4970 		} else {
4971 			trd->target.ptr = 0;
4972 			trd->cookie = 0;
4973 			cmd = BR_REPLY;
4974 		}
4975 		trd->code = t->code;
4976 		trd->flags = t->flags;
4977 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4978 
4979 		t_from = binder_get_txn_from(t);
4980 		if (t_from) {
4981 			struct task_struct *sender = t_from->proc->tsk;
4982 
4983 			trd->sender_pid =
4984 				task_tgid_nr_ns(sender,
4985 						task_active_pid_ns(current));
4986 		} else {
4987 			trd->sender_pid = 0;
4988 		}
4989 
4990 		ret = binder_apply_fd_fixups(proc, t);
4991 		if (ret) {
4992 			struct binder_buffer *buffer = t->buffer;
4993 			bool oneway = !!(t->flags & TF_ONE_WAY);
4994 			int tid = t->debug_id;
4995 
4996 			if (t_from)
4997 				binder_thread_dec_tmpref(t_from);
4998 			buffer->transaction = NULL;
4999 			binder_cleanup_transaction(t, "fd fixups failed",
5000 						   BR_FAILED_REPLY);
5001 			binder_free_buf(proc, thread, buffer, true);
5002 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5003 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5004 				     proc->pid, thread->pid,
5005 				     oneway ? "async " :
5006 					(cmd == BR_REPLY ? "reply " : ""),
5007 				     tid, BR_FAILED_REPLY, ret, __LINE__);
5008 			if (cmd == BR_REPLY) {
5009 				cmd = BR_FAILED_REPLY;
5010 				if (put_user(cmd, (uint32_t __user *)ptr))
5011 					return -EFAULT;
5012 				ptr += sizeof(uint32_t);
5013 				binder_stat_br(proc, thread, cmd);
5014 				break;
5015 			}
5016 			continue;
5017 		}
5018 		trd->data_size = t->buffer->data_size;
5019 		trd->offsets_size = t->buffer->offsets_size;
5020 		trd->data.ptr.buffer = t->buffer->user_data;
5021 		trd->data.ptr.offsets = trd->data.ptr.buffer +
5022 					ALIGN(t->buffer->data_size,
5023 					    sizeof(void *));
5024 
5025 		tr.secctx = t->security_ctx;
5026 		if (t->security_ctx) {
5027 			cmd = BR_TRANSACTION_SEC_CTX;
5028 			trsize = sizeof(tr);
5029 		}
5030 		if (put_user(cmd, (uint32_t __user *)ptr)) {
5031 			if (t_from)
5032 				binder_thread_dec_tmpref(t_from);
5033 
5034 			binder_cleanup_transaction(t, "put_user failed",
5035 						   BR_FAILED_REPLY);
5036 
5037 			return -EFAULT;
5038 		}
5039 		ptr += sizeof(uint32_t);
5040 		if (copy_to_user(ptr, &tr, trsize)) {
5041 			if (t_from)
5042 				binder_thread_dec_tmpref(t_from);
5043 
5044 			binder_cleanup_transaction(t, "copy_to_user failed",
5045 						   BR_FAILED_REPLY);
5046 
5047 			return -EFAULT;
5048 		}
5049 		ptr += trsize;
5050 
5051 		trace_binder_transaction_received(t);
5052 		binder_stat_br(proc, thread, cmd);
5053 		binder_debug(BINDER_DEBUG_TRANSACTION,
5054 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5055 			     proc->pid, thread->pid,
5056 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5057 				(cmd == BR_TRANSACTION_SEC_CTX) ?
5058 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5059 			     t->debug_id, t_from ? t_from->proc->pid : 0,
5060 			     t_from ? t_from->pid : 0, cmd,
5061 			     t->buffer->data_size, t->buffer->offsets_size);
5062 
5063 		if (t_from)
5064 			binder_thread_dec_tmpref(t_from);
5065 		t->buffer->allow_user_free = 1;
5066 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5067 			binder_inner_proc_lock(thread->proc);
5068 			t->to_parent = thread->transaction_stack;
5069 			t->to_thread = thread;
5070 			thread->transaction_stack = t;
5071 			binder_inner_proc_unlock(thread->proc);
5072 		} else {
5073 			binder_free_transaction(t);
5074 		}
5075 		break;
5076 	}
5077 
5078 done:
5079 
5080 	*consumed = ptr - buffer;
5081 	binder_inner_proc_lock(proc);
5082 	if (proc->requested_threads == 0 &&
5083 	    list_empty(&thread->proc->waiting_threads) &&
5084 	    proc->requested_threads_started < proc->max_threads &&
5085 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5086 	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
5087 	     /* spawn a new thread if we leave this out */) {
5088 		proc->requested_threads++;
5089 		binder_inner_proc_unlock(proc);
5090 		binder_debug(BINDER_DEBUG_THREADS,
5091 			     "%d:%d BR_SPAWN_LOOPER\n",
5092 			     proc->pid, thread->pid);
5093 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5094 			return -EFAULT;
5095 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5096 	} else
5097 		binder_inner_proc_unlock(proc);
5098 	return 0;
5099 }
5100 
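/**
 * binder_release_work() - discard all items on a binder work list
 * @proc: binder_proc owning @list
 * @list: work list being torn down
 *
 * Dequeues every binder_work entry under the inner lock and releases it
 * according to its type: undelivered transactions are cleaned up with
 * BR_DEAD_REPLY, completions and death/freeze notifications are freed,
 * and return errors are only logged. Used when a thread or process dies.
 */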
5101 static void binder_release_work(struct binder_proc *proc,
5102 				struct list_head *list)
5103 {
5104 	struct binder_work *w;
5105 	enum binder_work_type wtype;
5106 
5107 	while (1) {
5108 		binder_inner_proc_lock(proc);
5109 		w = binder_dequeue_work_head_ilocked(list);
5110 		wtype = w ? w->type : 0;
5111 		binder_inner_proc_unlock(proc);
5112 		if (!w)
5113 			return;
5114 
5115 		switch (wtype) {
5116 		case BINDER_WORK_TRANSACTION: {
5117 			struct binder_transaction *t;
5118 
5119 			t = container_of(w, struct binder_transaction, work);
5120 
5121 			binder_cleanup_transaction(t, "process died.",
5122 						   BR_DEAD_REPLY);
5123 		} break;
5124 		case BINDER_WORK_RETURN_ERROR: {
5125 			struct binder_error *e = container_of(
5126 					w, struct binder_error, work);
5127 
5128 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5129 				"undelivered TRANSACTION_ERROR: %u\n",
5130 				e->cmd);
5131 		} break;
5132 		case BINDER_WORK_TRANSACTION_PENDING:
5133 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5134 		case BINDER_WORK_TRANSACTION_COMPLETE: {
5135 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5136 				"undelivered TRANSACTION_COMPLETE\n");
5137 			kfree(w);
5138 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5139 		} break;
5140 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5141 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5142 			struct binder_ref_death *death;
5143 
5144 			death = container_of(w, struct binder_ref_death, work);
5145 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5146 				"undelivered death notification, %016llx\n",
5147 				(u64)death->cookie);
5148 			kfree(death);
5149 			binder_stats_deleted(BINDER_STAT_DEATH);
5150 		} break;
5151 		case BINDER_WORK_NODE:
5152 			break;
5153 		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5154 			struct binder_ref_freeze *freeze;
5155 
5156 			freeze = container_of(w, struct binder_ref_freeze, work);
5157 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5158 				     "undelivered freeze notification, %016llx\n",
5159 				     (u64)freeze->cookie);
5160 			kfree(freeze);
5161 			binder_stats_deleted(BINDER_STAT_FREEZE);
5162 		} break;
5163 		default:
5164 			pr_err("unexpected work type, %d, not freed\n",
5165 			       wtype);
5166 			break;
5167 		}
5168 	}
5169 
5170 }
5171 
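/**
 * binder_get_thread_ilocked() - look up the binder_thread for current
 * @proc:       binder_proc to search
 * @new_thread: preallocated thread to insert on a miss, or NULL
 *
 * Walks the proc->threads rbtree (keyed by PID) for the calling task.
 * If no entry exists and @new_thread was supplied, it is initialized
 * and inserted. Requires proc->inner_lock.
 *
 * Return: the existing or newly inserted thread, or NULL on a miss
 * when @new_thread is NULL.
 */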
5172 static struct binder_thread *binder_get_thread_ilocked(
5173 		struct binder_proc *proc, struct binder_thread *new_thread)
5174 {
5175 	struct binder_thread *thread = NULL;
5176 	struct rb_node *parent = NULL;
5177 	struct rb_node **p = &proc->threads.rb_node;
5178 
5179 	while (*p) {
5180 		parent = *p;
5181 		thread = rb_entry(parent, struct binder_thread, rb_node);
5182 
5183 		if (current->pid < thread->pid)
5184 			p = &(*p)->rb_left;
5185 		else if (current->pid > thread->pid)
5186 			p = &(*p)->rb_right;
5187 		else
5188 			return thread;
5189 	}
5190 	if (!new_thread)
5191 		return NULL;
5192 	thread = new_thread;
5193 	binder_stats_created(BINDER_STAT_THREAD);
5194 	thread->proc = proc;
5195 	thread->pid = current->pid;
5196 	atomic_set(&thread->tmp_ref, 0);
5197 	init_waitqueue_head(&thread->wait);
5198 	INIT_LIST_HEAD(&thread->todo);
5199 	rb_link_node(&thread->rb_node, parent, p);
5200 	rb_insert_color(&thread->rb_node, &proc->threads);
5201 	thread->looper_need_return = true;
5202 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5203 	thread->return_error.cmd = BR_OK;
5204 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5205 	thread->reply_error.cmd = BR_OK;
5206 	thread->ee.command = BR_OK;
5207 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5208 	return thread;
5209 }
5210 
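/**
 * binder_get_thread() - find or create the binder_thread for current
 * @proc: binder_proc to search
 *
 * On a lookup miss, a new thread is allocated with the inner lock
 * dropped and the lookup is retried; if another task raced in and
 * inserted one first, the spare allocation is freed.
 *
 * Return: the thread for the calling task, or NULL on allocation failure.
 */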
5211 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5212 {
5213 	struct binder_thread *thread;
5214 	struct binder_thread *new_thread;
5215 
5216 	binder_inner_proc_lock(proc);
5217 	thread = binder_get_thread_ilocked(proc, NULL);
5218 	binder_inner_proc_unlock(proc);
5219 	if (!thread) {
5220 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5221 		if (new_thread == NULL)
5222 			return NULL;
5223 		binder_inner_proc_lock(proc);
5224 		thread = binder_get_thread_ilocked(proc, new_thread);
5225 		binder_inner_proc_unlock(proc);
5226 		if (thread != new_thread)
5227 			kfree(new_thread);
5228 	}
5229 	return thread;
5230 }
5231 
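/**
 * binder_free_proc() - free a binder_proc once the last reference is gone
 * @proc: process to free
 *
 * Drops the binder device reference (removing the device if this was
 * its last user), releases the allocator state, and puts the task and
 * cred references taken at open time before freeing @proc itself.
 */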
5232 static void binder_free_proc(struct binder_proc *proc)
5233 {
5234 	struct binder_device *device;
5235 
5236 	BUG_ON(!list_empty(&proc->todo));
5237 	BUG_ON(!list_empty(&proc->delivered_death));
5238 	if (proc->outstanding_txns)
5239 		pr_warn("%s: Unexpected outstanding_txns %d\n",
5240 			__func__, proc->outstanding_txns);
5241 	device = container_of(proc->context, struct binder_device, context);
5242 	if (refcount_dec_and_test(&device->ref)) {
5243 		binder_remove_device(device);
5244 		kfree(proc->context->name);
5245 		kfree(device);
5246 	}
5247 	binder_alloc_deferred_release(&proc->alloc);
5248 	put_task_struct(proc->tsk);
5249 	put_cred(proc->cred);
5250 	binder_stats_deleted(BINDER_STAT_PROC);
5251 	dbitmap_free(&proc->dmap);
5252 	kfree(proc);
5253 }
5254 
5255 static void binder_free_thread(struct binder_thread *thread)
5256 {
5257 	BUG_ON(!list_empty(&thread->todo));
5258 	binder_stats_deleted(BINDER_STAT_THREAD);
5259 	binder_proc_dec_tmpref(thread->proc);
5260 	kfree(thread);
5261 }
5262 
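/**
 * binder_thread_release() - clean up a thread that is going away
 * @proc:   binder_proc the thread belongs to
 * @thread: thread being released
 *
 * Removes @thread from proc->threads, marks it dead and unwinds its
 * transaction stack, detaching every transaction still targeting or
 * originating from it. A caller waiting on this thread receives a
 * BR_DEAD_REPLY, pending work on thread->todo is released, and any
 * poll waiters are woken and quiesced before the thread is freed.
 *
 * Return: the number of transactions that were still active.
 */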
5263 static int binder_thread_release(struct binder_proc *proc,
5264 				 struct binder_thread *thread)
5265 {
5266 	struct binder_transaction *t;
5267 	struct binder_transaction *send_reply = NULL;
5268 	int active_transactions = 0;
5269 	struct binder_transaction *last_t = NULL;
5270 
5271 	binder_inner_proc_lock(thread->proc);
5272 	/*
5273 	 * take a ref on the proc so it survives
5274 	 * after we remove this thread from proc->threads.
5275 	 * The corresponding decrement happens when we
5276 	 * actually free the thread in binder_free_thread().
5277 	 */
5278 	proc->tmp_ref++;
5279 	/*
5280 	 * take a ref on this thread to ensure it
5281 	 * survives while we are releasing it
5282 	 */
5283 	atomic_inc(&thread->tmp_ref);
5284 	rb_erase(&thread->rb_node, &proc->threads);
5285 	t = thread->transaction_stack;
5286 	if (t) {
5287 		spin_lock(&t->lock);
5288 		if (t->to_thread == thread)
5289 			send_reply = t;
5290 	} else {
5291 		__acquire(&t->lock);
5292 	}
5293 	thread->is_dead = true;
5294 
5295 	while (t) {
5296 		last_t = t;
5297 		active_transactions++;
5298 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5299 			     "release %d:%d transaction %d %s, still active\n",
5300 			      proc->pid, thread->pid,
5301 			     t->debug_id,
5302 			     (t->to_thread == thread) ? "in" : "out");
5303 
5304 		if (t->to_thread == thread) {
5305 			thread->proc->outstanding_txns--;
5306 			t->to_proc = NULL;
5307 			t->to_thread = NULL;
5308 			if (t->buffer) {
5309 				t->buffer->transaction = NULL;
5310 				t->buffer = NULL;
5311 			}
5312 			t = t->to_parent;
5313 		} else if (t->from == thread) {
5314 			t->from = NULL;
5315 			t = t->from_parent;
5316 		} else
5317 			BUG();
5318 		spin_unlock(&last_t->lock);
5319 		if (t)
5320 			spin_lock(&t->lock);
5321 		else
5322 			__acquire(&t->lock);
5323 	}
5324 	/* annotation for sparse, lock not acquired in last iteration above */
5325 	__release(&t->lock);
5326 
5327 	/*
5328 	 * If this thread used poll, make sure we remove the waitqueue from any
5329 	 * poll data structures holding it.
5330 	 */
5331 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5332 		wake_up_pollfree(&thread->wait);
5333 
5334 	binder_inner_proc_unlock(thread->proc);
5335 
5336 	/*
5337 	 * This is needed to avoid races between wake_up_pollfree() above and
5338 	 * someone else removing the last entry from the queue for other reasons
5339 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5340 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5341 	 * we can be sure they're done after we call synchronize_rcu().
5342 	 */
5343 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5344 		synchronize_rcu();
5345 
5346 	if (send_reply)
5347 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5348 	binder_release_work(proc, &thread->todo);
5349 	binder_thread_dec_tmpref(thread);
5350 	return active_transactions;
5351 }
5352 
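/**
 * binder_poll() - poll handler for a binder fd
 * @filp: opened binder file
 * @wait: poll table supplied by the core poll code
 *
 * Marks the calling thread as a poll user and registers thread->wait
 * with the poll table.
 *
 * Return: EPOLLIN if the thread (or the process, when this thread may
 * handle proc work) has work queued, EPOLLERR if no binder_thread
 * could be obtained, 0 otherwise.
 */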
5353 static __poll_t binder_poll(struct file *filp,
5354 				struct poll_table_struct *wait)
5355 {
5356 	struct binder_proc *proc = filp->private_data;
5357 	struct binder_thread *thread = NULL;
5358 	bool wait_for_proc_work;
5359 
5360 	thread = binder_get_thread(proc);
5361 	if (!thread)
5362 		return EPOLLERR;
5363 
5364 	binder_inner_proc_lock(thread->proc);
5365 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5366 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5367 
5368 	binder_inner_proc_unlock(thread->proc);
5369 
5370 	poll_wait(filp, &thread->wait, wait);
5371 
5372 	if (binder_has_work(thread, wait_for_proc_work))
5373 		return EPOLLIN;
5374 
5375 	return 0;
5376 }
5377 
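/**
 * binder_ioctl_write_read() - handle a BINDER_WRITE_READ ioctl
 * @filp:   opened binder file
 * @arg:    userspace pointer to a struct binder_write_read
 * @thread: calling binder thread
 *
 * Copies in the binder_write_read descriptor, drains the write buffer
 * through binder_thread_write() and then fills the read buffer via
 * binder_thread_read(), honoring O_NONBLOCK. The consumed counts are
 * copied back to userspace even when processing failed.
 *
 * A minimal sketch of the caller side (illustrative only, not part of
 * this file; assumes an open binder fd and filled command buffers):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size   = write_len,
 *		.read_buffer  = (binder_uintptr_t)read_buf,
 *		.read_size    = sizeof(read_buf),
 *	};
 *	ret = ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	(on return, bwr.write_consumed and bwr.read_consumed are updated)
 *
 * Return: 0 on success, negative errno on failure.
 */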
5378 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5379 				struct binder_thread *thread)
5380 {
5381 	int ret = 0;
5382 	struct binder_proc *proc = filp->private_data;
5383 	void __user *ubuf = (void __user *)arg;
5384 	struct binder_write_read bwr;
5385 
5386 	if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
5387 		return -EFAULT;
5388 
5389 	binder_debug(BINDER_DEBUG_READ_WRITE,
5390 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5391 		     proc->pid, thread->pid,
5392 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5393 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5394 
5395 	if (bwr.write_size > 0) {
5396 		ret = binder_thread_write(proc, thread,
5397 					  bwr.write_buffer,
5398 					  bwr.write_size,
5399 					  &bwr.write_consumed);
5400 		trace_binder_write_done(ret);
5401 		if (ret < 0) {
5402 			bwr.read_consumed = 0;
5403 			goto out;
5404 		}
5405 	}
5406 	if (bwr.read_size > 0) {
5407 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5408 					 bwr.read_size,
5409 					 &bwr.read_consumed,
5410 					 filp->f_flags & O_NONBLOCK);
5411 		trace_binder_read_done(ret);
5412 		binder_inner_proc_lock(proc);
5413 		if (!binder_worklist_empty_ilocked(&proc->todo))
5414 			binder_wakeup_proc_ilocked(proc);
5415 		binder_inner_proc_unlock(proc);
5416 		if (ret < 0)
5417 			goto out;
5418 	}
5419 	binder_debug(BINDER_DEBUG_READ_WRITE,
5420 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5421 		     proc->pid, thread->pid,
5422 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5423 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5424 out:
5425 	if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5426 		ret = -EFAULT;
5427 	return ret;
5428 }
5429 
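/**
 * binder_ioctl_set_ctx_mgr() - register the context manager node
 * @filp: opened binder file
 * @fbo:  flat_binder_object describing the node, or NULL for the
 *        legacy BINDER_SET_CONTEXT_MGR ioctl
 *
 * After a security check, and a uid check when a manager uid was
 * registered before, creates the context manager node with strong and
 * weak references already held.
 *
 * Return: 0 on success, -EBUSY if a context manager is already set,
 * -EPERM on a uid mismatch, -ENOMEM if node creation fails, or the
 * error returned by the security hook.
 */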
5430 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5431 				    struct flat_binder_object *fbo)
5432 {
5433 	int ret = 0;
5434 	struct binder_proc *proc = filp->private_data;
5435 	struct binder_context *context = proc->context;
5436 	struct binder_node *new_node;
5437 	kuid_t curr_euid = current_euid();
5438 
5439 	guard(mutex)(&context->context_mgr_node_lock);
5440 	if (context->binder_context_mgr_node) {
5441 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5442 		return -EBUSY;
5443 	}
5444 	ret = security_binder_set_context_mgr(proc->cred);
5445 	if (ret < 0)
5446 		return ret;
5447 	if (uid_valid(context->binder_context_mgr_uid)) {
5448 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5449 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5450 			       from_kuid(&init_user_ns, curr_euid),
5451 			       from_kuid(&init_user_ns,
5452 					 context->binder_context_mgr_uid));
5453 			return -EPERM;
5454 		}
5455 	} else {
5456 		context->binder_context_mgr_uid = curr_euid;
5457 	}
5458 	new_node = binder_new_node(proc, fbo);
5459 	if (!new_node)
5460 		return -ENOMEM;
5461 	binder_node_lock(new_node);
5462 	new_node->local_weak_refs++;
5463 	new_node->local_strong_refs++;
5464 	new_node->has_strong_ref = 1;
5465 	new_node->has_weak_ref = 1;
5466 	context->binder_context_mgr_node = new_node;
5467 	binder_node_unlock(new_node);
5468 	binder_put_node(new_node);
5469 	return ret;
5470 }
5471 
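/**
 * binder_ioctl_get_node_info_for_ref() - look up ref counts for a handle
 * @proc: calling process; must be the context manager
 * @info: in/out request; only @info->handle may be non-zero on entry
 *
 * Return: 0 with strong_count and weak_count filled in, -EPERM if
 * @proc is not the context manager, -EINVAL on a malformed request or
 * an unknown handle.
 */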
5472 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5473 		struct binder_node_info_for_ref *info)
5474 {
5475 	struct binder_node *node;
5476 	struct binder_context *context = proc->context;
5477 	__u32 handle = info->handle;
5478 
5479 	if (info->strong_count || info->weak_count || info->reserved1 ||
5480 	    info->reserved2 || info->reserved3) {
5481 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5482 				  proc->pid);
5483 		return -EINVAL;
5484 	}
5485 
5486 	/* This ioctl may only be used by the context manager */
5487 	mutex_lock(&context->context_mgr_node_lock);
5488 	if (!context->binder_context_mgr_node ||
5489 		context->binder_context_mgr_node->proc != proc) {
5490 		mutex_unlock(&context->context_mgr_node_lock);
5491 		return -EPERM;
5492 	}
5493 	mutex_unlock(&context->context_mgr_node_lock);
5494 
5495 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5496 	if (!node)
5497 		return -EINVAL;
5498 
5499 	info->strong_count = node->local_strong_refs +
5500 		node->internal_strong_refs;
5501 	info->weak_count = node->local_weak_refs;
5502 
5503 	binder_put_node(node);
5504 
5505 	return 0;
5506 }
5507 
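/**
 * binder_ioctl_get_node_debug_info() - find the next node for debugging
 * @proc: process whose nodes are inspected
 * @info: in/out cursor; filled with the first node whose ptr is
 *        greater than the @info->ptr passed in
 *
 * Return: 0; @info is left zeroed when no further node exists.
 */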
5508 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5509 				struct binder_node_debug_info *info)
5510 {
5511 	struct rb_node *n;
5512 	binder_uintptr_t ptr = info->ptr;
5513 
5514 	memset(info, 0, sizeof(*info));
5515 
5516 	binder_inner_proc_lock(proc);
5517 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5518 		struct binder_node *node = rb_entry(n, struct binder_node,
5519 						    rb_node);
5520 		if (node->ptr > ptr) {
5521 			info->ptr = node->ptr;
5522 			info->cookie = node->cookie;
5523 			info->has_strong_ref = node->has_strong_ref;
5524 			info->has_weak_ref = node->has_weak_ref;
5525 			break;
5526 		}
5527 	}
5528 	binder_inner_proc_unlock(proc);
5529 
5530 	return 0;
5531 }
5532 
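/**
 * binder_txns_pending_ilocked() - check for transactions in flight
 * @proc: binder_proc to inspect
 *
 * Requires proc->inner_lock.
 *
 * Return: true if @proc has outstanding transactions or any of its
 * threads has a non-empty transaction stack.
 */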
5533 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5534 {
5535 	struct rb_node *n;
5536 	struct binder_thread *thread;
5537 
5538 	if (proc->outstanding_txns > 0)
5539 		return true;
5540 
5541 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5542 		thread = rb_entry(n, struct binder_thread, rb_node);
5543 		if (thread->transaction_stack)
5544 			return true;
5545 	}
5546 	return false;
5547 }
5548 
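/**
 * binder_add_freeze_work() - queue freeze notifications for @proc's nodes
 * @proc:      binder_proc whose frozen state changed
 * @is_frozen: new frozen state to report
 *
 * Walks every node of @proc and, for each reference that requested
 * freeze notifications, queues a BINDER_WORK_FROZEN_BINDER item on the
 * referencing process' todo list, or flags a resend when a notification
 * with a different state was delivered but not yet acknowledged. Node
 * tmp refs keep the walk safe while the locks are dropped and retaken.
 */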
5549 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5550 {
5551 	struct binder_node *prev = NULL;
5552 	struct rb_node *n;
5553 	struct binder_ref *ref;
5554 
5555 	binder_inner_proc_lock(proc);
5556 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5557 		struct binder_node *node;
5558 
5559 		node = rb_entry(n, struct binder_node, rb_node);
5560 		binder_inc_node_tmpref_ilocked(node);
5561 		binder_inner_proc_unlock(proc);
5562 		if (prev)
5563 			binder_put_node(prev);
5564 		binder_node_lock(node);
5565 		hlist_for_each_entry(ref, &node->refs, node_entry) {
5566 			/*
5567 			 * Need the node lock to synchronize
5568 			 * with new notification requests and the
5569 			 * inner lock to synchronize with queued
5570 			 * freeze notifications.
5571 			 */
5572 			binder_inner_proc_lock(ref->proc);
5573 			if (!ref->freeze) {
5574 				binder_inner_proc_unlock(ref->proc);
5575 				continue;
5576 			}
5577 			ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5578 			if (list_empty(&ref->freeze->work.entry)) {
5579 				ref->freeze->is_frozen = is_frozen;
5580 				binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5581 				binder_wakeup_proc_ilocked(ref->proc);
5582 			} else {
5583 				if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5584 					ref->freeze->resend = true;
5585 				ref->freeze->is_frozen = is_frozen;
5586 			}
5587 			binder_inner_proc_unlock(ref->proc);
5588 		}
5589 		prev = node;
5590 		binder_node_unlock(node);
5591 		binder_inner_proc_lock(proc);
5592 		if (proc->is_dead)
5593 			break;
5594 	}
5595 	binder_inner_proc_unlock(proc);
5596 	if (prev)
5597 		binder_put_node(prev);
5598 }
5599 
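/**
 * binder_ioctl_freeze() - freeze or unfreeze one binder_proc
 * @info:        request carrying the enable flag and optional timeout
 * @target_proc: process to act on
 *
 * Unfreezing clears the frozen state and queues "unfrozen"
 * notifications. Freezing marks the process frozen so that new
 * transactions are rejected, optionally waits up to @info->timeout_ms
 * for outstanding transactions to drain, and reverts the frozen state
 * on failure.
 *
 * Return: a non-negative value on success, -EAGAIN if transactions are
 * still pending after the wait, or another negative errno if the wait
 * was interrupted.
 */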
5600 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5601 			       struct binder_proc *target_proc)
5602 {
5603 	int ret = 0;
5604 
5605 	if (!info->enable) {
5606 		binder_inner_proc_lock(target_proc);
5607 		target_proc->sync_recv = false;
5608 		target_proc->async_recv = false;
5609 		target_proc->is_frozen = false;
5610 		binder_inner_proc_unlock(target_proc);
5611 		binder_add_freeze_work(target_proc, false);
5612 		return 0;
5613 	}
5614 
5615 	/*
5616 	 * Freezing the target. Prevent new transactions by
5617 	 * setting the frozen state. If a timeout is specified, wait
5618 	 * for transactions to drain.
5619 	 */
5620 	binder_inner_proc_lock(target_proc);
5621 	target_proc->sync_recv = false;
5622 	target_proc->async_recv = false;
5623 	target_proc->is_frozen = true;
5624 	binder_inner_proc_unlock(target_proc);
5625 
5626 	if (info->timeout_ms > 0)
5627 		ret = wait_event_interruptible_timeout(
5628 			target_proc->freeze_wait,
5629 			(!target_proc->outstanding_txns),
5630 			msecs_to_jiffies(info->timeout_ms));
5631 
5632 	/* Check pending transactions that wait for reply */
5633 	if (ret >= 0) {
5634 		binder_inner_proc_lock(target_proc);
5635 		if (binder_txns_pending_ilocked(target_proc))
5636 			ret = -EAGAIN;
5637 		binder_inner_proc_unlock(target_proc);
5638 	}
5639 
5640 	if (ret < 0) {
5641 		binder_inner_proc_lock(target_proc);
5642 		target_proc->is_frozen = false;
5643 		binder_inner_proc_unlock(target_proc);
5644 	} else {
5645 		binder_add_freeze_work(target_proc, true);
5646 	}
5647 
5648 	return ret;
5649 }
5650 
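/**
 * binder_ioctl_get_freezer_info() - query frozen-state statistics
 * @info: in/out; @info->pid selects the target, sync_recv/async_recv
 *        report whether a frozen process received transactions
 *
 * Accumulates the flags across every binder_proc with a matching PID
 * (one per binder context); bit 1 of sync_recv additionally reports
 * whether transactions are still pending.
 *
 * Return: 0 on success, -EINVAL if no matching process exists.
 */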
5651 static int binder_ioctl_get_freezer_info(
5652 				struct binder_frozen_status_info *info)
5653 {
5654 	struct binder_proc *target_proc;
5655 	bool found = false;
5656 	__u32 txns_pending;
5657 
5658 	info->sync_recv = 0;
5659 	info->async_recv = 0;
5660 
5661 	mutex_lock(&binder_procs_lock);
5662 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5663 		if (target_proc->pid == info->pid) {
5664 			found = true;
5665 			binder_inner_proc_lock(target_proc);
5666 			txns_pending = binder_txns_pending_ilocked(target_proc);
5667 			info->sync_recv |= target_proc->sync_recv |
5668 					(txns_pending << 1);
5669 			info->async_recv |= target_proc->async_recv;
5670 			binder_inner_proc_unlock(target_proc);
5671 		}
5672 	}
5673 	mutex_unlock(&binder_procs_lock);
5674 
5675 	if (!found)
5676 		return -EINVAL;
5677 
5678 	return 0;
5679 }
5680 
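/**
 * binder_ioctl_get_extended_error() - report and reset the last error
 * @thread: calling binder thread
 * @ubuf:   userspace buffer for a struct binder_extended_error
 *
 * Copies the extended error state recorded on @thread to userspace and
 * resets it to BR_OK.
 *
 * Return: 0 on success, -EFAULT if the copy to userspace fails.
 */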
5681 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5682 					   void __user *ubuf)
5683 {
5684 	struct binder_extended_error ee;
5685 
5686 	binder_inner_proc_lock(thread->proc);
5687 	ee = thread->ee;
5688 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5689 	binder_inner_proc_unlock(thread->proc);
5690 
5691 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5692 		return -EFAULT;
5693 
5694 	return 0;
5695 }
5696 
5697 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5698 {
5699 	int ret;
5700 	struct binder_proc *proc = filp->private_data;
5701 	struct binder_thread *thread;
5702 	void __user *ubuf = (void __user *)arg;
5703 
5704 	trace_binder_ioctl(cmd, arg);
5705 
5706 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5707 	if (ret)
5708 		goto err_unlocked;
5709 
5710 	thread = binder_get_thread(proc);
5711 	if (thread == NULL) {
5712 		ret = -ENOMEM;
5713 		goto err;
5714 	}
5715 
5716 	switch (cmd) {
5717 	case BINDER_WRITE_READ:
5718 		ret = binder_ioctl_write_read(filp, arg, thread);
5719 		if (ret)
5720 			goto err;
5721 		break;
5722 	case BINDER_SET_MAX_THREADS: {
5723 		u32 max_threads;
5724 
5725 		if (copy_from_user(&max_threads, ubuf,
5726 				   sizeof(max_threads))) {
5727 			ret = -EINVAL;
5728 			goto err;
5729 		}
5730 		binder_inner_proc_lock(proc);
5731 		proc->max_threads = max_threads;
5732 		binder_inner_proc_unlock(proc);
5733 		break;
5734 	}
5735 	case BINDER_SET_CONTEXT_MGR_EXT: {
5736 		struct flat_binder_object fbo;
5737 
5738 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5739 			ret = -EINVAL;
5740 			goto err;
5741 		}
5742 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5743 		if (ret)
5744 			goto err;
5745 		break;
5746 	}
5747 	case BINDER_SET_CONTEXT_MGR:
5748 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5749 		if (ret)
5750 			goto err;
5751 		break;
5752 	case BINDER_THREAD_EXIT:
5753 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5754 			     proc->pid, thread->pid);
5755 		binder_thread_release(proc, thread);
5756 		thread = NULL;
5757 		break;
5758 	case BINDER_VERSION: {
5759 		struct binder_version __user *ver = ubuf;
5760 
5761 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5762 			     &ver->protocol_version)) {
5763 			ret = -EINVAL;
5764 			goto err;
5765 		}
5766 		break;
5767 	}
5768 	case BINDER_GET_NODE_INFO_FOR_REF: {
5769 		struct binder_node_info_for_ref info;
5770 
5771 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5772 			ret = -EFAULT;
5773 			goto err;
5774 		}
5775 
5776 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5777 		if (ret < 0)
5778 			goto err;
5779 
5780 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5781 			ret = -EFAULT;
5782 			goto err;
5783 		}
5784 
5785 		break;
5786 	}
5787 	case BINDER_GET_NODE_DEBUG_INFO: {
5788 		struct binder_node_debug_info info;
5789 
5790 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5791 			ret = -EFAULT;
5792 			goto err;
5793 		}
5794 
5795 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5796 		if (ret < 0)
5797 			goto err;
5798 
5799 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5800 			ret = -EFAULT;
5801 			goto err;
5802 		}
5803 		break;
5804 	}
5805 	case BINDER_FREEZE: {
5806 		struct binder_freeze_info info;
5807 		struct binder_proc **target_procs = NULL, *target_proc;
5808 		int target_procs_count = 0, i = 0;
5809 
5810 		ret = 0;
5811 
5812 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5813 			ret = -EFAULT;
5814 			goto err;
5815 		}
5816 
5817 		mutex_lock(&binder_procs_lock);
5818 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5819 			if (target_proc->pid == info.pid)
5820 				target_procs_count++;
5821 		}
5822 
5823 		if (target_procs_count == 0) {
5824 			mutex_unlock(&binder_procs_lock);
5825 			ret = -EINVAL;
5826 			goto err;
5827 		}
5828 
5829 		target_procs = kcalloc(target_procs_count,
5830 				       sizeof(struct binder_proc *),
5831 				       GFP_KERNEL);
5832 
5833 		if (!target_procs) {
5834 			mutex_unlock(&binder_procs_lock);
5835 			ret = -ENOMEM;
5836 			goto err;
5837 		}
5838 
5839 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5840 			if (target_proc->pid != info.pid)
5841 				continue;
5842 
5843 			binder_inner_proc_lock(target_proc);
5844 			target_proc->tmp_ref++;
5845 			binder_inner_proc_unlock(target_proc);
5846 
5847 			target_procs[i++] = target_proc;
5848 		}
5849 		mutex_unlock(&binder_procs_lock);
5850 
5851 		for (i = 0; i < target_procs_count; i++) {
5852 			if (ret >= 0)
5853 				ret = binder_ioctl_freeze(&info,
5854 							  target_procs[i]);
5855 
5856 			binder_proc_dec_tmpref(target_procs[i]);
5857 		}
5858 
5859 		kfree(target_procs);
5860 
5861 		if (ret < 0)
5862 			goto err;
5863 		break;
5864 	}
5865 	case BINDER_GET_FROZEN_INFO: {
5866 		struct binder_frozen_status_info info;
5867 
5868 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5869 			ret = -EFAULT;
5870 			goto err;
5871 		}
5872 
5873 		ret = binder_ioctl_get_freezer_info(&info);
5874 		if (ret < 0)
5875 			goto err;
5876 
5877 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5878 			ret = -EFAULT;
5879 			goto err;
5880 		}
5881 		break;
5882 	}
5883 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5884 		uint32_t enable;
5885 
5886 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5887 			ret = -EFAULT;
5888 			goto err;
5889 		}
5890 		binder_inner_proc_lock(proc);
5891 		proc->oneway_spam_detection_enabled = (bool)enable;
5892 		binder_inner_proc_unlock(proc);
5893 		break;
5894 	}
5895 	case BINDER_GET_EXTENDED_ERROR:
5896 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5897 		if (ret < 0)
5898 			goto err;
5899 		break;
5900 	default:
5901 		ret = -EINVAL;
5902 		goto err;
5903 	}
5904 	ret = 0;
5905 err:
5906 	if (thread)
5907 		thread->looper_need_return = false;
5908 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5909 	if (ret && ret != -EINTR)
5910 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5911 err_unlocked:
5912 	trace_binder_ioctl_done(ret);
5913 	return ret;
5914 }
5915 
5916 static void binder_vma_open(struct vm_area_struct *vma)
5917 {
5918 	struct binder_proc *proc = vma->vm_private_data;
5919 
5920 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5921 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5922 		     proc->pid, vma->vm_start, vma->vm_end,
5923 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5924 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5925 }
5926 
5927 static void binder_vma_close(struct vm_area_struct *vma)
5928 {
5929 	struct binder_proc *proc = vma->vm_private_data;
5930 
5931 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5932 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5933 		     proc->pid, vma->vm_start, vma->vm_end,
5934 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5935 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5936 	binder_alloc_vma_close(&proc->alloc);
5937 }
5938 
5939 VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5940 {
5941 	return VM_FAULT_SIGBUS;
5942 }
5943 EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
5944 
5945 static const struct vm_operations_struct binder_vm_ops = {
5946 	.open = binder_vma_open,
5947 	.close = binder_vma_close,
5948 	.fault = binder_vm_fault,
5949 };
5950 
5951 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5952 {
5953 	struct binder_proc *proc = filp->private_data;
5954 
5955 	if (proc->tsk != current->group_leader)
5956 		return -EINVAL;
5957 
5958 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5959 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5960 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5961 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5962 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5963 
5964 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5965 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5966 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5967 		return -EPERM;
5968 	}
5969 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5970 
5971 	vma->vm_ops = &binder_vm_ops;
5972 	vma->vm_private_data = proc;
5973 
5974 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5975 }
5976 
5977 static int binder_open(struct inode *nodp, struct file *filp)
5978 {
5979 	struct binder_proc *proc, *itr;
5980 	struct binder_device *binder_dev;
5981 	struct binderfs_info *info;
5982 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5983 	bool existing_pid = false;
5984 
5985 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5986 		     current->group_leader->pid, current->pid);
5987 
5988 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5989 	if (proc == NULL)
5990 		return -ENOMEM;
5991 
5992 	dbitmap_init(&proc->dmap);
5993 	spin_lock_init(&proc->inner_lock);
5994 	spin_lock_init(&proc->outer_lock);
5995 	get_task_struct(current->group_leader);
5996 	proc->tsk = current->group_leader;
5997 	proc->cred = get_cred(filp->f_cred);
5998 	INIT_LIST_HEAD(&proc->todo);
5999 	init_waitqueue_head(&proc->freeze_wait);
6000 	proc->default_priority = task_nice(current);
6001 	/* binderfs stashes devices in i_private */
6002 	if (is_binderfs_device(nodp)) {
6003 		binder_dev = nodp->i_private;
6004 		info = nodp->i_sb->s_fs_info;
6005 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
6006 	} else {
6007 		binder_dev = container_of(filp->private_data,
6008 					  struct binder_device, miscdev);
6009 	}
6010 	refcount_inc(&binder_dev->ref);
6011 	proc->context = &binder_dev->context;
6012 	binder_alloc_init(&proc->alloc);
6013 
6014 	binder_stats_created(BINDER_STAT_PROC);
6015 	proc->pid = current->group_leader->pid;
6016 	INIT_LIST_HEAD(&proc->delivered_death);
6017 	INIT_LIST_HEAD(&proc->delivered_freeze);
6018 	INIT_LIST_HEAD(&proc->waiting_threads);
6019 	filp->private_data = proc;
6020 
6021 	mutex_lock(&binder_procs_lock);
6022 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6023 		if (itr->pid == proc->pid) {
6024 			existing_pid = true;
6025 			break;
6026 		}
6027 	}
6028 	hlist_add_head(&proc->proc_node, &binder_procs);
6029 	mutex_unlock(&binder_procs_lock);
6030 
6031 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
6032 		char strbuf[11];
6033 
6034 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6035 		/*
6036 		 * proc debug entries are shared between contexts.
6037 		 * Only create for the first PID to avoid debugfs log spamming.
6038 		 * The printing code will print all contexts for a given
6039 		 * PID anyway, so this is not a problem.
6040 		 */
6041 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6042 			binder_debugfs_dir_entry_proc,
6043 			(void *)(unsigned long)proc->pid,
6044 			&proc_fops);
6045 	}
6046 
6047 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
6048 		char strbuf[11];
6049 		struct dentry *binderfs_entry;
6050 
6051 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6052 		/*
6053 		 * Similar to debugfs, the process specific log file is shared
6054 		 * between contexts. Only create for the first PID.
6055 		 * This is ok since, as with debugfs, the log file will contain
6056 		 * information on all contexts of a given PID.
6057 		 */
6058 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6059 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6060 		if (!IS_ERR(binderfs_entry)) {
6061 			proc->binderfs_entry = binderfs_entry;
6062 		} else {
6063 			int error;
6064 
6065 			error = PTR_ERR(binderfs_entry);
6066 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
6067 				strbuf, error);
6068 		}
6069 	}
6070 
6071 	return 0;
6072 }
6073 
6074 static int binder_flush(struct file *filp, fl_owner_t id)
6075 {
6076 	struct binder_proc *proc = filp->private_data;
6077 
6078 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6079 
6080 	return 0;
6081 }
6082 
6083 static void binder_deferred_flush(struct binder_proc *proc)
6084 {
6085 	struct rb_node *n;
6086 	int wake_count = 0;
6087 
6088 	binder_inner_proc_lock(proc);
6089 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6090 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6091 
6092 		thread->looper_need_return = true;
6093 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6094 			wake_up_interruptible(&thread->wait);
6095 			wake_count++;
6096 		}
6097 	}
6098 	binder_inner_proc_unlock(proc);
6099 
6100 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6101 		     "binder_flush: %d woke %d threads\n", proc->pid,
6102 		     wake_count);
6103 }
6104 
6105 static int binder_release(struct inode *nodp, struct file *filp)
6106 {
6107 	struct binder_proc *proc = filp->private_data;
6108 
6109 	debugfs_remove(proc->debugfs_entry);
6110 
6111 	if (proc->binderfs_entry) {
6112 		simple_recursive_removal(proc->binderfs_entry, NULL);
6113 		proc->binderfs_entry = NULL;
6114 	}
6115 
6116 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6117 
6118 	return 0;
6119 }
6120 
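/**
 * binder_node_release() - release a node whose owning process is dying
 * @node: node being released; the caller must hold a temporary ref
 * @refs: running count of incoming references, updated and returned
 *
 * Frees the node outright when nothing else references it. Otherwise
 * the node is moved to the global dead-nodes list and a BR_DEAD_BINDER
 * work item is queued for every reference that requested death
 * notification.
 *
 * Return: @refs plus the number of references this node still had.
 */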
6121 static int binder_node_release(struct binder_node *node, int refs)
6122 {
6123 	struct binder_ref *ref;
6124 	int death = 0;
6125 	struct binder_proc *proc = node->proc;
6126 
6127 	binder_release_work(proc, &node->async_todo);
6128 
6129 	binder_node_lock(node);
6130 	binder_inner_proc_lock(proc);
6131 	binder_dequeue_work_ilocked(&node->work);
6132 	/*
6133 	 * The caller must have taken a temporary ref on the node.
6134 	 */
6135 	BUG_ON(!node->tmp_refs);
6136 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6137 		binder_inner_proc_unlock(proc);
6138 		binder_node_unlock(node);
6139 		binder_free_node(node);
6140 
6141 		return refs;
6142 	}
6143 
6144 	node->proc = NULL;
6145 	node->local_strong_refs = 0;
6146 	node->local_weak_refs = 0;
6147 	binder_inner_proc_unlock(proc);
6148 
6149 	spin_lock(&binder_dead_nodes_lock);
6150 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
6151 	spin_unlock(&binder_dead_nodes_lock);
6152 
6153 	hlist_for_each_entry(ref, &node->refs, node_entry) {
6154 		refs++;
6155 		/*
6156 		 * Need the node lock to synchronize
6157 		 * with new notification requests and the
6158 		 * inner lock to synchronize with queued
6159 		 * death notifications.
6160 		 */
6161 		binder_inner_proc_lock(ref->proc);
6162 		if (!ref->death) {
6163 			binder_inner_proc_unlock(ref->proc);
6164 			continue;
6165 		}
6166 
6167 		death++;
6168 
6169 		BUG_ON(!list_empty(&ref->death->work.entry));
6170 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6171 		binder_enqueue_work_ilocked(&ref->death->work,
6172 					    &ref->proc->todo);
6173 		binder_wakeup_proc_ilocked(ref->proc);
6174 		binder_inner_proc_unlock(ref->proc);
6175 	}
6176 
6177 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
6178 		     "node %d now dead, refs %d, death %d\n",
6179 		     node->debug_id, refs, death);
6180 	binder_node_unlock(node);
6181 	binder_put_node(node);
6182 
6183 	return refs;
6184 }
6185 
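/**
 * binder_deferred_release() - tear down a binder_proc after its fd closed
 * @proc: process being destroyed
 *
 * Runs from the deferred workqueue. Unlinks @proc from the global list,
 * drops the context manager node if this process owned it, marks the
 * process dead, and then releases all threads, nodes, references and
 * undelivered work before dropping the temporary ref that kept @proc
 * alive during teardown.
 */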
6186 static void binder_deferred_release(struct binder_proc *proc)
6187 {
6188 	struct binder_context *context = proc->context;
6189 	struct rb_node *n;
6190 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6191 
6192 	mutex_lock(&binder_procs_lock);
6193 	hlist_del(&proc->proc_node);
6194 	mutex_unlock(&binder_procs_lock);
6195 
6196 	mutex_lock(&context->context_mgr_node_lock);
6197 	if (context->binder_context_mgr_node &&
6198 	    context->binder_context_mgr_node->proc == proc) {
6199 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
6200 			     "%s: %d context_mgr_node gone\n",
6201 			     __func__, proc->pid);
6202 		context->binder_context_mgr_node = NULL;
6203 	}
6204 	mutex_unlock(&context->context_mgr_node_lock);
6205 	binder_inner_proc_lock(proc);
6206 	/*
6207 	 * Make sure proc stays alive after we
6208 	 * remove all the threads
6209 	 */
6210 	proc->tmp_ref++;
6211 
6212 	proc->is_dead = true;
6213 	proc->is_frozen = false;
6214 	proc->sync_recv = false;
6215 	proc->async_recv = false;
6216 	threads = 0;
6217 	active_transactions = 0;
6218 	while ((n = rb_first(&proc->threads))) {
6219 		struct binder_thread *thread;
6220 
6221 		thread = rb_entry(n, struct binder_thread, rb_node);
6222 		binder_inner_proc_unlock(proc);
6223 		threads++;
6224 		active_transactions += binder_thread_release(proc, thread);
6225 		binder_inner_proc_lock(proc);
6226 	}
6227 
6228 	nodes = 0;
6229 	incoming_refs = 0;
6230 	while ((n = rb_first(&proc->nodes))) {
6231 		struct binder_node *node;
6232 
6233 		node = rb_entry(n, struct binder_node, rb_node);
6234 		nodes++;
6235 		/*
6236 		 * take a temporary ref on the node before
6237 		 * calling binder_node_release() which will either
6238 		 * kfree() the node or call binder_put_node()
6239 		 */
6240 		binder_inc_node_tmpref_ilocked(node);
6241 		rb_erase(&node->rb_node, &proc->nodes);
6242 		binder_inner_proc_unlock(proc);
6243 		incoming_refs = binder_node_release(node, incoming_refs);
6244 		binder_inner_proc_lock(proc);
6245 	}
6246 	binder_inner_proc_unlock(proc);
6247 
6248 	outgoing_refs = 0;
6249 	binder_proc_lock(proc);
6250 	while ((n = rb_first(&proc->refs_by_desc))) {
6251 		struct binder_ref *ref;
6252 
6253 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
6254 		outgoing_refs++;
6255 		binder_cleanup_ref_olocked(ref);
6256 		binder_proc_unlock(proc);
6257 		binder_free_ref(ref);
6258 		binder_proc_lock(proc);
6259 	}
6260 	binder_proc_unlock(proc);
6261 
6262 	binder_release_work(proc, &proc->todo);
6263 	binder_release_work(proc, &proc->delivered_death);
6264 	binder_release_work(proc, &proc->delivered_freeze);
6265 
6266 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6267 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6268 		     __func__, proc->pid, threads, nodes, incoming_refs,
6269 		     outgoing_refs, active_transactions);
6270 
6271 	binder_proc_dec_tmpref(proc);
6272 }
6273 
6274 static void binder_deferred_func(struct work_struct *work)
6275 {
6276 	struct binder_proc *proc;
6277 
6278 	int defer;
6279 
6280 	do {
6281 		mutex_lock(&binder_deferred_lock);
6282 		if (!hlist_empty(&binder_deferred_list)) {
6283 			proc = hlist_entry(binder_deferred_list.first,
6284 					struct binder_proc, deferred_work_node);
6285 			hlist_del_init(&proc->deferred_work_node);
6286 			defer = proc->deferred_work;
6287 			proc->deferred_work = 0;
6288 		} else {
6289 			proc = NULL;
6290 			defer = 0;
6291 		}
6292 		mutex_unlock(&binder_deferred_lock);
6293 
6294 		if (defer & BINDER_DEFERRED_FLUSH)
6295 			binder_deferred_flush(proc);
6296 
6297 		if (defer & BINDER_DEFERRED_RELEASE)
6298 			binder_deferred_release(proc); /* frees proc */
6299 	} while (proc);
6300 }
6301 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6302 
6303 static void
6304 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6305 {
6306 	guard(mutex)(&binder_deferred_lock);
6307 	proc->deferred_work |= defer;
6308 	if (hlist_unhashed(&proc->deferred_work_node)) {
6309 		hlist_add_head(&proc->deferred_work_node,
6310 				&binder_deferred_list);
6311 		schedule_work(&binder_deferred_work);
6312 	}
6313 }
6314 
6315 static void print_binder_transaction_ilocked(struct seq_file *m,
6316 					     struct binder_proc *proc,
6317 					     const char *prefix,
6318 					     struct binder_transaction *t)
6319 {
6320 	struct binder_proc *to_proc;
6321 	struct binder_buffer *buffer = t->buffer;
6322 	ktime_t current_time = ktime_get();
6323 
6324 	spin_lock(&t->lock);
6325 	to_proc = t->to_proc;
6326 	seq_printf(m,
6327 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6328 		   prefix, t->debug_id, t,
6329 		   t->from_pid,
6330 		   t->from_tid,
6331 		   to_proc ? to_proc->pid : 0,
6332 		   t->to_thread ? t->to_thread->pid : 0,
6333 		   t->code, t->flags, t->priority, t->need_reply,
6334 		   ktime_ms_delta(current_time, t->start_time));
6335 	spin_unlock(&t->lock);
6336 
6337 	if (proc != to_proc) {
6338 		/*
6339 		 * Can only safely deref buffer if we are holding the
6340 		 * correct proc inner lock for this node
6341 		 */
6342 		seq_puts(m, "\n");
6343 		return;
6344 	}
6345 
6346 	if (buffer == NULL) {
6347 		seq_puts(m, " buffer free\n");
6348 		return;
6349 	}
6350 	if (buffer->target_node)
6351 		seq_printf(m, " node %d", buffer->target_node->debug_id);
6352 	seq_printf(m, " size %zd:%zd offset %lx\n",
6353 		   buffer->data_size, buffer->offsets_size,
6354 		   buffer->user_data - proc->alloc.vm_start);
6355 }
6356 
6357 static void print_binder_work_ilocked(struct seq_file *m,
6358 				      struct binder_proc *proc,
6359 				      const char *prefix,
6360 				      const char *transaction_prefix,
6361 				      struct binder_work *w, bool hash_ptrs)
6362 {
6363 	struct binder_node *node;
6364 	struct binder_transaction *t;
6365 
6366 	switch (w->type) {
6367 	case BINDER_WORK_TRANSACTION:
6368 		t = container_of(w, struct binder_transaction, work);
6369 		print_binder_transaction_ilocked(
6370 				m, proc, transaction_prefix, t);
6371 		break;
6372 	case BINDER_WORK_RETURN_ERROR: {
6373 		struct binder_error *e = container_of(
6374 				w, struct binder_error, work);
6375 
6376 		seq_printf(m, "%stransaction error: %u\n",
6377 			   prefix, e->cmd);
6378 	} break;
6379 	case BINDER_WORK_TRANSACTION_COMPLETE:
6380 		seq_printf(m, "%stransaction complete\n", prefix);
6381 		break;
6382 	case BINDER_WORK_NODE:
6383 		node = container_of(w, struct binder_node, work);
6384 		if (hash_ptrs)
6385 			seq_printf(m, "%snode work %d: u%p c%p\n",
6386 				   prefix, node->debug_id,
6387 				   (void *)(long)node->ptr,
6388 				   (void *)(long)node->cookie);
6389 		else
6390 			seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6391 				   prefix, node->debug_id,
6392 				   (u64)node->ptr, (u64)node->cookie);
6393 		break;
6394 	case BINDER_WORK_DEAD_BINDER:
6395 		seq_printf(m, "%shas dead binder\n", prefix);
6396 		break;
6397 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6398 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6399 		break;
6400 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6401 		seq_printf(m, "%shas cleared death notification\n", prefix);
6402 		break;
6403 	case BINDER_WORK_FROZEN_BINDER:
6404 		seq_printf(m, "%shas frozen binder\n", prefix);
6405 		break;
6406 	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6407 		seq_printf(m, "%shas cleared freeze notification\n", prefix);
6408 		break;
6409 	default:
6410 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6411 		break;
6412 	}
6413 }
6414 
6415 static void print_binder_thread_ilocked(struct seq_file *m,
6416 					struct binder_thread *thread,
6417 					bool print_always, bool hash_ptrs)
6418 {
6419 	struct binder_transaction *t;
6420 	struct binder_work *w;
6421 	size_t start_pos = m->count;
6422 	size_t header_pos;
6423 
6424 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6425 			thread->pid, thread->looper,
6426 			thread->looper_need_return,
6427 			atomic_read(&thread->tmp_ref));
6428 	header_pos = m->count;
6429 	t = thread->transaction_stack;
6430 	while (t) {
6431 		if (t->from == thread) {
6432 			print_binder_transaction_ilocked(m, thread->proc,
6433 					"    outgoing transaction", t);
6434 			t = t->from_parent;
6435 		} else if (t->to_thread == thread) {
6436 			print_binder_transaction_ilocked(m, thread->proc,
6437 						 "    incoming transaction", t);
6438 			t = t->to_parent;
6439 		} else {
6440 			print_binder_transaction_ilocked(m, thread->proc,
6441 					"    bad transaction", t);
6442 			t = NULL;
6443 		}
6444 	}
6445 	list_for_each_entry(w, &thread->todo, entry) {
6446 		print_binder_work_ilocked(m, thread->proc, "    ",
6447 					  "    pending transaction",
6448 					  w, hash_ptrs);
6449 	}
6450 	if (!print_always && m->count == header_pos)
6451 		m->count = start_pos;
6452 }
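/*
 * The start_pos/header_pos dance above exploits the fact that seq_file
 * output is buffered: m->count is the current write offset into m->buf, so
 * rewinding it discards everything printed since that offset. In effect:
 *
 *	start_pos = m->count;		// offset before the header
 *	seq_printf(m, "  thread ...");	// print the header
 *	header_pos = m->count;		// offset right after the header
 *	...				// print transactions/work, if any
 *	if (m->count == header_pos)	// nothing followed the header
 *		m->count = start_pos;	//  -> silently drop the header
 *
 * so idle threads are omitted from the output unless print_always is set.
 */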
6453 
6454 static void print_binder_node_nilocked(struct seq_file *m,
6455 				       struct binder_node *node,
6456 				       bool hash_ptrs)
6457 {
6458 	struct binder_ref *ref;
6459 	struct binder_work *w;
6460 	int count;
6461 
6462 	count = hlist_count_nodes(&node->refs);
6463 
6464 	if (hash_ptrs)
6465 		seq_printf(m, "  node %d: u%p c%p", node->debug_id,
6466 			   (void *)(long)node->ptr, (void *)(long)node->cookie);
6467 	else
6468 		seq_printf(m, "  node %d: u%016llx c%016llx", node->debug_id,
6469 			   (u64)node->ptr, (u64)node->cookie);
6470 	seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6471 		   node->has_strong_ref, node->has_weak_ref,
6472 		   node->local_strong_refs, node->local_weak_refs,
6473 		   node->internal_strong_refs, count, node->tmp_refs);
6474 	if (count) {
6475 		seq_puts(m, " proc");
6476 		hlist_for_each_entry(ref, &node->refs, node_entry)
6477 			seq_printf(m, " %d", ref->proc->pid);
6478 	}
6479 	seq_puts(m, "\n");
6480 	if (node->proc) {
6481 		list_for_each_entry(w, &node->async_todo, entry)
6482 			print_binder_work_ilocked(m, node->proc, "    ",
6483 					  "    pending async transaction",
6484 					  w, hash_ptrs);
6485 	}
6486 }
6487 
6488 static void print_binder_ref_olocked(struct seq_file *m,
6489 				     struct binder_ref *ref)
6490 {
6491 	binder_node_lock(ref->node);
6492 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6493 		   ref->data.debug_id, ref->data.desc,
6494 		   ref->node->proc ? "" : "dead ",
6495 		   ref->node->debug_id, ref->data.strong,
6496 		   ref->data.weak, ref->death);
6497 	binder_node_unlock(ref->node);
6498 }
6499 
6500 /**
6501  * print_next_binder_node_ilocked() - Print binder_node from a locked list
6502  * @m:          struct seq_file for output via seq_printf()
6503  * @proc:       struct binder_proc whose inner_lock we hold (if any)
6504  * @node:       struct binder_node to print fields of
6505  * @prev_node:	struct binder_node we hold a temporary reference to (if any)
6506  * @hash_ptrs:  whether to hash @node's binder_uintptr_t fields
6507  *
6508  * Helper function to handle synchronization around printing a struct
6509  * binder_node while iterating through @proc->nodes or the dead nodes list.
6510  * Caller must hold either @proc->inner_lock (for live nodes) or
6511  * binder_dead_nodes_lock. This lock will be released during the body of this
6512  * function, but it will be reacquired before returning to the caller.
6513  *
6514  * Return:	pointer to the struct binder_node we hold a tmpref on
6515  */
6516 static struct binder_node *
6517 print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6518 			       struct binder_node *node,
6519 			       struct binder_node *prev_node, bool hash_ptrs)
6520 {
6521 	/*
6522 	 * Take a temporary reference on the node so that it isn't freed
6523 	 * while we print it.
6524 	 */
6525 	binder_inc_node_tmpref_ilocked(node);
6526 	/*
6527 	 * Live nodes need to drop the inner proc lock and dead nodes need to
6528 	 * drop the binder_dead_nodes_lock before trying to take the node lock.
6529 	 */
6530 	if (proc)
6531 		binder_inner_proc_unlock(proc);
6532 	else
6533 		spin_unlock(&binder_dead_nodes_lock);
6534 	if (prev_node)
6535 		binder_put_node(prev_node);
6536 	binder_node_inner_lock(node);
6537 	print_binder_node_nilocked(m, node, hash_ptrs);
6538 	binder_node_inner_unlock(node);
6539 	if (proc)
6540 		binder_inner_proc_lock(proc);
6541 	else
6542 		spin_lock(&binder_dead_nodes_lock);
6543 	return node;
6544 }
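/*
 * Expected calling pattern for the helper above (compare print_binder_proc()
 * just below and print_binder_state() further down):
 *
 *	binder_inner_proc_lock(proc);		// or binder_dead_nodes_lock
 *	for each node on the list:
 *		last_node = print_next_binder_node_ilocked(m, proc, node,
 *							   last_node, ...);
 *	binder_inner_proc_unlock(proc);
 *	if (last_node)
 *		binder_put_node(last_node);	// drop the final tmpref
 *
 * The tmpref taken on each node keeps it (and its list linkage) valid across
 * the unlock/relock window; the previous node's tmpref can only be dropped
 * safely once the list lock has been released, hence the one-node lag.
 */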
6545 
6546 static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6547 			      bool print_all, bool hash_ptrs)
6548 {
6549 	struct binder_work *w;
6550 	struct rb_node *n;
6551 	size_t start_pos = m->count;
6552 	size_t header_pos;
6553 	struct binder_node *last_node = NULL;
6554 
6555 	seq_printf(m, "proc %d\n", proc->pid);
6556 	seq_printf(m, "context %s\n", proc->context->name);
6557 	header_pos = m->count;
6558 
6559 	binder_inner_proc_lock(proc);
6560 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6561 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6562 						rb_node), print_all, hash_ptrs);
6563 
6564 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6565 		struct binder_node *node = rb_entry(n, struct binder_node,
6566 						    rb_node);
6567 		if (!print_all && !node->has_async_transaction)
6568 			continue;
6569 
6570 		last_node = print_next_binder_node_ilocked(m, proc, node,
6571 							   last_node,
6572 							   hash_ptrs);
6573 	}
6574 	binder_inner_proc_unlock(proc);
6575 	if (last_node)
6576 		binder_put_node(last_node);
6577 
6578 	if (print_all) {
6579 		binder_proc_lock(proc);
6580 		for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6581 			print_binder_ref_olocked(m, rb_entry(n,
6582 							     struct binder_ref,
6583 							     rb_node_desc));
6584 		binder_proc_unlock(proc);
6585 	}
6586 	binder_alloc_print_allocated(m, &proc->alloc);
6587 	binder_inner_proc_lock(proc);
6588 	list_for_each_entry(w, &proc->todo, entry)
6589 		print_binder_work_ilocked(m, proc, "  ",
6590 					  "  pending transaction", w,
6591 					  hash_ptrs);
6592 	list_for_each_entry(w, &proc->delivered_death, entry) {
6593 		seq_puts(m, "  has delivered dead binder\n");
6594 		break;
6595 	}
6596 	list_for_each_entry(w, &proc->delivered_freeze, entry) {
6597 		seq_puts(m, "  has delivered freeze binder\n");
6598 		break;
6599 	}
6600 	binder_inner_proc_unlock(proc);
6601 	if (!print_all && m->count == header_pos)
6602 		m->count = start_pos;
6603 }
6604 
6605 static const char * const binder_return_strings[] = {
6606 	"BR_ERROR",
6607 	"BR_OK",
6608 	"BR_TRANSACTION",
6609 	"BR_REPLY",
6610 	"BR_ACQUIRE_RESULT",
6611 	"BR_DEAD_REPLY",
6612 	"BR_TRANSACTION_COMPLETE",
6613 	"BR_INCREFS",
6614 	"BR_ACQUIRE",
6615 	"BR_RELEASE",
6616 	"BR_DECREFS",
6617 	"BR_ATTEMPT_ACQUIRE",
6618 	"BR_NOOP",
6619 	"BR_SPAWN_LOOPER",
6620 	"BR_FINISHED",
6621 	"BR_DEAD_BINDER",
6622 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6623 	"BR_FAILED_REPLY",
6624 	"BR_FROZEN_REPLY",
6625 	"BR_ONEWAY_SPAM_SUSPECT",
6626 	"BR_TRANSACTION_PENDING_FROZEN",
6627 	"BR_FROZEN_BINDER",
6628 	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6629 };
6630 
6631 static const char * const binder_command_strings[] = {
6632 	"BC_TRANSACTION",
6633 	"BC_REPLY",
6634 	"BC_ACQUIRE_RESULT",
6635 	"BC_FREE_BUFFER",
6636 	"BC_INCREFS",
6637 	"BC_ACQUIRE",
6638 	"BC_RELEASE",
6639 	"BC_DECREFS",
6640 	"BC_INCREFS_DONE",
6641 	"BC_ACQUIRE_DONE",
6642 	"BC_ATTEMPT_ACQUIRE",
6643 	"BC_REGISTER_LOOPER",
6644 	"BC_ENTER_LOOPER",
6645 	"BC_EXIT_LOOPER",
6646 	"BC_REQUEST_DEATH_NOTIFICATION",
6647 	"BC_CLEAR_DEATH_NOTIFICATION",
6648 	"BC_DEAD_BINDER_DONE",
6649 	"BC_TRANSACTION_SG",
6650 	"BC_REPLY_SG",
6651 	"BC_REQUEST_FREEZE_NOTIFICATION",
6652 	"BC_CLEAR_FREEZE_NOTIFICATION",
6653 	"BC_FREEZE_NOTIFICATION_DONE",
6654 };
6655 
6656 static const char * const binder_objstat_strings[] = {
6657 	"proc",
6658 	"thread",
6659 	"node",
6660 	"ref",
6661 	"death",
6662 	"transaction",
6663 	"transaction_complete",
6664 	"freeze",
6665 };
6666 
6667 static void print_binder_stats(struct seq_file *m, const char *prefix,
6668 			       struct binder_stats *stats)
6669 {
6670 	int i;
6671 
6672 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6673 		     ARRAY_SIZE(binder_command_strings));
6674 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6675 		int temp = atomic_read(&stats->bc[i]);
6676 
6677 		if (temp)
6678 			seq_printf(m, "%s%s: %d\n", prefix,
6679 				   binder_command_strings[i], temp);
6680 	}
6681 
6682 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6683 		     ARRAY_SIZE(binder_return_strings));
6684 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6685 		int temp = atomic_read(&stats->br[i]);
6686 
6687 		if (temp)
6688 			seq_printf(m, "%s%s: %d\n", prefix,
6689 				   binder_return_strings[i], temp);
6690 	}
6691 
6692 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6693 		     ARRAY_SIZE(binder_objstat_strings));
6694 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6695 		     ARRAY_SIZE(stats->obj_deleted));
6696 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6697 		int created = atomic_read(&stats->obj_created[i]);
6698 		int deleted = atomic_read(&stats->obj_deleted[i]);
6699 
6700 		if (created || deleted)
6701 			seq_printf(m, "%s%s: active %d total %d\n",
6702 				prefix,
6703 				binder_objstat_strings[i],
6704 				created - deleted,
6705 				created);
6706 	}
6707 }
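/*
 * Example of the object-stat output above: if 120 transaction objects were
 * created over the system's lifetime and 117 freed, the line emitted is
 *
 *	transaction: active 3 total 120
 *
 * The counters are independent monotonic atomics read without a lock, so
 * "active" is a best-effort snapshot rather than an instantaneously
 * consistent value.
 */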
6708 
6709 static void print_binder_proc_stats(struct seq_file *m,
6710 				    struct binder_proc *proc)
6711 {
6712 	struct binder_work *w;
6713 	struct binder_thread *thread;
6714 	struct rb_node *n;
6715 	int count, strong, weak, ready_threads;
6716 	size_t free_async_space =
6717 		binder_alloc_get_free_async_space(&proc->alloc);
6718 
6719 	seq_printf(m, "proc %d\n", proc->pid);
6720 	seq_printf(m, "context %s\n", proc->context->name);
6721 	count = 0;
6722 	ready_threads = 0;
6723 	binder_inner_proc_lock(proc);
6724 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
6725 		count++;
6726 
6727 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6728 		ready_threads++;
6729 
6730 	seq_printf(m, "  threads: %d\n", count);
6731 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6732 			"  ready threads %d\n"
6733 			"  free async space %zd\n", proc->requested_threads,
6734 			proc->requested_threads_started, proc->max_threads,
6735 			ready_threads,
6736 			free_async_space);
6737 	count = 0;
6738 	for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6739 		count++;
6740 	binder_inner_proc_unlock(proc);
6741 	seq_printf(m, "  nodes: %d\n", count);
6742 	count = 0;
6743 	strong = 0;
6744 	weak = 0;
6745 	binder_proc_lock(proc);
6746 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6747 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6748 						  rb_node_desc);
6749 		count++;
6750 		strong += ref->data.strong;
6751 		weak += ref->data.weak;
6752 	}
6753 	binder_proc_unlock(proc);
6754 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6755 
6756 	count = binder_alloc_get_allocated_count(&proc->alloc);
6757 	seq_printf(m, "  buffers: %d\n", count);
6758 
6759 	binder_alloc_print_pages(m, &proc->alloc);
6760 
6761 	count = 0;
6762 	binder_inner_proc_lock(proc);
6763 	list_for_each_entry(w, &proc->todo, entry) {
6764 		if (w->type == BINDER_WORK_TRANSACTION)
6765 			count++;
6766 	}
6767 	binder_inner_proc_unlock(proc);
6768 	seq_printf(m, "  pending transactions: %d\n", count);
6769 
6770 	print_binder_stats(m, "  ", &proc->stats);
6771 }
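/*
 * Decoding the "requested threads: R+S/M" line above: R is
 * proc->requested_threads (spawn requests sent via BR_SPAWN_LOOPER that
 * userspace has not yet answered), S is proc->requested_threads_started
 * (threads that responded with BC_REGISTER_LOOPER), and M is the limit
 * configured through the BINDER_SET_MAX_THREADS ioctl.
 */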
6772 
6773 static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6774 {
6775 	struct binder_proc *proc;
6776 	struct binder_node *node;
6777 	struct binder_node *last_node = NULL;
6778 
6779 	seq_puts(m, "binder state:\n");
6780 
6781 	spin_lock(&binder_dead_nodes_lock);
6782 	if (!hlist_empty(&binder_dead_nodes))
6783 		seq_puts(m, "dead nodes:\n");
6784 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6785 		last_node = print_next_binder_node_ilocked(m, NULL, node,
6786 							   last_node,
6787 							   hash_ptrs);
6788 	spin_unlock(&binder_dead_nodes_lock);
6789 	if (last_node)
6790 		binder_put_node(last_node);
6791 
6792 	mutex_lock(&binder_procs_lock);
6793 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6794 		print_binder_proc(m, proc, true, hash_ptrs);
6795 	mutex_unlock(&binder_procs_lock);
6796 }
6797 
6798 static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6799 {
6800 	struct binder_proc *proc;
6801 
6802 	seq_puts(m, "binder transactions:\n");
6803 	mutex_lock(&binder_procs_lock);
6804 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6805 		print_binder_proc(m, proc, false, hash_ptrs);
6806 	mutex_unlock(&binder_procs_lock);
6807 }
6808 
6809 static int state_show(struct seq_file *m, void *unused)
6810 {
6811 	print_binder_state(m, false);
6812 	return 0;
6813 }
6814 
6815 static int state_hashed_show(struct seq_file *m, void *unused)
6816 {
6817 	print_binder_state(m, true);
6818 	return 0;
6819 }
6820 
6821 static int stats_show(struct seq_file *m, void *unused)
6822 {
6823 	struct binder_proc *proc;
6824 
6825 	seq_puts(m, "binder stats:\n");
6826 
6827 	print_binder_stats(m, "", &binder_stats);
6828 
6829 	mutex_lock(&binder_procs_lock);
6830 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6831 		print_binder_proc_stats(m, proc);
6832 	mutex_unlock(&binder_procs_lock);
6833 
6834 	return 0;
6835 }
6836 
6837 static int transactions_show(struct seq_file *m, void *unused)
6838 {
6839 	print_binder_transactions(m, false);
6840 	return 0;
6841 }
6842 
6843 static int transactions_hashed_show(struct seq_file *m, void *unused)
6844 {
6845 	print_binder_transactions(m, true);
6846 	return 0;
6847 }
6848 
6849 static int proc_show(struct seq_file *m, void *unused)
6850 {
6851 	struct binder_proc *itr;
6852 	int pid = (unsigned long)m->private;
6853 
6854 	guard(mutex)(&binder_procs_lock);
6855 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6856 		if (itr->pid == pid) {
6857 			seq_puts(m, "binder proc state:\n");
6858 			print_binder_proc(m, itr, true, false);
6859 		}
6860 	}
6861 
6862 	return 0;
6863 }
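/*
 * Note that the loop above deliberately does not break on the first match:
 * one PID may own several binder_proc instances -- each open() of a binder
 * device creates one -- and every match is reported.
 */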
6864 
6865 static void print_binder_transaction_log_entry(struct seq_file *m,
6866 					struct binder_transaction_log_entry *e)
6867 {
6868 	int debug_id = READ_ONCE(e->debug_id_done);
6869 	/*
6870 	 * read barrier to guarantee debug_id_done read before
6871 	 * we print the log values
6872 	 */
6873 	smp_rmb();
6874 	seq_printf(m,
6875 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6876 		   e->debug_id, (e->call_type == 2) ? "reply" :
6877 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6878 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6879 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6880 		   e->return_error, e->return_error_param,
6881 		   e->return_error_line);
6882 	/*
6883 	 * read-barrier to guarantee read of debug_id_done after
6884 	 * done printing the fields of the entry
6885 	 */
6886 	smp_rmb();
6887 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6888 			"\n" : " (incomplete)\n");
6889 }
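/*
 * The paired READ_ONCE(e->debug_id_done) loads above form a lockless
 * consistency check in the style of a sequence counter: the logger zeroes
 * debug_id_done before overwriting an entry and stores the matching
 * debug_id into it, behind a write barrier, once the entry is complete. If
 * the two reads here disagree (or the first read is zero), the entry was
 * mid-update while we printed it and gets tagged "(incomplete)".
 */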
6890 
6891 static int transaction_log_show(struct seq_file *m, void *unused)
6892 {
6893 	struct binder_transaction_log *log = m->private;
6894 	unsigned int log_cur = atomic_read(&log->cur);
6895 	unsigned int count;
6896 	unsigned int cur;
6897 	int i;
6898 
6899 	count = log_cur + 1;
6900 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6901 		0 : count % ARRAY_SIZE(log->entry);
6902 	if (count > ARRAY_SIZE(log->entry) || log->full)
6903 		count = ARRAY_SIZE(log->entry);
6904 	for (i = 0; i < count; i++) {
6905 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6906 
6907 		print_binder_transaction_log_entry(m, &log->entry[index]);
6908 	}
6909 	return 0;
6910 }
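/*
 * A worked example of the ring-buffer arithmetic above, assuming
 * ARRAY_SIZE(log->entry) == 32: with log->full set and log->cur == 40,
 * count becomes 41 and is clamped to 32, cur = 41 % 32 = 9, and the loop
 * prints indices 9..31 followed by 0..8, i.e. the 32 entries from oldest
 * to newest. Before the first wrap (log->full clear), cur stays 0 and only
 * the first count entries are printed.
 */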
6911 
6912 const struct file_operations binder_fops = {
6913 	.owner = THIS_MODULE,
6914 	.poll = binder_poll,
6915 	.unlocked_ioctl = binder_ioctl,
6916 	.compat_ioctl = compat_ptr_ioctl,
6917 	.mmap = binder_mmap,
6918 	.open = binder_open,
6919 	.flush = binder_flush,
6920 	.release = binder_release,
6921 };
6922 
6923 DEFINE_SHOW_ATTRIBUTE(state);
6924 DEFINE_SHOW_ATTRIBUTE(state_hashed);
6925 DEFINE_SHOW_ATTRIBUTE(stats);
6926 DEFINE_SHOW_ATTRIBUTE(transactions);
6927 DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
6928 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6929 
6930 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6931 	{
6932 		.name = "state",
6933 		.mode = 0444,
6934 		.fops = &state_fops,
6935 		.data = NULL,
6936 	},
6937 	{
6938 		.name = "state_hashed",
6939 		.mode = 0444,
6940 		.fops = &state_hashed_fops,
6941 		.data = NULL,
6942 	},
6943 	{
6944 		.name = "stats",
6945 		.mode = 0444,
6946 		.fops = &stats_fops,
6947 		.data = NULL,
6948 	},
6949 	{
6950 		.name = "transactions",
6951 		.mode = 0444,
6952 		.fops = &transactions_fops,
6953 		.data = NULL,
6954 	},
6955 	{
6956 		.name = "transactions_hashed",
6957 		.mode = 0444,
6958 		.fops = &transactions_hashed_fops,
6959 		.data = NULL,
6960 	},
6961 	{
6962 		.name = "transaction_log",
6963 		.mode = 0444,
6964 		.fops = &transaction_log_fops,
6965 		.data = &binder_transaction_log,
6966 	},
6967 	{
6968 		.name = "failed_transaction_log",
6969 		.mode = 0444,
6970 		.fops = &transaction_log_fops,
6971 		.data = &binder_transaction_log_failed,
6972 	},
6973 	{} /* terminator */
6974 };
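/*
 * With debugfs mounted at its conventional location, the table above
 * materializes as read-only files under /sys/kernel/debug/binder/, e.g.:
 *
 *	cat /sys/kernel/debug/binder/stats
 *	cat /sys/kernel/debug/binder/failed_transaction_log
 *
 * The *_hashed variants print user-supplied pointers through %p (hashed by
 * the kernel) instead of as raw u64 values, which is the safer choice when
 * output may leave the machine.
 */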
6975 
6976 void binder_add_device(struct binder_device *device)
6977 {
6978 	guard(spinlock)(&binder_devices_lock);
6979 	hlist_add_head(&device->hlist, &binder_devices);
6980 }
6981 
6982 void binder_remove_device(struct binder_device *device)
6983 {
6984 	guard(spinlock)(&binder_devices_lock);
6985 	hlist_del_init(&device->hlist);
6986 }
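/*
 * The two helpers above rely on the cleanup.h scope-based guard:
 * guard(spinlock)(&binder_devices_lock) acquires the lock at that point and
 * releases it automatically when the enclosing scope exits, which is why no
 * explicit unlock appears here (the same pattern covers binder_deferred_lock
 * in binder_defer_work() and binder_procs_lock in proc_show() above).
 */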
6987 
6988 static int __init init_binder_device(const char *name)
6989 {
6990 	int ret;
6991 	struct binder_device *binder_device;
6992 
6993 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6994 	if (!binder_device)
6995 		return -ENOMEM;
6996 
6997 	binder_device->miscdev.fops = &binder_fops;
6998 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6999 	binder_device->miscdev.name = name;
7000 
7001 	refcount_set(&binder_device->ref, 1);
7002 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
7003 	binder_device->context.name = name;
7004 	mutex_init(&binder_device->context.context_mgr_node_lock);
7005 
7006 	ret = misc_register(&binder_device->miscdev);
7007 	if (ret < 0) {
7008 		kfree(binder_device);
7009 		return ret;
7010 	}
7011 
7012 	binder_add_device(binder_device);
7013 
7014 	return ret;
7015 }
7016 
7017 static int __init binder_init(void)
7018 {
7019 	int ret;
7020 	char *device_name, *device_tmp;
7021 	struct binder_device *device;
7022 	struct hlist_node *tmp;
7023 	char *device_names = NULL;
7024 	const struct binder_debugfs_entry *db_entry;
7025 
7026 	ret = binder_alloc_shrinker_init();
7027 	if (ret)
7028 		return ret;
7029 
7030 	atomic_set(&binder_transaction_log.cur, ~0U);
7031 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
7032 
7033 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
7034 
7035 	binder_for_each_debugfs_entry(db_entry)
7036 		debugfs_create_file(db_entry->name,
7037 					db_entry->mode,
7038 					binder_debugfs_dir_entry_root,
7039 					db_entry->data,
7040 					db_entry->fops);
7041 
7042 	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
7043 						binder_debugfs_dir_entry_root);
7044 
7045 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
7046 	    strcmp(binder_devices_param, "") != 0) {
7047 		/*
7048 		 * Copy the module_parameter string, because we don't want to
7049 		 * tokenize it in-place.
7050 		 */
7051 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
7052 		if (!device_names) {
7053 			ret = -ENOMEM;
7054 			goto err_alloc_device_names_failed;
7055 		}
7056 
7057 		device_tmp = device_names;
7058 		while ((device_name = strsep(&device_tmp, ","))) {
7059 			ret = init_binder_device(device_name);
7060 			if (ret)
7061 				goto err_init_binder_device_failed;
7062 		}
7063 	}
7064 
7065 	ret = init_binderfs();
7066 	if (ret)
7067 		goto err_init_binder_device_failed;
7068 
7069 	return ret;
7070 
7071 err_init_binder_device_failed:
7072 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7073 		misc_deregister(&device->miscdev);
7074 		binder_remove_device(device);
7075 		kfree(device);
7076 	}
7077 
7078 	kfree(device_names);
7079 
7080 err_alloc_device_names_failed:
7081 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7082 	binder_alloc_shrinker_exit();
7083 
7084 	return ret;
7085 }
7086 
7087 device_initcall(binder_init);
7088 
7089 #define CREATE_TRACE_POINTS
7090 #include "binder_trace.h"
7091 
7092 MODULE_LICENSE("GPL v2");
7093