// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <kunit/visibility.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static DEFINE_SPINLOCK(binder_devices_lock);

static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
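/*
 * debug_mask is runtime-tunable via sysfs; assuming the module is named
 * "binder" (KBUILD_MODNAME), writing e.g. 0x200 to
 * /sys/module/binder/parameters/debug_mask enables only
 * BINDER_DEBUG_TRANSACTION logging.
 */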

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

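/*
 * Reader-side sketch (illustrative only; this hypothetical helper is
 * not used by the driver, whose real consumer lives in the debugfs
 * code later in this file): pair the smp_wmb() above with an smp_rmb()
 * and re-check debug_id_done to detect an entry that is concurrently
 * being rewritten.
 */
static bool __maybe_unused binder_transaction_log_entry_complete(
	const struct binder_transaction_log_entry *e)
{
	int done = READ_ONCE(e->debug_id_done);

	/* pairs with the smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	return done != 0;
}
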
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is set, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
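
/*
 * Illustrative sketch (not called anywhere; assumes only the helpers
 * above) of the lock ordering documented at the top of this file: take
 * proc->outer_lock, then node->lock, then proc->inner_lock, and release
 * them in the reverse order.
 */
static void __maybe_unused binder_lock_order_sketch(struct binder_proc *proc,
						    struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... binder_ref / binder_node / todo lists are safe here ... */

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}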

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such
	 * threads risk waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
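	/*
	 * rlimit_to_nice() maps an unset (zero) RLIMIT_NICE to a value
	 * above MAX_NICE, so min_nice <= MAX_NICE means the capped
	 * value was valid and no error needs to be reported.
	 */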
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
			binder_enqueue_work_ilocked(&node->work, target_list);
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
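
/*
 * Lifecycle sketch (illustrative only, not used by the driver):
 * binder_get_node() returns the node with an implicit temporary
 * reference that keeps it alive; the caller must drop that reference
 * with binder_put_node() when done.
 */
static void __maybe_unused binder_node_lookup_sketch(struct binder_proc *proc,
						     binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (node) {
		/* ... node is guaranteed to stay alive here ... */
		binder_put_node(node);
	}
}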

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
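/*
 * Worked example (illustrative): with descriptors {0, 1, 3} in use and
 * offset == 1, the walk below advances desc to 2, stops at 3 > 2, and
 * returns 2, the smallest unused descriptor >= offset.
 */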
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different from the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}

	if (ref->freeze) {
		binder_dequeue_work(ref->proc, &ref->freeze->work);
		binder_stats_deleted(BINDER_STAT_FREEZE);
	}

	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if the ref was not found or a strong
 * ref was required but the ref is only weak
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
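
/*
 * Usage sketch (illustrative only, not used by the driver): a strong
 * increment on a user-supplied handle, as the BC_ACQUIRE handling later
 * in this file does, flows through the helper above.
 */
static int __maybe_unused binder_acquire_handle_sketch(struct binder_proc *proc,
						       uint32_t desc)
{
	struct binder_ref_data rdata;

	/* increment=true, strong=true: take a strong reference */
	return binder_update_ref_for_handle(proc, desc, true, true, &rdata);
}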

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero (i.e. while thread->is_dead is false)
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	guard(spinlock)(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	return from;
}
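
/*
 * Usage sketch (illustrative only, not used by the driver): keep the
 * "from" thread alive across a dereference by pairing
 * binder_get_txn_from() with binder_thread_dec_tmpref().
 */
static void __maybe_unused binder_txn_from_sketch(struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from(t);

	if (from) {
		/* ... from cannot be freed while the tmp_ref is held ... */
		binder_thread_dec_tmpref(from);
	}
}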

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

1744 /**
1745 * binder_cleanup_transaction() - cleans up undelivered transaction
1746 * @t: transaction that needs to be cleaned up
1747 * @reason: reason the transaction wasn't delivered
1748 * @error_code: error to return to caller (if synchronous call)
1749 */
1750 static void binder_cleanup_transaction(struct binder_transaction *t,
1751 const char *reason,
1752 uint32_t error_code)
1753 {
1754 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1755 binder_send_failed_reply(t, error_code);
1756 } else {
1757 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1758 "undelivered transaction %d, %s\n",
1759 t->debug_id, reason);
1760 binder_free_transaction(t);
1761 }
1762 }
1763
1764 /**
1765 * binder_get_object() - gets object and checks for valid metadata
1766 * @proc: binder_proc owning the buffer
1767 * @u: sender's user pointer to base of buffer
1768 * @buffer: binder_buffer that we're parsing.
1769 * @offset: offset in the @buffer at which to validate an object.
1770 * @object: struct binder_object to read into
1771 *
1772 * Copy the binder object at the given offset into @object. If @u is
1773 * provided then the copy is from the sender's buffer. If not, then
1774 * it is copied from the target's @buffer.
1775 *
1776 * Return: If there's a valid metadata object at @offset, the
1777 * size of that object. Otherwise, it returns zero. The object
1778 * is read into the struct binder_object pointed to by @object.
1779 */
1780 static size_t binder_get_object(struct binder_proc *proc,
1781 const void __user *u,
1782 struct binder_buffer *buffer,
1783 unsigned long offset,
1784 struct binder_object *object)
1785 {
1786 size_t read_size;
1787 struct binder_object_header *hdr;
1788 size_t object_size = 0;
1789
1790 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1791 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1792 !IS_ALIGNED(offset, sizeof(u32)))
1793 return 0;
1794
1795 if (u) {
1796 if (copy_from_user(object, u + offset, read_size))
1797 return 0;
1798 } else {
1799 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1800 offset, read_size))
1801 return 0;
1802 }
1803
1804 /* Ok, now see if we read a complete object. */
1805 hdr = &object->hdr;
1806 switch (hdr->type) {
1807 case BINDER_TYPE_BINDER:
1808 case BINDER_TYPE_WEAK_BINDER:
1809 case BINDER_TYPE_HANDLE:
1810 case BINDER_TYPE_WEAK_HANDLE:
1811 object_size = sizeof(struct flat_binder_object);
1812 break;
1813 case BINDER_TYPE_FD:
1814 object_size = sizeof(struct binder_fd_object);
1815 break;
1816 case BINDER_TYPE_PTR:
1817 object_size = sizeof(struct binder_buffer_object);
1818 break;
1819 case BINDER_TYPE_FDA:
1820 object_size = sizeof(struct binder_fd_array_object);
1821 break;
1822 default:
1823 return 0;
1824 }
1825 if (offset <= buffer->data_size - object_size &&
1826 buffer->data_size >= object_size)
1827 return object_size;
1828 else
1829 return 0;
1830 }
1831
1832 /**
1833 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1834 * @proc: binder_proc owning the buffer
1835 * @b: binder_buffer containing the object
1836 * @object: struct binder_object to read into
1837 * @index: index in offset array at which the binder_buffer_object is
1838 * located
1839 * @start_offset: points to the start of the offset array
1840 * @object_offsetp: offset of @object read from @b
1841 * @num_valid: the number of valid offsets in the offset array
1842 *
1843 * Return: If @index is within the valid range of the offset array
1844 * described by @start and @num_valid, and if there's a valid
1845 * binder_buffer_object at the offset found in index @index
1846 * of the offset array, that object is returned. Otherwise,
1847 * %NULL is returned.
1848 * Note that the offset found in index @index itself is not
1849 * verified; this function assumes that @num_valid elements
1850 * from @start were previously verified to have valid offsets.
1851 * If @object_offsetp is non-NULL, then the offset within
1852 * @b is written to it.
1853 */
1854 static struct binder_buffer_object *binder_validate_ptr(
1855 struct binder_proc *proc,
1856 struct binder_buffer *b,
1857 struct binder_object *object,
1858 binder_size_t index,
1859 binder_size_t start_offset,
1860 binder_size_t *object_offsetp,
1861 binder_size_t num_valid)
1862 {
1863 size_t object_size;
1864 binder_size_t object_offset;
1865 unsigned long buffer_offset;
1866
1867 if (index >= num_valid)
1868 return NULL;
1869
1870 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1871 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1872 b, buffer_offset,
1873 sizeof(object_offset)))
1874 return NULL;
1875 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1876 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1877 return NULL;
1878 if (object_offsetp)
1879 *object_offsetp = object_offset;
1880
1881 return &object->bbo;
1882 }
1883
1884 /**
1885 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1886 * @proc: binder_proc owning the buffer
1887 * @b: transaction buffer
1888 * @objects_start_offset: offset to start of objects buffer
1889 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1890 * @fixup_offset: start offset in @buffer to fix up
1891 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1892 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1893 *
1894 * Return: %true if a fixup in buffer @buffer at offset @offset is
1895 * allowed.
1896 *
1897 * For safety reasons, we only allow fixups inside a buffer to happen
1898 * at increasing offsets; additionally, we only allow fixup on the last
1899 * buffer object that was verified, or one of its parents.
1900 *
1901 * Example of what is allowed:
1902 *
1903 * A
1904 * B (parent = A, offset = 0)
1905 * C (parent = A, offset = 16)
1906 * D (parent = C, offset = 0)
1907 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1908 *
1909 * Examples of what is not allowed:
1910 *
1911 * Decreasing offsets within the same parent:
1912 * A
1913 * C (parent = A, offset = 16)
1914 * B (parent = A, offset = 0) // decreasing offset within A
1915 *
1916 * Referring to a parent that wasn't the last object or any of its parents:
1917 * A
1918 * B (parent = A, offset = 0)
1919 * C (parent = A, offset = 0)
1920 * C (parent = A, offset = 16)
1921 * D (parent = B, offset = 0) // B is not the last object (C) or any of its parents
1922 */
1923 static bool binder_validate_fixup(struct binder_proc *proc,
1924 struct binder_buffer *b,
1925 binder_size_t objects_start_offset,
1926 binder_size_t buffer_obj_offset,
1927 binder_size_t fixup_offset,
1928 binder_size_t last_obj_offset,
1929 binder_size_t last_min_offset)
1930 {
1931 if (!last_obj_offset) {
1932 /* No previous buffer object, so no fixup is allowed */
1933 return false;
1934 }
1935
1936 while (last_obj_offset != buffer_obj_offset) {
1937 unsigned long buffer_offset;
1938 struct binder_object last_object;
1939 struct binder_buffer_object *last_bbo;
1940 size_t object_size = binder_get_object(proc, NULL, b,
1941 last_obj_offset,
1942 &last_object);
1943 if (object_size != sizeof(*last_bbo))
1944 return false;
1945
1946 last_bbo = &last_object.bbo;
1947 /*
1948 * Safe to retrieve the parent of last_obj, since it
1949 * was already previously verified by the driver.
1950 */
1951 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1952 return false;
1953 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1954 buffer_offset = objects_start_offset +
1955 sizeof(binder_size_t) * last_bbo->parent;
1956 if (binder_alloc_copy_from_buffer(&proc->alloc,
1957 &last_obj_offset,
1958 b, buffer_offset,
1959 sizeof(last_obj_offset)))
1960 return false;
1961 }
1962 return (fixup_offset >= last_min_offset);
1963 }
1964
1965 /**
1966 * struct binder_task_work_cb - for deferred close
1967 *
1968 * @twork: callback_head for task work
1969 * @file: file to close
1970 *
1971 * Structure to pass task work to be handled after
1972 * returning from binder_ioctl() via task_work_add().
1973 */
1974 struct binder_task_work_cb {
1975 struct callback_head twork;
1976 struct file *file;
1977 };
1978
1979 /**
1980 * binder_do_fd_close() - close list of file descriptors
1981 * @twork: callback head for task work
1982 *
1983 * It is not safe to call ksys_close() during the binder_ioctl()
1984 * function if there is a chance that binder's own file descriptor
1985 * might be closed. This is to meet the requirements for using
1986 * fdget() (see comments for __fget_light()). Therefore use
1987 * task_work_add() to schedule the close operation once we have
1988 * returned from binder_ioctl(). This function is a callback
1989 * for that mechanism and does the actual ksys_close() on the
1990 * given file descriptor.
1991 */
1992 static void binder_do_fd_close(struct callback_head *twork)
1993 {
1994 struct binder_task_work_cb *twcb = container_of(twork,
1995 struct binder_task_work_cb, twork);
1996
1997 fput(twcb->file);
1998 kfree(twcb);
1999 }
2000
2001 /**
2002 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2003 * @fd: file-descriptor to close
2004 *
2005 * See comments in binder_do_fd_close(). This function is used to schedule
2006 * a file-descriptor to be closed after returning from binder_ioctl().
2007 */
2008 static void binder_deferred_fd_close(int fd)
2009 {
2010 struct binder_task_work_cb *twcb;
2011
2012 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2013 if (!twcb)
2014 return;
2015 init_task_work(&twcb->twork, binder_do_fd_close);
2016 twcb->file = file_close_fd(fd);
2017 if (twcb->file) {
2018 // pin it until binder_do_fd_close(); see comments there
2019 get_file(twcb->file);
2020 filp_close(twcb->file, current->files);
2021 task_work_add(current, &twcb->twork, TWA_RESUME);
2022 } else {
2023 kfree(twcb);
2024 }
2025 }
2026
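/**
 * binder_transaction_buffer_release() - release objects in a buffer
 * @proc: binder proc that owns @buffer
 * @thread: thread performing the release (may be NULL)
 * @buffer: transaction buffer being torn down
 * @off_end_offset: end of the offset array to process
 * @is_failure: true if the transaction failed before delivery
 *
 * Undo the reference counting performed during translation: drop node
 * and handle references and schedule deferred closes for any fds that
 * were installed from a BINDER_TYPE_FDA object.
 */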
2027 static void binder_transaction_buffer_release(struct binder_proc *proc,
2028 struct binder_thread *thread,
2029 struct binder_buffer *buffer,
2030 binder_size_t off_end_offset,
2031 bool is_failure)
2032 {
2033 int debug_id = buffer->debug_id;
2034 binder_size_t off_start_offset, buffer_offset;
2035
2036 binder_debug(BINDER_DEBUG_TRANSACTION,
2037 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2038 proc->pid, buffer->debug_id,
2039 buffer->data_size, buffer->offsets_size,
2040 (unsigned long long)off_end_offset);
2041
2042 if (buffer->target_node)
2043 binder_dec_node(buffer->target_node, 1, 0);
2044
2045 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2046
2047 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2048 buffer_offset += sizeof(binder_size_t)) {
2049 struct binder_object_header *hdr;
2050 size_t object_size = 0;
2051 struct binder_object object;
2052 binder_size_t object_offset;
2053
2054 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2055 buffer, buffer_offset,
2056 sizeof(object_offset)))
2057 object_size = binder_get_object(proc, NULL, buffer,
2058 object_offset, &object);
2059 if (object_size == 0) {
2060 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2061 debug_id, (u64)object_offset, buffer->data_size);
2062 continue;
2063 }
2064 hdr = &object.hdr;
2065 switch (hdr->type) {
2066 case BINDER_TYPE_BINDER:
2067 case BINDER_TYPE_WEAK_BINDER: {
2068 struct flat_binder_object *fp;
2069 struct binder_node *node;
2070
2071 fp = to_flat_binder_object(hdr);
2072 node = binder_get_node(proc, fp->binder);
2073 if (node == NULL) {
2074 pr_err("transaction release %d bad node %016llx\n",
2075 debug_id, (u64)fp->binder);
2076 break;
2077 }
2078 binder_debug(BINDER_DEBUG_TRANSACTION,
2079 " node %d u%016llx\n",
2080 node->debug_id, (u64)node->ptr);
2081 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2082 0);
2083 binder_put_node(node);
2084 } break;
2085 case BINDER_TYPE_HANDLE:
2086 case BINDER_TYPE_WEAK_HANDLE: {
2087 struct flat_binder_object *fp;
2088 struct binder_ref_data rdata;
2089 int ret;
2090
2091 fp = to_flat_binder_object(hdr);
2092 ret = binder_dec_ref_for_handle(proc, fp->handle,
2093 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2094
2095 if (ret) {
2096 pr_err("transaction release %d bad handle %d, ret = %d\n",
2097 debug_id, fp->handle, ret);
2098 break;
2099 }
2100 binder_debug(BINDER_DEBUG_TRANSACTION,
2101 " ref %d desc %d\n",
2102 rdata.debug_id, rdata.desc);
2103 } break;
2104
2105 case BINDER_TYPE_FD: {
2106 /*
2107 * No need to close the file here since user-space
2108 * closes it for successfully delivered
2109 * transactions. For transactions that weren't
2110 * delivered, the new fd was never allocated so
2111 * there is no need to close and the fput on the
2112 * file is done when the transaction is torn
2113 * down.
2114 */
2115 } break;
2116 case BINDER_TYPE_PTR:
2117 /*
2118 * Nothing to do here, this will get cleaned up when the
2119 * transaction buffer gets freed
2120 */
2121 break;
2122 case BINDER_TYPE_FDA: {
2123 struct binder_fd_array_object *fda;
2124 struct binder_buffer_object *parent;
2125 struct binder_object ptr_object;
2126 binder_size_t fda_offset;
2127 size_t fd_index;
2128 binder_size_t fd_buf_size;
2129 binder_size_t num_valid;
2130
2131 if (is_failure) {
2132 /*
2133 * The fd fixups have not been applied so no
2134 * fds need to be closed.
2135 */
2136 continue;
2137 }
2138
2139 num_valid = (buffer_offset - off_start_offset) /
2140 sizeof(binder_size_t);
2141 fda = to_binder_fd_array_object(hdr);
2142 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2143 fda->parent,
2144 off_start_offset,
2145 NULL,
2146 num_valid);
2147 if (!parent) {
2148 pr_err("transaction release %d bad parent offset\n",
2149 debug_id);
2150 continue;
2151 }
2152 fd_buf_size = sizeof(u32) * fda->num_fds;
2153 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2154 pr_err("transaction release %d invalid number of fds (%lld)\n",
2155 debug_id, (u64)fda->num_fds);
2156 continue;
2157 }
2158 if (fd_buf_size > parent->length ||
2159 fda->parent_offset > parent->length - fd_buf_size) {
2160 /* No space for all file descriptors here. */
2161 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2162 debug_id, (u64)fda->num_fds);
2163 continue;
2164 }
2165 /*
2166 * the source data for binder_buffer_object is visible
2167 * to user-space and the @buffer element is the user
2168 * pointer to the buffer_object containing the fd_array.
2169 * Convert the address to an offset relative to
2170 * the base of the transaction buffer.
2171 */
2172 fda_offset = parent->buffer - buffer->user_data +
2173 fda->parent_offset;
2174 for (fd_index = 0; fd_index < fda->num_fds;
2175 fd_index++) {
2176 u32 fd;
2177 int err;
2178 binder_size_t offset = fda_offset +
2179 fd_index * sizeof(fd);
2180
2181 err = binder_alloc_copy_from_buffer(
2182 &proc->alloc, &fd, buffer,
2183 offset, sizeof(fd));
2184 WARN_ON(err);
2185 if (!err) {
2186 binder_deferred_fd_close(fd);
2187 /*
2188 * Need to make sure the thread goes
2189 * back to userspace to complete the
2190 * deferred close
2191 */
2192 if (thread)
2193 thread->looper_need_return = true;
2194 }
2195 }
2196 } break;
2197 default:
2198 pr_err("transaction release %d bad object type %x\n",
2199 debug_id, hdr->type);
2200 break;
2201 }
2202 }
2203 }
2204
2205 /* Clean up all the objects in the buffer */
2206 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2207 struct binder_thread *thread,
2208 struct binder_buffer *buffer,
2209 bool is_failure)
2210 {
2211 binder_size_t off_end_offset;
2212
2213 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2214 off_end_offset += buffer->offsets_size;
2215
2216 binder_transaction_buffer_release(proc, thread, buffer,
2217 off_end_offset, is_failure);
2218 }
2219
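/**
 * binder_translate_binder() - translate a local binder into a handle
 * @fp: flat_binder_object to translate in place
 * @t: transaction the object belongs to
 * @thread: sending thread
 *
 * Convert a BINDER_TYPE_(WEAK_)BINDER object from the sender into a
 * BINDER_TYPE_(WEAK_)HANDLE for the target process, creating the node
 * and the target-side reference as needed.
 *
 * Return: 0 on success, negative errno on failure
 */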
2220 static int binder_translate_binder(struct flat_binder_object *fp,
2221 struct binder_transaction *t,
2222 struct binder_thread *thread)
2223 {
2224 struct binder_node *node;
2225 struct binder_proc *proc = thread->proc;
2226 struct binder_proc *target_proc = t->to_proc;
2227 struct binder_ref_data rdata;
2228 int ret = 0;
2229
2230 node = binder_get_node(proc, fp->binder);
2231 if (!node) {
2232 node = binder_new_node(proc, fp);
2233 if (!node)
2234 return -ENOMEM;
2235 }
2236 if (fp->cookie != node->cookie) {
2237 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2238 proc->pid, thread->pid, (u64)fp->binder,
2239 node->debug_id, (u64)fp->cookie,
2240 (u64)node->cookie);
2241 ret = -EINVAL;
2242 goto done;
2243 }
2244 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2245 ret = -EPERM;
2246 goto done;
2247 }
2248
2249 ret = binder_inc_ref_for_node(target_proc, node,
2250 fp->hdr.type == BINDER_TYPE_BINDER,
2251 &thread->todo, &rdata);
2252 if (ret)
2253 goto done;
2254
2255 if (fp->hdr.type == BINDER_TYPE_BINDER)
2256 fp->hdr.type = BINDER_TYPE_HANDLE;
2257 else
2258 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2259 fp->binder = 0;
2260 fp->handle = rdata.desc;
2261 fp->cookie = 0;
2262
2263 trace_binder_transaction_node_to_ref(t, node, &rdata);
2264 binder_debug(BINDER_DEBUG_TRANSACTION,
2265 " node %d u%016llx -> ref %d desc %d\n",
2266 node->debug_id, (u64)node->ptr,
2267 rdata.debug_id, rdata.desc);
2268 done:
2269 binder_put_node(node);
2270 return ret;
2271 }
2272
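/**
 * binder_translate_handle() - translate a handle for the target process
 * @fp: flat_binder_object to translate in place
 * @t: transaction the object belongs to
 * @thread: sending thread
 *
 * If the handle refers to a node owned by the target process, convert
 * it back into a (weak) binder object; otherwise take a reference in
 * the target and rewrite the handle to the target's descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */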
2273 static int binder_translate_handle(struct flat_binder_object *fp,
2274 struct binder_transaction *t,
2275 struct binder_thread *thread)
2276 {
2277 struct binder_proc *proc = thread->proc;
2278 struct binder_proc *target_proc = t->to_proc;
2279 struct binder_node *node;
2280 struct binder_ref_data src_rdata;
2281 int ret = 0;
2282
2283 node = binder_get_node_from_ref(proc, fp->handle,
2284 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2285 if (!node) {
2286 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2287 proc->pid, thread->pid, fp->handle);
2288 return -EINVAL;
2289 }
2290 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2291 ret = -EPERM;
2292 goto done;
2293 }
2294
2295 binder_node_lock(node);
2296 if (node->proc == target_proc) {
2297 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2298 fp->hdr.type = BINDER_TYPE_BINDER;
2299 else
2300 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2301 fp->binder = node->ptr;
2302 fp->cookie = node->cookie;
2303 if (node->proc)
2304 binder_inner_proc_lock(node->proc);
2305 else
2306 __acquire(&node->proc->inner_lock);
2307 binder_inc_node_nilocked(node,
2308 fp->hdr.type == BINDER_TYPE_BINDER,
2309 0, NULL);
2310 if (node->proc)
2311 binder_inner_proc_unlock(node->proc);
2312 else
2313 __release(&node->proc->inner_lock);
2314 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2315 binder_debug(BINDER_DEBUG_TRANSACTION,
2316 " ref %d desc %d -> node %d u%016llx\n",
2317 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2318 (u64)node->ptr);
2319 binder_node_unlock(node);
2320 } else {
2321 struct binder_ref_data dest_rdata;
2322
2323 binder_node_unlock(node);
2324 ret = binder_inc_ref_for_node(target_proc, node,
2325 fp->hdr.type == BINDER_TYPE_HANDLE,
2326 NULL, &dest_rdata);
2327 if (ret)
2328 goto done;
2329
2330 fp->binder = 0;
2331 fp->handle = dest_rdata.desc;
2332 fp->cookie = 0;
2333 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2334 &dest_rdata);
2335 binder_debug(BINDER_DEBUG_TRANSACTION,
2336 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2337 src_rdata.debug_id, src_rdata.desc,
2338 dest_rdata.debug_id, dest_rdata.desc,
2339 node->debug_id);
2340 }
2341 done:
2342 binder_put_node(node);
2343 return ret;
2344 }
2345
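/**
 * binder_translate_fd() - queue an fd for installation in the target
 * @fd: file descriptor in the sender's fd table
 * @fd_offset: offset of the fd in the target buffer
 * @t: transaction the fd belongs to
 * @thread: sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * The target fd cannot be allocated here since that must be done in
 * the context of the target process. Instead, pin the struct file and
 * queue a binder_txn_fd_fixup to be processed once the target reads
 * the transaction.
 *
 * Return: 0 on success, negative errno on failure
 */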
2346 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2347 struct binder_transaction *t,
2348 struct binder_thread *thread,
2349 struct binder_transaction *in_reply_to)
2350 {
2351 struct binder_proc *proc = thread->proc;
2352 struct binder_proc *target_proc = t->to_proc;
2353 struct binder_txn_fd_fixup *fixup;
2354 struct file *file;
2355 int ret = 0;
2356 bool target_allows_fd;
2357
2358 if (in_reply_to)
2359 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2360 else
2361 target_allows_fd = t->buffer->target_node->accept_fds;
2362 if (!target_allows_fd) {
2363 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2364 proc->pid, thread->pid,
2365 in_reply_to ? "reply" : "transaction",
2366 fd);
2367 ret = -EPERM;
2368 goto err_fd_not_accepted;
2369 }
2370
2371 file = fget(fd);
2372 if (!file) {
2373 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2374 proc->pid, thread->pid, fd);
2375 ret = -EBADF;
2376 goto err_fget;
2377 }
2378 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2379 if (ret < 0) {
2380 ret = -EPERM;
2381 goto err_security;
2382 }
2383
2384 /*
2385 * Add fixup record for this transaction. The allocation
2386 * of the fd in the target needs to be done from a
2387 * target thread.
2388 */
2389 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2390 if (!fixup) {
2391 ret = -ENOMEM;
2392 goto err_alloc;
2393 }
2394 fixup->file = file;
2395 fixup->offset = fd_offset;
2396 fixup->target_fd = -1;
2397 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2398 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2399
2400 return ret;
2401
2402 err_alloc:
2403 err_security:
2404 fput(file);
2405 err_fget:
2406 err_fd_not_accepted:
2407 return ret;
2408 }
2409
2410 /**
2411 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2412 * @offset: offset in target buffer to fixup
2413 * @skip_size: bytes to skip in copy (fixup will be written later)
2414 * @fixup_data: data to write at fixup offset
2415 * @node: list node
2416 *
2417 * This is used for the pointer fixup list (pf) which is created and consumed
2418 * during binder_transaction() and is only accessed locally. No
2419 * locking is necessary.
2420 *
2421 * The list is ordered by @offset.
2422 */
2423 struct binder_ptr_fixup {
2424 binder_size_t offset;
2425 size_t skip_size;
2426 binder_uintptr_t fixup_data;
2427 struct list_head node;
2428 };
2429
2430 /**
2431 * struct binder_sg_copy - scatter-gather data to be copied
2432 * @offset: offset in target buffer
2433 * @sender_uaddr: user address in source buffer
2434 * @length: bytes to copy
2435 * @node: list node
2436 *
2437 * This is used for the sg copy list (sgc) which is created and consumed
2438 * during binder_transaction() and is only accessed locally. No
2439 * locking is necessary.
2440 *
2441 * The list is ordered by @offset.
2442 */
2443 struct binder_sg_copy {
2444 binder_size_t offset;
2445 const void __user *sender_uaddr;
2446 size_t length;
2447 struct list_head node;
2448 };
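/*
 * A sketch of how the two lists interact (illustrative values only):
 * given a single sg block of 64 bytes at target offset 0 and one
 * pointer fixup at offset 16 with skip_size 0,
 * binder_do_deferred_txn_copies() copies bytes [0, 16) from the
 * sender, writes the 8-byte fixup_data at offset 16, then resumes
 * copying the sender's data at offset 24 until the block is complete.
 */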
2449
2450 /**
2451 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2452 * @alloc: binder_alloc associated with @buffer
2453 * @buffer: binder buffer in target process
2454 * @sgc_head: list_head of scatter-gather copy list
2455 * @pf_head: list_head of pointer fixup list
2456 *
2457 * Processes all elements of @sgc_head, applying fixups from @pf_head
2458 * and copying the scatter-gather data from the source process' user
2459 * buffer to the target's buffer. It is expected that the list creation
2460 * and processing all occurs during binder_transaction() so these lists
2461 * are only accessed in local context.
2462 *
2463 * Return: 0=success, else -errno
2464 */
2465 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2466 struct binder_buffer *buffer,
2467 struct list_head *sgc_head,
2468 struct list_head *pf_head)
2469 {
2470 int ret = 0;
2471 struct binder_sg_copy *sgc, *tmpsgc;
2472 struct binder_ptr_fixup *tmppf;
2473 struct binder_ptr_fixup *pf =
2474 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2475 node);
2476
2477 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2478 size_t bytes_copied = 0;
2479
2480 while (bytes_copied < sgc->length) {
2481 size_t copy_size;
2482 size_t bytes_left = sgc->length - bytes_copied;
2483 size_t offset = sgc->offset + bytes_copied;
2484
2485 /*
2486 * We copy up to the fixup (pointed to by pf)
2487 */
2488 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2489 : bytes_left;
2490 if (!ret && copy_size)
2491 ret = binder_alloc_copy_user_to_buffer(
2492 alloc, buffer,
2493 offset,
2494 sgc->sender_uaddr + bytes_copied,
2495 copy_size);
2496 bytes_copied += copy_size;
2497 if (copy_size != bytes_left) {
2498 BUG_ON(!pf);
2499 /* we stopped at a fixup offset */
2500 if (pf->skip_size) {
2501 /*
2502 * we are just skipping. This is for
2503 * BINDER_TYPE_FDA where the translated
2504 * fds will be fixed up when we get
2505 * to target context.
2506 */
2507 bytes_copied += pf->skip_size;
2508 } else {
2509 /* apply the fixup indicated by pf */
2510 if (!ret)
2511 ret = binder_alloc_copy_to_buffer(
2512 alloc, buffer,
2513 pf->offset,
2514 &pf->fixup_data,
2515 sizeof(pf->fixup_data));
2516 bytes_copied += sizeof(pf->fixup_data);
2517 }
2518 list_del(&pf->node);
2519 kfree(pf);
2520 pf = list_first_entry_or_null(pf_head,
2521 struct binder_ptr_fixup, node);
2522 }
2523 }
2524 list_del(&sgc->node);
2525 kfree(sgc);
2526 }
2527 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2528 BUG_ON(pf->skip_size == 0);
2529 list_del(&pf->node);
2530 kfree(pf);
2531 }
2532 BUG_ON(!list_empty(sgc_head));
2533
2534 return ret > 0 ? -EINVAL : ret;
2535 }
2536
2537 /**
2538 * binder_cleanup_deferred_txn_lists() - free specified lists
2539 * @sgc_head: list_head of scatter-gather copy list
2540 * @pf_head: list_head of pointer fixup list
2541 *
2542 * Called to clean up @sgc_head and @pf_head if there is an
2543 * error.
2544 */
2545 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2546 struct list_head *pf_head)
2547 {
2548 struct binder_sg_copy *sgc, *tmpsgc;
2549 struct binder_ptr_fixup *pf, *tmppf;
2550
2551 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2552 list_del(&sgc->node);
2553 kfree(sgc);
2554 }
2555 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2556 list_del(&pf->node);
2557 kfree(pf);
2558 }
2559 }
2560
2561 /**
2562 * binder_defer_copy() - queue a scatter-gather buffer for copy
2563 * @sgc_head: list_head of scatter-gather copy list
2564 * @offset: binder buffer offset in target process
2565 * @sender_uaddr: user address in source process
2566 * @length: bytes to copy
2567 *
2568 * Specify a scatter-gather block to be copied. The actual copy must
2569 * be deferred until all the needed fixups are identified and queued.
2570 * Then the copy and fixups are done together so un-translated values
2571 * from the source are never visible in the target buffer.
2572 *
2573 * We are guaranteed that repeated calls to this function will have
2574 * monotonically increasing @offset values so the list will naturally
2575 * be ordered.
2576 *
2577 * Return: 0=success, else -errno
2578 */
2579 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2580 const void __user *sender_uaddr, size_t length)
2581 {
2582 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2583
2584 if (!bc)
2585 return -ENOMEM;
2586
2587 bc->offset = offset;
2588 bc->sender_uaddr = sender_uaddr;
2589 bc->length = length;
2590 INIT_LIST_HEAD(&bc->node);
2591
2592 /*
2593 * We are guaranteed that the deferred copies are in-order
2594 * so just add to the tail.
2595 */
2596 list_add_tail(&bc->node, sgc_head);
2597
2598 return 0;
2599 }
2600
2601 /**
2602 * binder_add_fixup() - queue a fixup to be applied to sg copy
2603 * @pf_head: list_head of binder ptr fixup list
2604 * @offset: binder buffer offset in target process
2605 * @fixup: bytes to be copied for fixup
2606 * @skip_size: bytes to skip when copying (fixup will be applied later)
2607 *
2608 * Add the specified fixup to a list ordered by @offset. When copying
2609 * the scatter-gather buffers, the fixup will be copied instead of
2610 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2611 * will be applied later (in target process context), so we just skip
2612 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2613 * value in @fixup.
2614 *
2615 * This function is called *mostly* in @offset order, but there are
2616 * exceptions. Since out-of-order inserts are relatively uncommon,
2617 * we insert the new element by searching backward from the tail of
2618 * the list.
2619 *
2620 * Return: 0=success, else -errno
2621 */
2622 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2623 binder_uintptr_t fixup, size_t skip_size)
2624 {
2625 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2626 struct binder_ptr_fixup *tmppf;
2627
2628 if (!pf)
2629 return -ENOMEM;
2630
2631 pf->offset = offset;
2632 pf->fixup_data = fixup;
2633 pf->skip_size = skip_size;
2634 INIT_LIST_HEAD(&pf->node);
2635
2636 /* Fixups are *mostly* added in-order, but there are some
2637 * exceptions. Look backwards through list for insertion point.
2638 */
2639 list_for_each_entry_reverse(tmppf, pf_head, node) {
2640 if (tmppf->offset < pf->offset) {
2641 list_add(&pf->node, &tmppf->node);
2642 return 0;
2643 }
2644 }
2645 /*
2646 * if we get here, then the new offset is the lowest so
2647 * insert at the head
2648 */
2649 list_add(&pf->node, pf_head);
2650 return 0;
2651 }
2652
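/**
 * binder_translate_fd_array() - translate fds in a parent buffer object
 * @pf_head: list of pointer fixups for this transaction
 * @fda: fd array object describing the fds to translate
 * @sender_ubuffer: user address of the sender's parent buffer
 * @parent: target-side copy of the parent buffer object
 * @sender_uparent: sender-side copy of the parent buffer object
 * @t: transaction the array belongs to
 * @thread: sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * Validate the bounds and alignment of the fd array, queue a skip
 * fixup covering it (the target fds are installed later, in target
 * context), and queue an fd fixup for each fd read from the sender's
 * buffer.
 *
 * Return: 0 on success, negative errno on failure
 */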
2653 static int binder_translate_fd_array(struct list_head *pf_head,
2654 struct binder_fd_array_object *fda,
2655 const void __user *sender_ubuffer,
2656 struct binder_buffer_object *parent,
2657 struct binder_buffer_object *sender_uparent,
2658 struct binder_transaction *t,
2659 struct binder_thread *thread,
2660 struct binder_transaction *in_reply_to)
2661 {
2662 binder_size_t fdi, fd_buf_size;
2663 binder_size_t fda_offset;
2664 const void __user *sender_ufda_base;
2665 struct binder_proc *proc = thread->proc;
2666 int ret;
2667
2668 if (fda->num_fds == 0)
2669 return 0;
2670
2671 fd_buf_size = sizeof(u32) * fda->num_fds;
2672 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2673 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2674 proc->pid, thread->pid, (u64)fda->num_fds);
2675 return -EINVAL;
2676 }
2677 if (fd_buf_size > parent->length ||
2678 fda->parent_offset > parent->length - fd_buf_size) {
2679 /* No space for all file descriptors here. */
2680 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2681 proc->pid, thread->pid, (u64)fda->num_fds);
2682 return -EINVAL;
2683 }
2684 /*
2685 * the source data for binder_buffer_object is visible
2686 * to user-space and the @buffer element is the user
2687 * pointer to the buffer_object containing the fd_array.
2688 * Convert the address to an offset relative to
2689 * the base of the transaction buffer.
2690 */
2691 fda_offset = parent->buffer - t->buffer->user_data +
2692 fda->parent_offset;
2693 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2694 fda->parent_offset;
2695
2696 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2697 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2698 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2699 proc->pid, thread->pid);
2700 return -EINVAL;
2701 }
2702 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2703 if (ret)
2704 return ret;
2705
2706 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2707 u32 fd;
2708 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2709 binder_size_t sender_uoffset = fdi * sizeof(fd);
2710
2711 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2712 if (!ret)
2713 ret = binder_translate_fd(fd, offset, t, thread,
2714 in_reply_to);
2715 if (ret)
2716 return ret > 0 ? -EINVAL : ret;
2717 }
2718 return 0;
2719 }
2720
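/**
 * binder_fixup_parent() - queue the pointer fixup for a buffer object
 * @pf_head: list of pointer fixups for this transaction
 * @t: transaction the object belongs to
 * @thread: sending thread
 * @bp: buffer object whose parent pointer must be fixed up
 * @off_start_offset: start of the offset array in the target buffer
 * @num_valid: number of previously validated offsets
 * @last_fixup_obj_off: offset of the last fixed-up buffer object
 * @last_fixup_min_off: minimum fixup offset within that object
 *
 * If @bp has a parent, validate the parent and the fixup ordering,
 * then queue a fixup that writes @bp's target-side address into the
 * parent buffer.
 *
 * Return: 0 on success, negative errno on failure
 */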
2721 static int binder_fixup_parent(struct list_head *pf_head,
2722 struct binder_transaction *t,
2723 struct binder_thread *thread,
2724 struct binder_buffer_object *bp,
2725 binder_size_t off_start_offset,
2726 binder_size_t num_valid,
2727 binder_size_t last_fixup_obj_off,
2728 binder_size_t last_fixup_min_off)
2729 {
2730 struct binder_buffer_object *parent;
2731 struct binder_buffer *b = t->buffer;
2732 struct binder_proc *proc = thread->proc;
2733 struct binder_proc *target_proc = t->to_proc;
2734 struct binder_object object;
2735 binder_size_t buffer_offset;
2736 binder_size_t parent_offset;
2737
2738 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2739 return 0;
2740
2741 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2742 off_start_offset, &parent_offset,
2743 num_valid);
2744 if (!parent) {
2745 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2746 proc->pid, thread->pid);
2747 return -EINVAL;
2748 }
2749
2750 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2751 parent_offset, bp->parent_offset,
2752 last_fixup_obj_off,
2753 last_fixup_min_off)) {
2754 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2755 proc->pid, thread->pid);
2756 return -EINVAL;
2757 }
2758
2759 if (parent->length < sizeof(binder_uintptr_t) ||
2760 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2761 /* No space for a pointer here! */
2762 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2763 proc->pid, thread->pid);
2764 return -EINVAL;
2765 }
2766
2767 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2768
2769 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2770 }
2771
2772 /**
2773 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2774 * @t1: the pending async txn in the frozen process
2775 * @t2: the new async txn to supersede the outdated pending one
2776 *
2777 * Return: true if t2 can supersede t1
2778 * false if t2 can not supersede t1
2779 */
2780 static bool binder_can_update_transaction(struct binder_transaction *t1,
2781 struct binder_transaction *t2)
2782 {
2783 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2784 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2785 return false;
2786 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2787 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2788 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2789 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2790 return true;
2791 return false;
2792 }
2793
2794 /**
2795 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2796 * @t: new async transaction
2797 * @target_list: list to find outdated transaction
2798 *
2799 * Return: the outdated transaction if found
2800 * NULL if no outdated transaction can be found
2801 *
2802 * Requires the proc->inner_lock to be held.
2803 */
2804 static struct binder_transaction *
2805 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2806 struct list_head *target_list)
2807 {
2808 struct binder_work *w;
2809
2810 list_for_each_entry(w, target_list, entry) {
2811 struct binder_transaction *t_queued;
2812
2813 if (w->type != BINDER_WORK_TRANSACTION)
2814 continue;
2815 t_queued = container_of(w, struct binder_transaction, work);
2816 if (binder_can_update_transaction(t_queued, t))
2817 return t_queued;
2818 }
2819 return NULL;
2820 }
2821
2822 /**
2823 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2824 * @t: transaction to send
2825 * @proc: process to send the transaction to
2826 * @thread: thread in @proc to send the transaction to (may be NULL)
2827 *
2828 * This function queues a transaction to the specified process. It will try
2829 * to find a thread in the target process to handle the transaction and
2830 * wake it up. If no thread is found, the work is queued to the proc
2831 * waitqueue.
2832 *
2833 * If the @thread parameter is not NULL, the transaction is always queued
2834 * to the waitlist of that specific thread.
2835 *
2836 * Return: 0 if the transaction was successfully queued
2837 * BR_DEAD_REPLY if the target process or thread is dead
2838 * BR_FROZEN_REPLY if the target process or thread is frozen and
2839 * the sync transaction was rejected
2840 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2841 * and the async transaction was successfully queued
2842 */
2843 static int binder_proc_transaction(struct binder_transaction *t,
2844 struct binder_proc *proc,
2845 struct binder_thread *thread)
2846 {
2847 struct binder_node *node = t->buffer->target_node;
2848 bool oneway = !!(t->flags & TF_ONE_WAY);
2849 bool pending_async = false;
2850 struct binder_transaction *t_outdated = NULL;
2851 bool frozen = false;
2852
2853 BUG_ON(!node);
2854 binder_node_lock(node);
2855 if (oneway) {
2856 BUG_ON(thread);
2857 if (node->has_async_transaction)
2858 pending_async = true;
2859 else
2860 node->has_async_transaction = true;
2861 }
2862
2863 binder_inner_proc_lock(proc);
2864 if (proc->is_frozen) {
2865 frozen = true;
2866 proc->sync_recv |= !oneway;
2867 proc->async_recv |= oneway;
2868 }
2869
2870 if ((frozen && !oneway) || proc->is_dead ||
2871 (thread && thread->is_dead)) {
2872 binder_inner_proc_unlock(proc);
2873 binder_node_unlock(node);
2874 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2875 }
2876
2877 if (!thread && !pending_async)
2878 thread = binder_select_thread_ilocked(proc);
2879
2880 if (thread) {
2881 binder_enqueue_thread_work_ilocked(thread, &t->work);
2882 } else if (!pending_async) {
2883 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2884 } else {
2885 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2886 t_outdated = binder_find_outdated_transaction_ilocked(t,
2887 &node->async_todo);
2888 if (t_outdated) {
2889 binder_debug(BINDER_DEBUG_TRANSACTION,
2890 "txn %d supersedes %d\n",
2891 t->debug_id, t_outdated->debug_id);
2892 list_del_init(&t_outdated->work.entry);
2893 proc->outstanding_txns--;
2894 }
2895 }
2896 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2897 }
2898
2899 if (!pending_async)
2900 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2901
2902 proc->outstanding_txns++;
2903 binder_inner_proc_unlock(proc);
2904 binder_node_unlock(node);
2905
2906 /*
2907 * To reduce potential contention, free the outdated transaction and
2908 * buffer after releasing the locks.
2909 */
2910 if (t_outdated) {
2911 struct binder_buffer *buffer = t_outdated->buffer;
2912
2913 t_outdated->buffer = NULL;
2914 buffer->transaction = NULL;
2915 trace_binder_transaction_update_buffer_release(buffer);
2916 binder_release_entire_buffer(proc, NULL, buffer, false);
2917 binder_alloc_free_buf(&proc->alloc, buffer);
2918 kfree(t_outdated);
2919 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2920 }
2921
2922 if (oneway && frozen)
2923 return BR_TRANSACTION_PENDING_FROZEN;
2924
2925 return 0;
2926 }
2927
2928 /**
2929 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2930 * @node: struct binder_node for which to get refs
2931 * @procp: returns @node->proc if valid
2932 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2933 *
2934 * User-space normally keeps the node alive when creating a transaction
2935 * since it has a reference to the target. The local strong ref keeps it
2936 * alive if the sending process dies before the target process processes
2937 * the transaction. If the source process is malicious or has a reference
2938 * counting bug, relying on the local strong ref can fail.
2939 *
2940 * Since user-space can cause the local strong ref to go away, we also take
2941 * a tmpref on the node to ensure it survives while we are constructing
2942 * the transaction. We also need a tmpref on the proc while we are
2943 * constructing the transaction, so we take that here as well.
2944 *
2945 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2946 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2947 * target proc has died, @error is set to BR_DEAD_REPLY.
2948 */
2949 static struct binder_node *binder_get_node_refs_for_txn(
2950 struct binder_node *node,
2951 struct binder_proc **procp,
2952 uint32_t *error)
2953 {
2954 struct binder_node *target_node = NULL;
2955
2956 binder_node_inner_lock(node);
2957 if (node->proc) {
2958 target_node = node;
2959 binder_inc_node_nilocked(node, 1, 0, NULL);
2960 binder_inc_node_tmpref_ilocked(node);
2961 node->proc->tmp_ref++;
2962 *procp = node->proc;
2963 } else
2964 *error = BR_DEAD_REPLY;
2965 binder_node_inner_unlock(node);
2966
2967 return target_node;
2968 }
2969
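/**
 * binder_set_txn_from_error() - record an error in the sender's state
 * @t: transaction whose sender should observe the error
 * @id: debug_id of the failed transaction
 * @command: BR_* command for the extended error
 * @param: parameter for the extended error
 *
 * Store the error in the sending thread's extended error state so user
 * space can retrieve it later, without overwriting an error that is
 * already pending.
 */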
2970 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2971 uint32_t command, int32_t param)
2972 {
2973 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2974
2975 if (!from) {
2976 /* annotation for sparse */
2977 __release(&from->proc->inner_lock);
2978 return;
2979 }
2980
2981 /* don't override existing errors */
2982 if (from->ee.command == BR_OK)
2983 binder_set_extended_error(&from->ee, id, command, param);
2984 binder_inner_proc_unlock(from->proc);
2985 binder_thread_dec_tmpref(from);
2986 }
2987
2988 /**
2989 * binder_netlink_report() - report a transaction failure via netlink
2990 * @proc: the binder proc sending the transaction
2991 * @t: the binder transaction that failed
2992 * @data_size: the user provided data size for the transaction
2993 * @error: enum binder_driver_return_protocol returned to sender
2994 */
2995 static void binder_netlink_report(struct binder_proc *proc,
2996 struct binder_transaction *t,
2997 u32 data_size,
2998 u32 error)
2999 {
3000 const char *context = proc->context->name;
3001 struct sk_buff *skb;
3002 void *hdr;
3003
3004 if (!genl_has_listeners(&binder_nl_family, &init_net,
3005 BINDER_NLGRP_REPORT))
3006 return;
3007
3008 trace_binder_netlink_report(context, t, data_size, error);
3009
3010 skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
3011 if (!skb)
3012 return;
3013
3014 hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
3015 if (!hdr)
3016 goto free_skb;
3017
3018 if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
3019 nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
3020 nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
3021 nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
3022 goto cancel_skb;
3023
3024 if (t->to_proc &&
3025 nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
3026 goto cancel_skb;
3027
3028 if (t->to_thread &&
3029 nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
3030 goto cancel_skb;
3031
3032 if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
3033 goto cancel_skb;
3034
3035 if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
3036 nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
3037 nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
3038 goto cancel_skb;
3039
3040 genlmsg_end(skb, hdr);
3041 genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
3042 GFP_KERNEL);
3043 return;
3044
3045 cancel_skb:
3046 genlmsg_cancel(skb, hdr);
3047 free_skb:
3048 nlmsg_free(skb);
3049 }
3050
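/**
 * binder_transaction() - process a BC_TRANSACTION or BC_REPLY
 * @proc: sending process
 * @thread: sending thread
 * @tr: transaction data from user space
 * @reply: true if this is a BC_REPLY
 * @extra_buffers_size: size of any extra scatter-gather buffers
 *
 * Resolve the target, allocate and fill the target-side buffer while
 * translating all embedded objects, and queue the transaction to the
 * target process or thread.
 */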
3051 static void binder_transaction(struct binder_proc *proc,
3052 struct binder_thread *thread,
3053 struct binder_transaction_data *tr, int reply,
3054 binder_size_t extra_buffers_size)
3055 {
3056 int ret;
3057 struct binder_transaction *t;
3058 struct binder_work *w;
3059 struct binder_work *tcomplete;
3060 binder_size_t buffer_offset = 0;
3061 binder_size_t off_start_offset, off_end_offset;
3062 binder_size_t off_min;
3063 binder_size_t sg_buf_offset, sg_buf_end_offset;
3064 binder_size_t user_offset = 0;
3065 struct binder_proc *target_proc = NULL;
3066 struct binder_thread *target_thread = NULL;
3067 struct binder_node *target_node = NULL;
3068 struct binder_transaction *in_reply_to = NULL;
3069 struct binder_transaction_log_entry *e;
3070 uint32_t return_error = 0;
3071 uint32_t return_error_param = 0;
3072 uint32_t return_error_line = 0;
3073 binder_size_t last_fixup_obj_off = 0;
3074 binder_size_t last_fixup_min_off = 0;
3075 struct binder_context *context = proc->context;
3076 int t_debug_id = atomic_inc_return(&binder_last_id);
3077 ktime_t t_start_time = ktime_get();
3078 struct lsm_context lsmctx = { };
3079 struct list_head sgc_head;
3080 struct list_head pf_head;
3081 const void __user *user_buffer = (const void __user *)
3082 (uintptr_t)tr->data.ptr.buffer;
3083 INIT_LIST_HEAD(&sgc_head);
3084 INIT_LIST_HEAD(&pf_head);
3085
3086 e = binder_transaction_log_add(&binder_transaction_log);
3087 e->debug_id = t_debug_id;
3088 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3089 e->from_proc = proc->pid;
3090 e->from_thread = thread->pid;
3091 e->target_handle = tr->target.handle;
3092 e->data_size = tr->data_size;
3093 e->offsets_size = tr->offsets_size;
3094 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3095
3096 binder_inner_proc_lock(proc);
3097 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3098 binder_inner_proc_unlock(proc);
3099
3100 t = kzalloc(sizeof(*t), GFP_KERNEL);
3101 if (!t) {
3102 binder_txn_error("%d:%d cannot allocate transaction\n",
3103 thread->pid, proc->pid);
3104 return_error = BR_FAILED_REPLY;
3105 return_error_param = -ENOMEM;
3106 return_error_line = __LINE__;
3107 goto err_alloc_t_failed;
3108 }
3109 INIT_LIST_HEAD(&t->fd_fixups);
3110 binder_stats_created(BINDER_STAT_TRANSACTION);
3111 spin_lock_init(&t->lock);
3112 t->debug_id = t_debug_id;
3113 t->start_time = t_start_time;
3114 t->from_pid = proc->pid;
3115 t->from_tid = thread->pid;
3116 t->sender_euid = task_euid(proc->tsk);
3117 t->code = tr->code;
3118 t->flags = tr->flags;
3119 t->priority = task_nice(current);
3120 t->work.type = BINDER_WORK_TRANSACTION;
3121 t->is_async = !reply && (tr->flags & TF_ONE_WAY);
3122 t->is_reply = reply;
3123 if (!reply && !(tr->flags & TF_ONE_WAY))
3124 t->from = thread;
3125
3126 if (reply) {
3127 binder_inner_proc_lock(proc);
3128 in_reply_to = thread->transaction_stack;
3129 if (in_reply_to == NULL) {
3130 binder_inner_proc_unlock(proc);
3131 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3132 proc->pid, thread->pid);
3133 return_error = BR_FAILED_REPLY;
3134 return_error_param = -EPROTO;
3135 return_error_line = __LINE__;
3136 goto err_empty_call_stack;
3137 }
3138 if (in_reply_to->to_thread != thread) {
3139 spin_lock(&in_reply_to->lock);
3140 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3141 proc->pid, thread->pid, in_reply_to->debug_id,
3142 in_reply_to->to_proc ?
3143 in_reply_to->to_proc->pid : 0,
3144 in_reply_to->to_thread ?
3145 in_reply_to->to_thread->pid : 0);
3146 spin_unlock(&in_reply_to->lock);
3147 binder_inner_proc_unlock(proc);
3148 return_error = BR_FAILED_REPLY;
3149 return_error_param = -EPROTO;
3150 return_error_line = __LINE__;
3151 in_reply_to = NULL;
3152 goto err_bad_call_stack;
3153 }
3154 thread->transaction_stack = in_reply_to->to_parent;
3155 binder_inner_proc_unlock(proc);
3156 binder_set_nice(in_reply_to->saved_priority);
3157 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3158 if (target_thread == NULL) {
3159 /* annotation for sparse */
3160 __release(&target_thread->proc->inner_lock);
3161 binder_txn_error("%d:%d reply target not found\n",
3162 thread->pid, proc->pid);
3163 return_error = BR_DEAD_REPLY;
3164 return_error_line = __LINE__;
3165 goto err_dead_binder;
3166 }
3167 if (target_thread->transaction_stack != in_reply_to) {
3168 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3169 proc->pid, thread->pid,
3170 target_thread->transaction_stack ?
3171 target_thread->transaction_stack->debug_id : 0,
3172 in_reply_to->debug_id);
3173 binder_inner_proc_unlock(target_thread->proc);
3174 return_error = BR_FAILED_REPLY;
3175 return_error_param = -EPROTO;
3176 return_error_line = __LINE__;
3177 in_reply_to = NULL;
3178 target_thread = NULL;
3179 goto err_dead_binder;
3180 }
3181 target_proc = target_thread->proc;
3182 target_proc->tmp_ref++;
3183 binder_inner_proc_unlock(target_thread->proc);
3184 } else {
3185 if (tr->target.handle) {
3186 struct binder_ref *ref;
3187
3188 /*
3189 * There must already be a strong ref
3190 * on this node. If so, do a strong
3191 * increment on the node to ensure it
3192 * stays alive until the transaction is
3193 * done.
3194 */
3195 binder_proc_lock(proc);
3196 ref = binder_get_ref_olocked(proc, tr->target.handle,
3197 true);
3198 if (ref) {
3199 target_node = binder_get_node_refs_for_txn(
3200 ref->node, &target_proc,
3201 &return_error);
3202 } else {
3203 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3204 proc->pid, thread->pid, tr->target.handle);
3205 return_error = BR_FAILED_REPLY;
3206 }
3207 binder_proc_unlock(proc);
3208 } else {
3209 mutex_lock(&context->context_mgr_node_lock);
3210 target_node = context->binder_context_mgr_node;
3211 if (target_node)
3212 target_node = binder_get_node_refs_for_txn(
3213 target_node, &target_proc,
3214 &return_error);
3215 else
3216 return_error = BR_DEAD_REPLY;
3217 mutex_unlock(&context->context_mgr_node_lock);
3218 if (target_node && target_proc->pid == proc->pid) {
3219 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3220 proc->pid, thread->pid);
3221 return_error = BR_FAILED_REPLY;
3222 return_error_param = -EINVAL;
3223 return_error_line = __LINE__;
3224 goto err_invalid_target_handle;
3225 }
3226 }
3227 if (!target_node) {
3228 binder_txn_error("%d:%d cannot find target node\n",
3229 proc->pid, thread->pid);
3230 /* return_error is set above */
3231 return_error_param = -EINVAL;
3232 return_error_line = __LINE__;
3233 goto err_dead_binder;
3234 }
3235 e->to_node = target_node->debug_id;
3236 if (WARN_ON(proc == target_proc)) {
3237 binder_txn_error("%d:%d self transactions not allowed\n",
3238 thread->pid, proc->pid);
3239 return_error = BR_FAILED_REPLY;
3240 return_error_param = -EINVAL;
3241 return_error_line = __LINE__;
3242 goto err_invalid_target_handle;
3243 }
3244 if (security_binder_transaction(proc->cred,
3245 target_proc->cred) < 0) {
3246 binder_txn_error("%d:%d transaction credentials failed\n",
3247 thread->pid, proc->pid);
3248 return_error = BR_FAILED_REPLY;
3249 return_error_param = -EPERM;
3250 return_error_line = __LINE__;
3251 goto err_invalid_target_handle;
3252 }
3253 binder_inner_proc_lock(proc);
3254
3255 w = list_first_entry_or_null(&thread->todo,
3256 struct binder_work, entry);
3257 if (!(tr->flags & TF_ONE_WAY) && w &&
3258 w->type == BINDER_WORK_TRANSACTION) {
3259 /*
3260 * Do not allow new outgoing transaction from a
3261 * thread that has a transaction at the head of
3262 * its todo list. Only need to check the head
3263 * because binder_select_thread_ilocked picks a
3264 * thread from proc->waiting_threads to enqueue
3265 * the transaction, and nothing is queued to the
3266 * todo list while the thread is on waiting_threads.
3267 */
3268 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3269 proc->pid, thread->pid);
3270 binder_inner_proc_unlock(proc);
3271 return_error = BR_FAILED_REPLY;
3272 return_error_param = -EPROTO;
3273 return_error_line = __LINE__;
3274 goto err_bad_todo_list;
3275 }
3276
3277 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3278 struct binder_transaction *tmp;
3279
3280 tmp = thread->transaction_stack;
3281 if (tmp->to_thread != thread) {
3282 spin_lock(&tmp->lock);
3283 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3284 proc->pid, thread->pid, tmp->debug_id,
3285 tmp->to_proc ? tmp->to_proc->pid : 0,
3286 tmp->to_thread ?
3287 tmp->to_thread->pid : 0);
3288 spin_unlock(&tmp->lock);
3289 binder_inner_proc_unlock(proc);
3290 return_error = BR_FAILED_REPLY;
3291 return_error_param = -EPROTO;
3292 return_error_line = __LINE__;
3293 goto err_bad_call_stack;
3294 }
3295 while (tmp) {
3296 struct binder_thread *from;
3297
3298 spin_lock(&tmp->lock);
3299 from = tmp->from;
3300 if (from && from->proc == target_proc) {
3301 atomic_inc(&from->tmp_ref);
3302 target_thread = from;
3303 spin_unlock(&tmp->lock);
3304 break;
3305 }
3306 spin_unlock(&tmp->lock);
3307 tmp = tmp->from_parent;
3308 }
3309 }
3310 binder_inner_proc_unlock(proc);
3311 }
3312
3313 t->to_proc = target_proc;
3314 t->to_thread = target_thread;
3315 if (target_thread)
3316 e->to_thread = target_thread->pid;
3317 e->to_proc = target_proc->pid;
3318
3319 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3320 if (tcomplete == NULL) {
3321 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3322 thread->pid, proc->pid);
3323 return_error = BR_FAILED_REPLY;
3324 return_error_param = -ENOMEM;
3325 return_error_line = __LINE__;
3326 goto err_alloc_tcomplete_failed;
3327 }
3328 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3329
3330 if (reply)
3331 binder_debug(BINDER_DEBUG_TRANSACTION,
3332 "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3333 proc->pid, thread->pid, t->debug_id,
3334 target_proc->pid, target_thread->pid,
3335 (u64)tr->data_size, (u64)tr->offsets_size,
3336 (u64)extra_buffers_size);
3337 else
3338 binder_debug(BINDER_DEBUG_TRANSACTION,
3339 "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3340 proc->pid, thread->pid, t->debug_id,
3341 target_proc->pid, target_node->debug_id,
3342 (u64)tr->data_size, (u64)tr->offsets_size,
3343 (u64)extra_buffers_size);
3344
3345 if (target_node && target_node->txn_security_ctx) {
3346 u32 secid;
3347 size_t added_size;
3348
3349 security_cred_getsecid(proc->cred, &secid);
3350 ret = security_secid_to_secctx(secid, &lsmctx);
3351 if (ret < 0) {
3352 binder_txn_error("%d:%d failed to get security context\n",
3353 thread->pid, proc->pid);
3354 return_error = BR_FAILED_REPLY;
3355 return_error_param = ret;
3356 return_error_line = __LINE__;
3357 goto err_get_secctx_failed;
3358 }
3359 added_size = ALIGN(lsmctx.len, sizeof(u64));
3360 extra_buffers_size += added_size;
3361 if (extra_buffers_size < added_size) {
3362 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3363 thread->pid, proc->pid);
3364 return_error = BR_FAILED_REPLY;
3365 return_error_param = -EINVAL;
3366 return_error_line = __LINE__;
3367 goto err_bad_extra_size;
3368 }
3369 }
3370
3371 trace_binder_transaction(reply, t, target_node);
3372
3373 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3374 tr->offsets_size, extra_buffers_size,
3375 !reply && (t->flags & TF_ONE_WAY));
3376 if (IS_ERR(t->buffer)) {
3377 char *s;
3378
3379 ret = PTR_ERR(t->buffer);
3380 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3381 : (ret == -ENOSPC) ? ": no space left"
3382 : (ret == -ENOMEM) ? ": memory allocation failed"
3383 : "";
3384 binder_txn_error("cannot allocate buffer%s", s);
3385
3386 return_error_param = PTR_ERR(t->buffer);
3387 return_error = return_error_param == -ESRCH ?
3388 BR_DEAD_REPLY : BR_FAILED_REPLY;
3389 return_error_line = __LINE__;
3390 t->buffer = NULL;
3391 goto err_binder_alloc_buf_failed;
3392 }
3393 if (lsmctx.context) {
3394 int err;
3395 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3396 ALIGN(tr->offsets_size, sizeof(void *)) +
3397 ALIGN(extra_buffers_size, sizeof(void *)) -
3398 ALIGN(lsmctx.len, sizeof(u64));
3399
3400 t->security_ctx = t->buffer->user_data + buf_offset;
3401 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3402 t->buffer, buf_offset,
3403 lsmctx.context, lsmctx.len);
3404 if (err) {
3405 t->security_ctx = 0;
3406 WARN_ON(1);
3407 }
3408 security_release_secctx(&lsmctx);
3409 lsmctx.context = NULL;
3410 }
3411 t->buffer->debug_id = t->debug_id;
3412 t->buffer->transaction = t;
3413 t->buffer->target_node = target_node;
3414 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3415 trace_binder_transaction_alloc_buf(t->buffer);
3416
3417 if (binder_alloc_copy_user_to_buffer(
3418 &target_proc->alloc,
3419 t->buffer,
3420 ALIGN(tr->data_size, sizeof(void *)),
3421 (const void __user *)
3422 (uintptr_t)tr->data.ptr.offsets,
3423 tr->offsets_size)) {
3424 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3425 proc->pid, thread->pid);
3426 return_error = BR_FAILED_REPLY;
3427 return_error_param = -EFAULT;
3428 return_error_line = __LINE__;
3429 goto err_copy_data_failed;
3430 }
3431 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3432 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3433 proc->pid, thread->pid, (u64)tr->offsets_size);
3434 return_error = BR_FAILED_REPLY;
3435 return_error_param = -EINVAL;
3436 return_error_line = __LINE__;
3437 goto err_bad_offset;
3438 }
3439 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3440 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3441 proc->pid, thread->pid,
3442 (u64)extra_buffers_size);
3443 return_error = BR_FAILED_REPLY;
3444 return_error_param = -EINVAL;
3445 return_error_line = __LINE__;
3446 goto err_bad_offset;
3447 }
3448 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3449 buffer_offset = off_start_offset;
3450 off_end_offset = off_start_offset + tr->offsets_size;
3451 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3452 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3453 ALIGN(lsmctx.len, sizeof(u64));
3454 off_min = 0;
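/*
 * Walk the offsets array: copy the chunk of user data that precedes
 * each object, then translate the object in place (binder objects,
 * handles, fds, fd arrays and scatter-gather buffers). Objects must
 * appear in increasing offset order and must not overlap, which is
 * enforced through off_min.
 */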
3455 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3456 buffer_offset += sizeof(binder_size_t)) {
3457 struct binder_object_header *hdr;
3458 size_t object_size;
3459 struct binder_object object;
3460 binder_size_t object_offset;
3461 binder_size_t copy_size;
3462
3463 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3464 &object_offset,
3465 t->buffer,
3466 buffer_offset,
3467 sizeof(object_offset))) {
3468 binder_txn_error("%d:%d copy offset from buffer failed\n",
3469 thread->pid, proc->pid);
3470 return_error = BR_FAILED_REPLY;
3471 return_error_param = -EINVAL;
3472 return_error_line = __LINE__;
3473 goto err_bad_offset;
3474 }
3475
3476 /*
3477 * Copy the source user buffer up to the next object
3478 * that will be processed.
3479 */
3480 copy_size = object_offset - user_offset;
3481 if (copy_size && (user_offset > object_offset ||
3482 object_offset > tr->data_size ||
3483 binder_alloc_copy_user_to_buffer(
3484 &target_proc->alloc,
3485 t->buffer, user_offset,
3486 user_buffer + user_offset,
3487 copy_size))) {
3488 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3489 proc->pid, thread->pid);
3490 return_error = BR_FAILED_REPLY;
3491 return_error_param = -EFAULT;
3492 return_error_line = __LINE__;
3493 goto err_copy_data_failed;
3494 }
3495 object_size = binder_get_object(target_proc, user_buffer,
3496 t->buffer, object_offset, &object);
3497 if (object_size == 0 || object_offset < off_min) {
3498 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3499 proc->pid, thread->pid,
3500 (u64)object_offset,
3501 (u64)off_min,
3502 (u64)t->buffer->data_size);
3503 return_error = BR_FAILED_REPLY;
3504 return_error_param = -EINVAL;
3505 return_error_line = __LINE__;
3506 goto err_bad_offset;
3507 }
3508 /*
3509 * Set offset to the next buffer fragment to be
3510 * copied
3511 */
3512 user_offset = object_offset + object_size;
3513
3514 hdr = &object.hdr;
3515 off_min = object_offset + object_size;
3516 switch (hdr->type) {
3517 case BINDER_TYPE_BINDER:
3518 case BINDER_TYPE_WEAK_BINDER: {
3519 struct flat_binder_object *fp;
3520
3521 fp = to_flat_binder_object(hdr);
3522 ret = binder_translate_binder(fp, t, thread);
3523
3524 if (ret < 0 ||
3525 binder_alloc_copy_to_buffer(&target_proc->alloc,
3526 t->buffer,
3527 object_offset,
3528 fp, sizeof(*fp))) {
3529 binder_txn_error("%d:%d translate binder failed\n",
3530 thread->pid, proc->pid);
3531 return_error = BR_FAILED_REPLY;
3532 return_error_param = ret;
3533 return_error_line = __LINE__;
3534 goto err_translate_failed;
3535 }
3536 } break;
3537 case BINDER_TYPE_HANDLE:
3538 case BINDER_TYPE_WEAK_HANDLE: {
3539 struct flat_binder_object *fp;
3540
3541 fp = to_flat_binder_object(hdr);
3542 ret = binder_translate_handle(fp, t, thread);
3543 if (ret < 0 ||
3544 binder_alloc_copy_to_buffer(&target_proc->alloc,
3545 t->buffer,
3546 object_offset,
3547 fp, sizeof(*fp))) {
3548 binder_txn_error("%d:%d translate handle failed\n",
3549 thread->pid, proc->pid);
3550 return_error = BR_FAILED_REPLY;
3551 return_error_param = ret;
3552 return_error_line = __LINE__;
3553 goto err_translate_failed;
3554 }
3555 } break;
3556
3557 case BINDER_TYPE_FD: {
3558 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3559 binder_size_t fd_offset = object_offset +
3560 (uintptr_t)&fp->fd - (uintptr_t)fp;
3561 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3562 thread, in_reply_to);
3563
3564 fp->pad_binder = 0;
3565 if (ret < 0 ||
3566 binder_alloc_copy_to_buffer(&target_proc->alloc,
3567 t->buffer,
3568 object_offset,
3569 fp, sizeof(*fp))) {
3570 binder_txn_error("%d:%d translate fd failed\n",
3571 thread->pid, proc->pid);
3572 return_error = BR_FAILED_REPLY;
3573 return_error_param = ret;
3574 return_error_line = __LINE__;
3575 goto err_translate_failed;
3576 }
3577 } break;
3578 case BINDER_TYPE_FDA: {
3579 struct binder_object ptr_object;
3580 binder_size_t parent_offset;
3581 struct binder_object user_object;
3582 size_t user_parent_size;
3583 struct binder_fd_array_object *fda =
3584 to_binder_fd_array_object(hdr);
3585 size_t num_valid = (buffer_offset - off_start_offset) /
3586 sizeof(binder_size_t);
3587 struct binder_buffer_object *parent =
3588 binder_validate_ptr(target_proc, t->buffer,
3589 &ptr_object, fda->parent,
3590 off_start_offset,
3591 &parent_offset,
3592 num_valid);
3593 if (!parent) {
3594 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3595 proc->pid, thread->pid);
3596 return_error = BR_FAILED_REPLY;
3597 return_error_param = -EINVAL;
3598 return_error_line = __LINE__;
3599 goto err_bad_parent;
3600 }
3601 if (!binder_validate_fixup(target_proc, t->buffer,
3602 off_start_offset,
3603 parent_offset,
3604 fda->parent_offset,
3605 last_fixup_obj_off,
3606 last_fixup_min_off)) {
3607 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3608 proc->pid, thread->pid);
3609 return_error = BR_FAILED_REPLY;
3610 return_error_param = -EINVAL;
3611 return_error_line = __LINE__;
3612 goto err_bad_parent;
3613 }
3614 /*
3615 * We need to read the user version of the parent
3616 * object to get the original user offset
3617 */
3618 user_parent_size =
3619 binder_get_object(proc, user_buffer, t->buffer,
3620 parent_offset, &user_object);
3621 if (user_parent_size != sizeof(user_object.bbo)) {
3622 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3623 proc->pid, thread->pid,
3624 user_parent_size,
3625 sizeof(user_object.bbo));
3626 return_error = BR_FAILED_REPLY;
3627 return_error_param = -EINVAL;
3628 return_error_line = __LINE__;
3629 goto err_bad_parent;
3630 }
3631 ret = binder_translate_fd_array(&pf_head, fda,
3632 user_buffer, parent,
3633 &user_object.bbo, t,
3634 thread, in_reply_to);
3635 if (!ret)
3636 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3637 t->buffer,
3638 object_offset,
3639 fda, sizeof(*fda));
3640 if (ret) {
3641 binder_txn_error("%d:%d translate fd array failed\n",
3642 thread->pid, proc->pid);
3643 return_error = BR_FAILED_REPLY;
3644 return_error_param = ret > 0 ? -EINVAL : ret;
3645 return_error_line = __LINE__;
3646 goto err_translate_failed;
3647 }
3648 last_fixup_obj_off = parent_offset;
3649 last_fixup_min_off =
3650 fda->parent_offset + sizeof(u32) * fda->num_fds;
3651 } break;
3652 case BINDER_TYPE_PTR: {
3653 struct binder_buffer_object *bp =
3654 to_binder_buffer_object(hdr);
3655 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3656 size_t num_valid;
3657
3658 if (bp->length > buf_left) {
3659 binder_user_error("%d:%d got transaction with too large buffer\n",
3660 proc->pid, thread->pid);
3661 return_error = BR_FAILED_REPLY;
3662 return_error_param = -EINVAL;
3663 return_error_line = __LINE__;
3664 goto err_bad_offset;
3665 }
3666 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3667 (const void __user *)(uintptr_t)bp->buffer,
3668 bp->length);
3669 if (ret) {
3670 binder_txn_error("%d:%d deferred copy failed\n",
3671 thread->pid, proc->pid);
3672 return_error = BR_FAILED_REPLY;
3673 return_error_param = ret;
3674 return_error_line = __LINE__;
3675 goto err_translate_failed;
3676 }
3677 /* Fixup buffer pointer to target proc address space */
3678 bp->buffer = t->buffer->user_data + sg_buf_offset;
3679 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3680
3681 num_valid = (buffer_offset - off_start_offset) /
3682 sizeof(binder_size_t);
3683 ret = binder_fixup_parent(&pf_head, t,
3684 thread, bp,
3685 off_start_offset,
3686 num_valid,
3687 last_fixup_obj_off,
3688 last_fixup_min_off);
3689 if (ret < 0 ||
3690 binder_alloc_copy_to_buffer(&target_proc->alloc,
3691 t->buffer,
3692 object_offset,
3693 bp, sizeof(*bp))) {
3694 binder_txn_error("%d:%d failed to fixup parent\n",
3695 thread->pid, proc->pid);
3696 return_error = BR_FAILED_REPLY;
3697 return_error_param = ret;
3698 return_error_line = __LINE__;
3699 goto err_translate_failed;
3700 }
3701 last_fixup_obj_off = object_offset;
3702 last_fixup_min_off = 0;
3703 } break;
3704 default:
3705 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3706 proc->pid, thread->pid, hdr->type);
3707 return_error = BR_FAILED_REPLY;
3708 return_error_param = -EINVAL;
3709 return_error_line = __LINE__;
3710 goto err_bad_object_type;
3711 }
3712 }
3713 /* Done processing objects, copy the rest of the buffer */
3714 if (binder_alloc_copy_user_to_buffer(
3715 &target_proc->alloc,
3716 t->buffer, user_offset,
3717 user_buffer + user_offset,
3718 tr->data_size - user_offset)) {
3719 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3720 proc->pid, thread->pid);
3721 return_error = BR_FAILED_REPLY;
3722 return_error_param = -EFAULT;
3723 return_error_line = __LINE__;
3724 goto err_copy_data_failed;
3725 }
3726
3727 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3728 &sgc_head, &pf_head);
3729 if (ret) {
3730 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3731 proc->pid, thread->pid);
3732 return_error = BR_FAILED_REPLY;
3733 return_error_param = ret;
3734 return_error_line = __LINE__;
3735 goto err_copy_data_failed;
3736 }
3737 if (t->buffer->oneway_spam_suspect) {
3738 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3739 binder_netlink_report(proc, t, tr->data_size,
3740 BR_ONEWAY_SPAM_SUSPECT);
3741 } else {
3742 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3743 }
3744
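/*
 * Queue the transaction: a reply is handed directly to the target
 * thread waiting on it, a synchronous call is recorded on this
 * thread's transaction stack before being passed on, and a oneway
 * transaction goes through the target node's async queue.
 */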
3745 if (reply) {
3746 binder_enqueue_thread_work(thread, tcomplete);
3747 binder_inner_proc_lock(target_proc);
3748 if (target_thread->is_dead) {
3749 return_error = BR_DEAD_REPLY;
3750 binder_inner_proc_unlock(target_proc);
3751 goto err_dead_proc_or_thread;
3752 }
3753 BUG_ON(t->buffer->async_transaction != 0);
3754 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3755 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3756 target_proc->outstanding_txns++;
3757 binder_inner_proc_unlock(target_proc);
3758 wake_up_interruptible_sync(&target_thread->wait);
3759 binder_free_transaction(in_reply_to);
3760 } else if (!(t->flags & TF_ONE_WAY)) {
3761 BUG_ON(t->buffer->async_transaction != 0);
3762 binder_inner_proc_lock(proc);
3763 /*
3764 * Defer the TRANSACTION_COMPLETE, so we don't return to
3765 * userspace immediately; this allows the target process to
3766 * immediately start processing this transaction, reducing
3767 * latency. We will then return the TRANSACTION_COMPLETE when
3768 * the target replies (or there is an error).
3769 */
3770 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3771 t->from_parent = thread->transaction_stack;
3772 thread->transaction_stack = t;
3773 binder_inner_proc_unlock(proc);
3774 return_error = binder_proc_transaction(t,
3775 target_proc, target_thread);
3776 if (return_error) {
3777 binder_inner_proc_lock(proc);
3778 binder_pop_transaction_ilocked(thread, t);
3779 binder_inner_proc_unlock(proc);
3780 goto err_dead_proc_or_thread;
3781 }
3782 } else {
3783 BUG_ON(target_node == NULL);
3784 BUG_ON(t->buffer->async_transaction != 1);
3785 return_error = binder_proc_transaction(t, target_proc, NULL);
3786 /*
3787 * Let the caller know when async transaction reaches a frozen
3788 * process and is put in a pending queue, waiting for the target
3789 * process to be unfrozen.
3790 */
3791 if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
3792 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3793 binder_netlink_report(proc, t, tr->data_size,
3794 return_error);
3795 }
3796 binder_enqueue_thread_work(thread, tcomplete);
3797 if (return_error &&
3798 return_error != BR_TRANSACTION_PENDING_FROZEN)
3799 goto err_dead_proc_or_thread;
3800 }
3801 if (target_thread)
3802 binder_thread_dec_tmpref(target_thread);
3803 binder_proc_dec_tmpref(target_proc);
3804 if (target_node)
3805 binder_dec_node_tmpref(target_node);
3806 /*
3807 * write barrier to synchronize with initialization
3808 * of log entry
3809 */
3810 smp_wmb();
3811 WRITE_ONCE(e->debug_id_done, t_debug_id);
3812 return;
3813
3814 err_dead_proc_or_thread:
3815 binder_txn_error("%d:%d dead process or thread\n",
3816 thread->pid, proc->pid);
3817 return_error_line = __LINE__;
3818 binder_dequeue_work(proc, tcomplete);
3819 err_translate_failed:
3820 err_bad_object_type:
3821 err_bad_offset:
3822 err_bad_parent:
3823 err_copy_data_failed:
3824 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3825 binder_free_txn_fixups(t);
3826 trace_binder_transaction_failed_buffer_release(t->buffer);
3827 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3828 buffer_offset, true);
3829 if (target_node)
3830 binder_dec_node_tmpref(target_node);
3831 target_node = NULL;
3832 t->buffer->transaction = NULL;
3833 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3834 err_binder_alloc_buf_failed:
3835 err_bad_extra_size:
3836 if (lsmctx.context)
3837 security_release_secctx(&lsmctx);
3838 err_get_secctx_failed:
3839 kfree(tcomplete);
3840 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3841 err_alloc_tcomplete_failed:
3842 if (trace_binder_txn_latency_free_enabled())
3843 binder_txn_latency_free(t);
3844 err_bad_todo_list:
3845 err_bad_call_stack:
3846 err_empty_call_stack:
3847 err_dead_binder:
3848 err_invalid_target_handle:
3849 if (target_node) {
3850 binder_dec_node(target_node, 1, 0);
3851 binder_dec_node_tmpref(target_node);
3852 }
3853
3854 binder_netlink_report(proc, t, tr->data_size, return_error);
3855 kfree(t);
3856 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3857 err_alloc_t_failed:
3858
3859 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3860 "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3861 proc->pid, thread->pid, reply ? "reply" :
3862 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3863 target_proc ? target_proc->pid : 0,
3864 target_thread ? target_thread->pid : 0,
3865 t_debug_id, return_error, return_error_param,
3866 tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3867 return_error_line);
3868
3869 if (target_thread)
3870 binder_thread_dec_tmpref(target_thread);
3871 if (target_proc)
3872 binder_proc_dec_tmpref(target_proc);
3873
3874 {
3875 struct binder_transaction_log_entry *fe;
3876
3877 e->return_error = return_error;
3878 e->return_error_param = return_error_param;
3879 e->return_error_line = return_error_line;
3880 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3881 *fe = *e;
3882 /*
3883 * write barrier to synchronize with initialization
3884 * of log entry
3885 */
3886 smp_wmb();
3887 WRITE_ONCE(e->debug_id_done, t_debug_id);
3888 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3889 }
3890
3891 BUG_ON(thread->return_error.cmd != BR_OK);
3892 if (in_reply_to) {
3893 binder_set_txn_from_error(in_reply_to, t_debug_id,
3894 return_error, return_error_param);
3895 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3896 binder_enqueue_thread_work(thread, &thread->return_error.work);
3897 binder_send_failed_reply(in_reply_to, return_error);
3898 } else {
3899 binder_inner_proc_lock(proc);
3900 binder_set_extended_error(&thread->ee, t_debug_id,
3901 return_error, return_error_param);
3902 binder_inner_proc_unlock(proc);
3903 thread->return_error.cmd = return_error;
3904 binder_enqueue_thread_work(thread, &thread->return_error.work);
3905 }
3906 }
3907
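/**
 * binder_request_freeze_notification() - attach freeze notification to a ref
 * @proc:          binder_proc of the caller
 * @thread:        binder_thread issuing BC_REQUEST_FREEZE_NOTIFICATION
 * @handle_cookie: handle naming the ref, plus the cookie to report back
 *
 * Allocate a struct binder_ref_freeze and attach it to the ref named by
 * @handle_cookie->handle. If the node still has an owning process, an
 * initial BINDER_WORK_FROZEN_BINDER work item carrying that process's
 * current frozen state is queued to @proc->todo immediately.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL if the
 * ref is invalid or already has a freeze notification attached.
 */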
3908 static int
3909 binder_request_freeze_notification(struct binder_proc *proc,
3910 struct binder_thread *thread,
3911 struct binder_handle_cookie *handle_cookie)
3912 {
3913 struct binder_ref_freeze *freeze;
3914 struct binder_ref *ref;
3915
3916 freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3917 if (!freeze)
3918 return -ENOMEM;
3919 binder_proc_lock(proc);
3920 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3921 if (!ref) {
3922 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3923 proc->pid, thread->pid, handle_cookie->handle);
3924 binder_proc_unlock(proc);
3925 kfree(freeze);
3926 return -EINVAL;
3927 }
3928
3929 binder_node_lock(ref->node);
3930 if (ref->freeze) {
3931 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3932 proc->pid, thread->pid);
3933 binder_node_unlock(ref->node);
3934 binder_proc_unlock(proc);
3935 kfree(freeze);
3936 return -EINVAL;
3937 }
3938
3939 binder_stats_created(BINDER_STAT_FREEZE);
3940 INIT_LIST_HEAD(&freeze->work.entry);
3941 freeze->cookie = handle_cookie->cookie;
3942 freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3943 ref->freeze = freeze;
3944
3945 if (ref->node->proc) {
3946 binder_inner_proc_lock(ref->node->proc);
3947 freeze->is_frozen = ref->node->proc->is_frozen;
3948 binder_inner_proc_unlock(ref->node->proc);
3949
3950 binder_inner_proc_lock(proc);
3951 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3952 binder_wakeup_proc_ilocked(proc);
3953 binder_inner_proc_unlock(proc);
3954 }
3955
3956 binder_node_unlock(ref->node);
3957 binder_proc_unlock(proc);
3958 return 0;
3959 }
3960
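/**
 * binder_clear_freeze_notification() - detach freeze notification from a ref
 * @proc:          binder_proc of the caller
 * @thread:        binder_thread issuing BC_CLEAR_FREEZE_NOTIFICATION
 * @handle_cookie: handle naming the ref, plus the expected cookie
 *
 * Detach the freeze object from the ref and reuse it to deliver a
 * BINDER_WORK_CLEAR_FREEZE_NOTIFICATION confirmation to userspace; the
 * comment in the body describes how pending and in-flight notifications
 * are handled.
 *
 * Return: 0 on success, -EINVAL if the ref is invalid, no notification
 * is active, or the cookie does not match.
 */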
3961 static int
3962 binder_clear_freeze_notification(struct binder_proc *proc,
3963 struct binder_thread *thread,
3964 struct binder_handle_cookie *handle_cookie)
3965 {
3966 struct binder_ref_freeze *freeze;
3967 struct binder_ref *ref;
3968
3969 binder_proc_lock(proc);
3970 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3971 if (!ref) {
3972 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3973 proc->pid, thread->pid, handle_cookie->handle);
3974 binder_proc_unlock(proc);
3975 return -EINVAL;
3976 }
3977
3978 binder_node_lock(ref->node);
3979
3980 if (!ref->freeze) {
3981 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3982 proc->pid, thread->pid);
3983 binder_node_unlock(ref->node);
3984 binder_proc_unlock(proc);
3985 return -EINVAL;
3986 }
3987 freeze = ref->freeze;
3988 binder_inner_proc_lock(proc);
3989 if (freeze->cookie != handle_cookie->cookie) {
3990 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3991 proc->pid, thread->pid, (u64)freeze->cookie,
3992 (u64)handle_cookie->cookie);
3993 binder_inner_proc_unlock(proc);
3994 binder_node_unlock(ref->node);
3995 binder_proc_unlock(proc);
3996 return -EINVAL;
3997 }
3998 ref->freeze = NULL;
3999 /*
4000 * Take the existing freeze object and overwrite its work type. There are three cases here:
4001 * 1. No pending notification. In this case just add the work to the queue.
4002 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
4003 * should resend with the new work type.
4004 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
4005 * needs to be done here.
4006 */
4007 freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
4008 if (list_empty(&freeze->work.entry)) {
4009 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4010 binder_wakeup_proc_ilocked(proc);
4011 } else if (freeze->sent) {
4012 freeze->resend = true;
4013 }
4014 binder_inner_proc_unlock(proc);
4015 binder_node_unlock(ref->node);
4016 binder_proc_unlock(proc);
4017 return 0;
4018 }
4019
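/**
 * binder_freeze_notification_done() - ack a delivered freeze notification
 * @proc:   binder_proc of the caller
 * @thread: binder_thread issuing BC_FREEZE_NOTIFICATION_DONE
 * @cookie: cookie of the notification being acknowledged
 *
 * Remove the matching work item from @proc->delivered_freeze. If a
 * resend was requested while the notification was in flight, requeue
 * it to @proc->todo and wake the process.
 *
 * Return: 0 on success, -EINVAL if no delivered notification matches
 * @cookie.
 */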
4020 static int
4021 binder_freeze_notification_done(struct binder_proc *proc,
4022 struct binder_thread *thread,
4023 binder_uintptr_t cookie)
4024 {
4025 struct binder_ref_freeze *freeze = NULL;
4026 struct binder_work *w;
4027
4028 binder_inner_proc_lock(proc);
4029 list_for_each_entry(w, &proc->delivered_freeze, entry) {
4030 struct binder_ref_freeze *tmp_freeze =
4031 container_of(w, struct binder_ref_freeze, work);
4032
4033 if (tmp_freeze->cookie == cookie) {
4034 freeze = tmp_freeze;
4035 break;
4036 }
4037 }
4038 if (!freeze) {
4039 binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
4040 proc->pid, thread->pid, (u64)cookie);
4041 binder_inner_proc_unlock(proc);
4042 return -EINVAL;
4043 }
4044 binder_dequeue_work_ilocked(&freeze->work);
4045 freeze->sent = false;
4046 if (freeze->resend) {
4047 freeze->resend = false;
4048 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4049 binder_wakeup_proc_ilocked(proc);
4050 }
4051 binder_inner_proc_unlock(proc);
4052 return 0;
4053 }
4054
4055 /**
4056 * binder_free_buf() - free the specified buffer
4057 * @proc: binder proc that owns buffer
4058 * @thread: binder thread performing the buffer release
4059 * @buffer: buffer to be freed
4060 * @is_failure: failed to send transaction
4061 *
4062 * If the buffer is for an async transaction, enqueue the next async
4063 * transaction from the node.
4064 *
4065 * Cleanup the buffer and free it.
4066 */
4067 static void
4068 binder_free_buf(struct binder_proc *proc,
4069 struct binder_thread *thread,
4070 struct binder_buffer *buffer, bool is_failure)
4071 {
4072 binder_inner_proc_lock(proc);
4073 if (buffer->transaction) {
4074 buffer->transaction->buffer = NULL;
4075 buffer->transaction = NULL;
4076 }
4077 binder_inner_proc_unlock(proc);
4078 if (buffer->async_transaction && buffer->target_node) {
4079 struct binder_node *buf_node;
4080 struct binder_work *w;
4081
4082 buf_node = buffer->target_node;
4083 binder_node_inner_lock(buf_node);
4084 BUG_ON(!buf_node->has_async_transaction);
4085 BUG_ON(buf_node->proc != proc);
4086 w = binder_dequeue_work_head_ilocked(
4087 &buf_node->async_todo);
4088 if (!w) {
4089 buf_node->has_async_transaction = false;
4090 } else {
4091 binder_enqueue_work_ilocked(
4092 w, &proc->todo);
4093 binder_wakeup_proc_ilocked(proc);
4094 }
4095 binder_node_inner_unlock(buf_node);
4096 }
4097 trace_binder_transaction_buffer_release(buffer);
4098 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4099 binder_alloc_free_buf(&proc->alloc, buffer);
4100 }
4101
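/**
 * binder_thread_write() - consume BC_* commands written by userspace
 * @proc:          binder_proc of the caller
 * @thread:        binder_thread that issued the ioctl
 * @binder_buffer: userspace address of the write buffer
 * @size:          size of the write buffer in bytes
 * @consumed:      in: bytes already consumed, out: total bytes consumed
 *
 * Parse and execute commands from the write buffer until it is
 * exhausted, a command fails, or an error is pending for the thread.
 * Each command is a u32 BC_* code followed by a fixed-layout payload;
 * for example BC_FREE_BUFFER is followed by one binder_uintptr_t buffer
 * address, and BC_TRANSACTION by a struct binder_transaction_data.
 *
 * Return: 0 on success or a negative errno on a malformed stream.
 */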
4102 static int binder_thread_write(struct binder_proc *proc,
4103 struct binder_thread *thread,
4104 binder_uintptr_t binder_buffer, size_t size,
4105 binder_size_t *consumed)
4106 {
4107 uint32_t cmd;
4108 struct binder_context *context = proc->context;
4109 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4110 void __user *ptr = buffer + *consumed;
4111 void __user *end = buffer + size;
4112
4113 while (ptr < end && thread->return_error.cmd == BR_OK) {
4114 int ret;
4115
4116 if (get_user(cmd, (uint32_t __user *)ptr))
4117 return -EFAULT;
4118 ptr += sizeof(uint32_t);
4119 trace_binder_command(cmd);
4120 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4121 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4122 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4123 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4124 }
4125 switch (cmd) {
4126 case BC_INCREFS:
4127 case BC_ACQUIRE:
4128 case BC_RELEASE:
4129 case BC_DECREFS: {
4130 uint32_t target;
4131 const char *debug_string;
4132 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4133 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4134 struct binder_ref_data rdata;
4135
4136 if (get_user(target, (uint32_t __user *)ptr))
4137 return -EFAULT;
4138
4139 ptr += sizeof(uint32_t);
4140 ret = -1;
4141 if (increment && !target) {
4142 struct binder_node *ctx_mgr_node;
4143
4144 mutex_lock(&context->context_mgr_node_lock);
4145 ctx_mgr_node = context->binder_context_mgr_node;
4146 if (ctx_mgr_node) {
4147 if (ctx_mgr_node->proc == proc) {
4148 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4149 proc->pid, thread->pid);
4150 mutex_unlock(&context->context_mgr_node_lock);
4151 return -EINVAL;
4152 }
4153 ret = binder_inc_ref_for_node(
4154 proc, ctx_mgr_node,
4155 strong, NULL, &rdata);
4156 }
4157 mutex_unlock(&context->context_mgr_node_lock);
4158 }
4159 if (ret)
4160 ret = binder_update_ref_for_handle(
4161 proc, target, increment, strong,
4162 &rdata);
4163 if (!ret && rdata.desc != target) {
4164 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4165 proc->pid, thread->pid,
4166 target, rdata.desc);
4167 }
4168 switch (cmd) {
4169 case BC_INCREFS:
4170 debug_string = "IncRefs";
4171 break;
4172 case BC_ACQUIRE:
4173 debug_string = "Acquire";
4174 break;
4175 case BC_RELEASE:
4176 debug_string = "Release";
4177 break;
4178 case BC_DECREFS:
4179 default:
4180 debug_string = "DecRefs";
4181 break;
4182 }
4183 if (ret) {
4184 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4185 proc->pid, thread->pid, debug_string,
4186 strong, target, ret);
4187 break;
4188 }
4189 binder_debug(BINDER_DEBUG_USER_REFS,
4190 "%d:%d %s ref %d desc %d s %d w %d\n",
4191 proc->pid, thread->pid, debug_string,
4192 rdata.debug_id, rdata.desc, rdata.strong,
4193 rdata.weak);
4194 break;
4195 }
4196 case BC_INCREFS_DONE:
4197 case BC_ACQUIRE_DONE: {
4198 binder_uintptr_t node_ptr;
4199 binder_uintptr_t cookie;
4200 struct binder_node *node;
4201 bool free_node;
4202
4203 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4204 return -EFAULT;
4205 ptr += sizeof(binder_uintptr_t);
4206 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4207 return -EFAULT;
4208 ptr += sizeof(binder_uintptr_t);
4209 node = binder_get_node(proc, node_ptr);
4210 if (node == NULL) {
4211 binder_user_error("%d:%d %s u%016llx no match\n",
4212 proc->pid, thread->pid,
4213 cmd == BC_INCREFS_DONE ?
4214 "BC_INCREFS_DONE" :
4215 "BC_ACQUIRE_DONE",
4216 (u64)node_ptr);
4217 break;
4218 }
4219 if (cookie != node->cookie) {
4220 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4221 proc->pid, thread->pid,
4222 cmd == BC_INCREFS_DONE ?
4223 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4224 (u64)node_ptr, node->debug_id,
4225 (u64)cookie, (u64)node->cookie);
4226 binder_put_node(node);
4227 break;
4228 }
4229 binder_node_inner_lock(node);
4230 if (cmd == BC_ACQUIRE_DONE) {
4231 if (node->pending_strong_ref == 0) {
4232 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4233 proc->pid, thread->pid,
4234 node->debug_id);
4235 binder_node_inner_unlock(node);
4236 binder_put_node(node);
4237 break;
4238 }
4239 node->pending_strong_ref = 0;
4240 } else {
4241 if (node->pending_weak_ref == 0) {
4242 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4243 proc->pid, thread->pid,
4244 node->debug_id);
4245 binder_node_inner_unlock(node);
4246 binder_put_node(node);
4247 break;
4248 }
4249 node->pending_weak_ref = 0;
4250 }
4251 free_node = binder_dec_node_nilocked(node,
4252 cmd == BC_ACQUIRE_DONE, 0);
4253 WARN_ON(free_node);
4254 binder_debug(BINDER_DEBUG_USER_REFS,
4255 "%d:%d %s node %d ls %d lw %d tr %d\n",
4256 proc->pid, thread->pid,
4257 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4258 node->debug_id, node->local_strong_refs,
4259 node->local_weak_refs, node->tmp_refs);
4260 binder_node_inner_unlock(node);
4261 binder_put_node(node);
4262 break;
4263 }
4264 case BC_ATTEMPT_ACQUIRE:
4265 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4266 return -EINVAL;
4267 case BC_ACQUIRE_RESULT:
4268 pr_err("BC_ACQUIRE_RESULT not supported\n");
4269 return -EINVAL;
4270
4271 case BC_FREE_BUFFER: {
4272 binder_uintptr_t data_ptr;
4273 struct binder_buffer *buffer;
4274
4275 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4276 return -EFAULT;
4277 ptr += sizeof(binder_uintptr_t);
4278
4279 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4280 data_ptr);
4281 if (IS_ERR_OR_NULL(buffer)) {
4282 if (PTR_ERR(buffer) == -EPERM) {
4283 binder_user_error(
4284 "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4285 proc->pid, thread->pid,
4286 (unsigned long)data_ptr - proc->alloc.vm_start);
4287 } else {
4288 binder_user_error(
4289 "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4290 proc->pid, thread->pid,
4291 (unsigned long)data_ptr - proc->alloc.vm_start);
4292 }
4293 break;
4294 }
4295 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4296 "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4297 proc->pid, thread->pid,
4298 (unsigned long)data_ptr - proc->alloc.vm_start,
4299 buffer->debug_id,
4300 buffer->transaction ? "active" : "finished");
4301 binder_free_buf(proc, thread, buffer, false);
4302 break;
4303 }
4304
4305 case BC_TRANSACTION_SG:
4306 case BC_REPLY_SG: {
4307 struct binder_transaction_data_sg tr;
4308
4309 if (copy_from_user(&tr, ptr, sizeof(tr)))
4310 return -EFAULT;
4311 ptr += sizeof(tr);
4312 binder_transaction(proc, thread, &tr.transaction_data,
4313 cmd == BC_REPLY_SG, tr.buffers_size);
4314 break;
4315 }
4316 case BC_TRANSACTION:
4317 case BC_REPLY: {
4318 struct binder_transaction_data tr;
4319
4320 if (copy_from_user(&tr, ptr, sizeof(tr)))
4321 return -EFAULT;
4322 ptr += sizeof(tr);
4323 binder_transaction(proc, thread, &tr,
4324 cmd == BC_REPLY, 0);
4325 break;
4326 }
4327
4328 case BC_REGISTER_LOOPER:
4329 binder_debug(BINDER_DEBUG_THREADS,
4330 "%d:%d BC_REGISTER_LOOPER\n",
4331 proc->pid, thread->pid);
4332 binder_inner_proc_lock(proc);
4333 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4334 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4335 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4336 proc->pid, thread->pid);
4337 } else if (proc->requested_threads == 0) {
4338 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4339 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4340 proc->pid, thread->pid);
4341 } else {
4342 proc->requested_threads--;
4343 proc->requested_threads_started++;
4344 }
4345 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4346 binder_inner_proc_unlock(proc);
4347 break;
4348 case BC_ENTER_LOOPER:
4349 binder_debug(BINDER_DEBUG_THREADS,
4350 "%d:%d BC_ENTER_LOOPER\n",
4351 proc->pid, thread->pid);
4352 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4353 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4354 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4355 proc->pid, thread->pid);
4356 }
4357 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4358 break;
4359 case BC_EXIT_LOOPER:
4360 binder_debug(BINDER_DEBUG_THREADS,
4361 "%d:%d BC_EXIT_LOOPER\n",
4362 proc->pid, thread->pid);
4363 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4364 break;
4365
4366 case BC_REQUEST_DEATH_NOTIFICATION:
4367 case BC_CLEAR_DEATH_NOTIFICATION: {
4368 uint32_t target;
4369 binder_uintptr_t cookie;
4370 struct binder_ref *ref;
4371 struct binder_ref_death *death = NULL;
4372
4373 if (get_user(target, (uint32_t __user *)ptr))
4374 return -EFAULT;
4375 ptr += sizeof(uint32_t);
4376 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4377 return -EFAULT;
4378 ptr += sizeof(binder_uintptr_t);
4379 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4380 /*
4381 * Allocate memory for death notification
4382 * before taking lock
4383 */
4384 death = kzalloc(sizeof(*death), GFP_KERNEL);
4385 if (death == NULL) {
4386 WARN_ON(thread->return_error.cmd !=
4387 BR_OK);
4388 thread->return_error.cmd = BR_ERROR;
4389 binder_enqueue_thread_work(
4390 thread,
4391 &thread->return_error.work);
4392 binder_debug(
4393 BINDER_DEBUG_FAILED_TRANSACTION,
4394 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4395 proc->pid, thread->pid);
4396 break;
4397 }
4398 }
4399 binder_proc_lock(proc);
4400 ref = binder_get_ref_olocked(proc, target, false);
4401 if (ref == NULL) {
4402 binder_user_error("%d:%d %s invalid ref %d\n",
4403 proc->pid, thread->pid,
4404 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4405 "BC_REQUEST_DEATH_NOTIFICATION" :
4406 "BC_CLEAR_DEATH_NOTIFICATION",
4407 target);
4408 binder_proc_unlock(proc);
4409 kfree(death);
4410 break;
4411 }
4412
4413 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4414 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4415 proc->pid, thread->pid,
4416 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4417 "BC_REQUEST_DEATH_NOTIFICATION" :
4418 "BC_CLEAR_DEATH_NOTIFICATION",
4419 (u64)cookie, ref->data.debug_id,
4420 ref->data.desc, ref->data.strong,
4421 ref->data.weak, ref->node->debug_id);
4422
4423 binder_node_lock(ref->node);
4424 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4425 if (ref->death) {
4426 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4427 proc->pid, thread->pid);
4428 binder_node_unlock(ref->node);
4429 binder_proc_unlock(proc);
4430 kfree(death);
4431 break;
4432 }
4433 binder_stats_created(BINDER_STAT_DEATH);
4434 INIT_LIST_HEAD(&death->work.entry);
4435 death->cookie = cookie;
4436 ref->death = death;
4437 if (ref->node->proc == NULL) {
4438 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4439
4440 binder_inner_proc_lock(proc);
4441 binder_enqueue_work_ilocked(
4442 &ref->death->work, &proc->todo);
4443 binder_wakeup_proc_ilocked(proc);
4444 binder_inner_proc_unlock(proc);
4445 }
4446 } else {
4447 if (ref->death == NULL) {
4448 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4449 proc->pid, thread->pid);
4450 binder_node_unlock(ref->node);
4451 binder_proc_unlock(proc);
4452 break;
4453 }
4454 death = ref->death;
4455 if (death->cookie != cookie) {
4456 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4457 proc->pid, thread->pid,
4458 (u64)death->cookie,
4459 (u64)cookie);
4460 binder_node_unlock(ref->node);
4461 binder_proc_unlock(proc);
4462 break;
4463 }
4464 ref->death = NULL;
4465 binder_inner_proc_lock(proc);
4466 if (list_empty(&death->work.entry)) {
4467 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4468 if (thread->looper &
4469 (BINDER_LOOPER_STATE_REGISTERED |
4470 BINDER_LOOPER_STATE_ENTERED))
4471 binder_enqueue_thread_work_ilocked(
4472 thread,
4473 &death->work);
4474 else {
4475 binder_enqueue_work_ilocked(
4476 &death->work,
4477 &proc->todo);
4478 binder_wakeup_proc_ilocked(
4479 proc);
4480 }
4481 } else {
4482 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4483 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4484 }
4485 binder_inner_proc_unlock(proc);
4486 }
4487 binder_node_unlock(ref->node);
4488 binder_proc_unlock(proc);
4489 } break;
4490 case BC_DEAD_BINDER_DONE: {
4491 struct binder_work *w;
4492 binder_uintptr_t cookie;
4493 struct binder_ref_death *death = NULL;
4494
4495 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4496 return -EFAULT;
4497
4498 ptr += sizeof(cookie);
4499 binder_inner_proc_lock(proc);
4500 list_for_each_entry(w, &proc->delivered_death,
4501 entry) {
4502 struct binder_ref_death *tmp_death =
4503 container_of(w,
4504 struct binder_ref_death,
4505 work);
4506
4507 if (tmp_death->cookie == cookie) {
4508 death = tmp_death;
4509 break;
4510 }
4511 }
4512 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4513 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4514 proc->pid, thread->pid, (u64)cookie,
4515 death);
4516 if (death == NULL) {
4517 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4518 proc->pid, thread->pid, (u64)cookie);
4519 binder_inner_proc_unlock(proc);
4520 break;
4521 }
4522 binder_dequeue_work_ilocked(&death->work);
4523 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4524 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4525 if (thread->looper &
4526 (BINDER_LOOPER_STATE_REGISTERED |
4527 BINDER_LOOPER_STATE_ENTERED))
4528 binder_enqueue_thread_work_ilocked(
4529 thread, &death->work);
4530 else {
4531 binder_enqueue_work_ilocked(
4532 &death->work,
4533 &proc->todo);
4534 binder_wakeup_proc_ilocked(proc);
4535 }
4536 }
4537 binder_inner_proc_unlock(proc);
4538 } break;
4539
4540 case BC_REQUEST_FREEZE_NOTIFICATION: {
4541 struct binder_handle_cookie handle_cookie;
4542 int error;
4543
4544 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4545 return -EFAULT;
4546 ptr += sizeof(handle_cookie);
4547 error = binder_request_freeze_notification(proc, thread,
4548 &handle_cookie);
4549 if (error)
4550 return error;
4551 } break;
4552
4553 case BC_CLEAR_FREEZE_NOTIFICATION: {
4554 struct binder_handle_cookie handle_cookie;
4555 int error;
4556
4557 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4558 return -EFAULT;
4559 ptr += sizeof(handle_cookie);
4560 error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4561 if (error)
4562 return error;
4563 } break;
4564
4565 case BC_FREEZE_NOTIFICATION_DONE: {
4566 binder_uintptr_t cookie;
4567 int error;
4568
4569 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4570 return -EFAULT;
4571
4572 ptr += sizeof(cookie);
4573 error = binder_freeze_notification_done(proc, thread, cookie);
4574 if (error)
4575 return error;
4576 } break;
4577
4578 default:
4579 pr_err("%d:%d unknown command %u\n",
4580 proc->pid, thread->pid, cmd);
4581 return -EINVAL;
4582 }
4583 *consumed = ptr - buffer;
4584 }
4585 return 0;
4586 }
4587
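/**
 * binder_stat_br() - account for a BR_* command returned to userspace
 * @proc:   binder_proc receiving the command
 * @thread: binder_thread receiving the command
 * @cmd:    the BR_* command code
 *
 * Trace the return code and bump the global, per-process and
 * per-thread counters for it.
 */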
4588 static void binder_stat_br(struct binder_proc *proc,
4589 struct binder_thread *thread, uint32_t cmd)
4590 {
4591 trace_binder_return(cmd);
4592 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4593 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4594 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4595 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4596 }
4597 }
4598
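/**
 * binder_put_node_cmd() - write a node refcount command to the read buffer
 * @proc:          binder_proc of the reader
 * @thread:        binder_thread of the reader
 * @ptrp:          in/out pointer into the userspace read buffer
 * @node_ptr:      userspace pointer of the node
 * @node_cookie:   userspace cookie of the node
 * @node_debug_id: node debug id, used only for logging
 * @cmd:           one of BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
 * @cmd_name:      printable name of @cmd
 *
 * Copy @cmd followed by the node's pointer and cookie to the read
 * buffer and advance *@ptrp past the bytes written.
 *
 * Return: 0 on success, -EFAULT if the read buffer cannot be written.
 */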
4599 static int binder_put_node_cmd(struct binder_proc *proc,
4600 struct binder_thread *thread,
4601 void __user **ptrp,
4602 binder_uintptr_t node_ptr,
4603 binder_uintptr_t node_cookie,
4604 int node_debug_id,
4605 uint32_t cmd, const char *cmd_name)
4606 {
4607 void __user *ptr = *ptrp;
4608
4609 if (put_user(cmd, (uint32_t __user *)ptr))
4610 return -EFAULT;
4611 ptr += sizeof(uint32_t);
4612
4613 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4614 return -EFAULT;
4615 ptr += sizeof(binder_uintptr_t);
4616
4617 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4618 return -EFAULT;
4619 ptr += sizeof(binder_uintptr_t);
4620
4621 binder_stat_br(proc, thread, cmd);
4622 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4623 proc->pid, thread->pid, cmd_name, node_debug_id,
4624 (u64)node_ptr, (u64)node_cookie);
4625
4626 *ptrp = ptr;
4627 return 0;
4628 }
4629
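/**
 * binder_wait_for_work() - block until there is work for the thread
 * @thread:       binder_thread that is waiting
 * @do_proc_work: whether work queued to the process also counts
 *
 * Sleep (interruptible and freezable) until work arrives. When
 * @do_proc_work is true the thread is parked on proc->waiting_threads
 * while it sleeps so it can be selected to handle process-wide work.
 *
 * Return: 0 when work is available, -EINTR if a signal is pending.
 */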
4630 static int binder_wait_for_work(struct binder_thread *thread,
4631 bool do_proc_work)
4632 {
4633 DEFINE_WAIT(wait);
4634 struct binder_proc *proc = thread->proc;
4635 int ret = 0;
4636
4637 binder_inner_proc_lock(proc);
4638 for (;;) {
4639 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4640 if (binder_has_work_ilocked(thread, do_proc_work))
4641 break;
4642 if (do_proc_work)
4643 list_add(&thread->waiting_thread_node,
4644 &proc->waiting_threads);
4645 binder_inner_proc_unlock(proc);
4646 schedule();
4647 binder_inner_proc_lock(proc);
4648 list_del_init(&thread->waiting_thread_node);
4649 if (signal_pending(current)) {
4650 ret = -EINTR;
4651 break;
4652 }
4653 }
4654 finish_wait(&thread->wait, &wait);
4655 binder_inner_proc_unlock(proc);
4656
4657 return ret;
4658 }
4659
4660 /**
4661 * binder_apply_fd_fixups() - finish fd translation
4662 * @proc: binder_proc associated with @t->buffer
4663 * @t: binder transaction with list of fd fixups
4664 *
4665 * Now that we are in the context of the transaction target
4666 * process, we can allocate and install fds. Process the
4667 * list of fds to translate, fix up the buffer with the
4668 * new fds first, and only then install the files.
4669 *
4670 * If we fail to allocate an fd, skip the install and release
4671 * any fds that have already been allocated.
4672 */
4673 static int binder_apply_fd_fixups(struct binder_proc *proc,
4674 struct binder_transaction *t)
4675 {
4676 struct binder_txn_fd_fixup *fixup, *tmp;
4677 int ret = 0;
4678
4679 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4680 int fd = get_unused_fd_flags(O_CLOEXEC);
4681
4682 if (fd < 0) {
4683 binder_debug(BINDER_DEBUG_TRANSACTION,
4684 "failed fd fixup txn %d fd %d\n",
4685 t->debug_id, fd);
4686 ret = -ENOMEM;
4687 goto err;
4688 }
4689 binder_debug(BINDER_DEBUG_TRANSACTION,
4690 "fd fixup txn %d fd %d\n",
4691 t->debug_id, fd);
4692 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4693 fixup->target_fd = fd;
4694 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4695 fixup->offset, &fd,
4696 sizeof(u32))) {
4697 ret = -EINVAL;
4698 goto err;
4699 }
4700 }
4701 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4702 fd_install(fixup->target_fd, fixup->file);
4703 list_del(&fixup->fixup_entry);
4704 kfree(fixup);
4705 }
4706
4707 return ret;
4708
4709 err:
4710 binder_free_txn_fixups(t);
4711 return ret;
4712 }
4713
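/**
 * binder_thread_read() - fill the userspace read buffer with BR_* work
 * @proc:          binder_proc of the caller
 * @thread:        binder_thread that issued the ioctl
 * @binder_buffer: userspace address of the read buffer
 * @size:          size of the read buffer in bytes
 * @consumed:      in: bytes already filled, out: total bytes filled
 * @non_block:     O_NONBLOCK is set, do not sleep waiting for work
 *
 * Dequeue work items from the thread's todo list (or, when the thread
 * is available for process work, from proc->todo) and translate each
 * one into BR_* commands in the read buffer. Blocks in
 * binder_wait_for_work() unless @non_block is set, in which case
 * -EAGAIN is returned when no work is ready.
 *
 * Return: 0 on success or a negative errno.
 */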
4714 static int binder_thread_read(struct binder_proc *proc,
4715 struct binder_thread *thread,
4716 binder_uintptr_t binder_buffer, size_t size,
4717 binder_size_t *consumed, int non_block)
4718 {
4719 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4720 void __user *ptr = buffer + *consumed;
4721 void __user *end = buffer + size;
4722
4723 int ret = 0;
4724 int wait_for_proc_work;
4725
4726 if (*consumed == 0) {
4727 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4728 return -EFAULT;
4729 ptr += sizeof(uint32_t);
4730 }
4731
4732 retry:
4733 binder_inner_proc_lock(proc);
4734 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4735 binder_inner_proc_unlock(proc);
4736
4737 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4738
4739 trace_binder_wait_for_work(wait_for_proc_work,
4740 !!thread->transaction_stack,
4741 !binder_worklist_empty(proc, &thread->todo));
4742 if (wait_for_proc_work) {
4743 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4744 BINDER_LOOPER_STATE_ENTERED))) {
4745 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4746 proc->pid, thread->pid, thread->looper);
4747 wait_event_interruptible(binder_user_error_wait,
4748 binder_stop_on_user_error < 2);
4749 }
4750 binder_set_nice(proc->default_priority);
4751 }
4752
4753 if (non_block) {
4754 if (!binder_has_work(thread, wait_for_proc_work))
4755 ret = -EAGAIN;
4756 } else {
4757 ret = binder_wait_for_work(thread, wait_for_proc_work);
4758 }
4759
4760 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4761
4762 if (ret)
4763 return ret;
4764
4765 while (1) {
4766 uint32_t cmd;
4767 struct binder_transaction_data_secctx tr;
4768 struct binder_transaction_data *trd = &tr.transaction_data;
4769 struct binder_work *w = NULL;
4770 struct list_head *list = NULL;
4771 struct binder_transaction *t = NULL;
4772 struct binder_thread *t_from;
4773 size_t trsize = sizeof(*trd);
4774
4775 binder_inner_proc_lock(proc);
4776 if (!binder_worklist_empty_ilocked(&thread->todo))
4777 list = &thread->todo;
4778 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4779 wait_for_proc_work)
4780 list = &proc->todo;
4781 else {
4782 binder_inner_proc_unlock(proc);
4783
4784 /* no data added */
4785 if (ptr - buffer == 4 && !thread->looper_need_return)
4786 goto retry;
4787 break;
4788 }
4789
4790 if (end - ptr < sizeof(tr) + 4) {
4791 binder_inner_proc_unlock(proc);
4792 break;
4793 }
4794 w = binder_dequeue_work_head_ilocked(list);
4795 if (binder_worklist_empty_ilocked(&thread->todo))
4796 thread->process_todo = false;
4797
4798 switch (w->type) {
4799 case BINDER_WORK_TRANSACTION: {
4800 binder_inner_proc_unlock(proc);
4801 t = container_of(w, struct binder_transaction, work);
4802 } break;
4803 case BINDER_WORK_RETURN_ERROR: {
4804 struct binder_error *e = container_of(
4805 w, struct binder_error, work);
4806
4807 WARN_ON(e->cmd == BR_OK);
4808 binder_inner_proc_unlock(proc);
4809 if (put_user(e->cmd, (uint32_t __user *)ptr))
4810 return -EFAULT;
4811 cmd = e->cmd;
4812 e->cmd = BR_OK;
4813 ptr += sizeof(uint32_t);
4814
4815 binder_stat_br(proc, thread, cmd);
4816 } break;
4817 case BINDER_WORK_TRANSACTION_COMPLETE:
4818 case BINDER_WORK_TRANSACTION_PENDING:
4819 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4820 if (proc->oneway_spam_detection_enabled &&
4821 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4822 cmd = BR_ONEWAY_SPAM_SUSPECT;
4823 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4824 cmd = BR_TRANSACTION_PENDING_FROZEN;
4825 else
4826 cmd = BR_TRANSACTION_COMPLETE;
4827 binder_inner_proc_unlock(proc);
4828 kfree(w);
4829 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4830 if (put_user(cmd, (uint32_t __user *)ptr))
4831 return -EFAULT;
4832 ptr += sizeof(uint32_t);
4833
4834 binder_stat_br(proc, thread, cmd);
4835 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4836 "%d:%d BR_TRANSACTION_COMPLETE\n",
4837 proc->pid, thread->pid);
4838 } break;
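/*
 * BINDER_WORK_NODE: recompute the node's strong/weak state and
 * report any transitions to userspace as BR_INCREFS/BR_ACQUIRE/
 * BR_RELEASE/BR_DECREFS; a node with no references left is freed.
 */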
4839 case BINDER_WORK_NODE: {
4840 struct binder_node *node = container_of(w, struct binder_node, work);
4841 int strong, weak;
4842 binder_uintptr_t node_ptr = node->ptr;
4843 binder_uintptr_t node_cookie = node->cookie;
4844 int node_debug_id = node->debug_id;
4845 int has_weak_ref;
4846 int has_strong_ref;
4847 void __user *orig_ptr = ptr;
4848
4849 BUG_ON(proc != node->proc);
4850 strong = node->internal_strong_refs ||
4851 node->local_strong_refs;
4852 weak = !hlist_empty(&node->refs) ||
4853 node->local_weak_refs ||
4854 node->tmp_refs || strong;
4855 has_strong_ref = node->has_strong_ref;
4856 has_weak_ref = node->has_weak_ref;
4857
4858 if (weak && !has_weak_ref) {
4859 node->has_weak_ref = 1;
4860 node->pending_weak_ref = 1;
4861 node->local_weak_refs++;
4862 }
4863 if (strong && !has_strong_ref) {
4864 node->has_strong_ref = 1;
4865 node->pending_strong_ref = 1;
4866 node->local_strong_refs++;
4867 }
4868 if (!strong && has_strong_ref)
4869 node->has_strong_ref = 0;
4870 if (!weak && has_weak_ref)
4871 node->has_weak_ref = 0;
4872 if (!weak && !strong) {
4873 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4874 "%d:%d node %d u%016llx c%016llx deleted\n",
4875 proc->pid, thread->pid,
4876 node_debug_id,
4877 (u64)node_ptr,
4878 (u64)node_cookie);
4879 rb_erase(&node->rb_node, &proc->nodes);
4880 binder_inner_proc_unlock(proc);
4881 binder_node_lock(node);
4882 /*
4883 * Acquire the node lock before freeing the
4884 * node to serialize with other threads that
4885 * may have been holding the node lock while
4886 * decrementing this node (avoids race where
4887 * this thread frees while the other thread
4888 * is unlocking the node after the final
4889 * decrement)
4890 */
4891 binder_node_unlock(node);
4892 binder_free_node(node);
4893 } else
4894 binder_inner_proc_unlock(proc);
4895
4896 if (weak && !has_weak_ref)
4897 ret = binder_put_node_cmd(
4898 proc, thread, &ptr, node_ptr,
4899 node_cookie, node_debug_id,
4900 BR_INCREFS, "BR_INCREFS");
4901 if (!ret && strong && !has_strong_ref)
4902 ret = binder_put_node_cmd(
4903 proc, thread, &ptr, node_ptr,
4904 node_cookie, node_debug_id,
4905 BR_ACQUIRE, "BR_ACQUIRE");
4906 if (!ret && !strong && has_strong_ref)
4907 ret = binder_put_node_cmd(
4908 proc, thread, &ptr, node_ptr,
4909 node_cookie, node_debug_id,
4910 BR_RELEASE, "BR_RELEASE");
4911 if (!ret && !weak && has_weak_ref)
4912 ret = binder_put_node_cmd(
4913 proc, thread, &ptr, node_ptr,
4914 node_cookie, node_debug_id,
4915 BR_DECREFS, "BR_DECREFS");
4916 if (orig_ptr == ptr)
4917 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4918 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4919 proc->pid, thread->pid,
4920 node_debug_id,
4921 (u64)node_ptr,
4922 (u64)node_cookie);
4923 if (ret)
4924 return ret;
4925 } break;
4926 case BINDER_WORK_DEAD_BINDER:
4927 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4928 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4929 struct binder_ref_death *death;
4930 uint32_t cmd;
4931 binder_uintptr_t cookie;
4932
4933 death = container_of(w, struct binder_ref_death, work);
4934 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4935 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4936 else
4937 cmd = BR_DEAD_BINDER;
4938 cookie = death->cookie;
4939
4940 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4941 "%d:%d %s %016llx\n",
4942 proc->pid, thread->pid,
4943 cmd == BR_DEAD_BINDER ?
4944 "BR_DEAD_BINDER" :
4945 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4946 (u64)cookie);
4947 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4948 binder_inner_proc_unlock(proc);
4949 kfree(death);
4950 binder_stats_deleted(BINDER_STAT_DEATH);
4951 } else {
4952 binder_enqueue_work_ilocked(
4953 w, &proc->delivered_death);
4954 binder_inner_proc_unlock(proc);
4955 }
4956 if (put_user(cmd, (uint32_t __user *)ptr))
4957 return -EFAULT;
4958 ptr += sizeof(uint32_t);
4959 if (put_user(cookie,
4960 (binder_uintptr_t __user *)ptr))
4961 return -EFAULT;
4962 ptr += sizeof(binder_uintptr_t);
4963 binder_stat_br(proc, thread, cmd);
4964 if (cmd == BR_DEAD_BINDER)
4965 goto done; /* DEAD_BINDER notifications can cause transactions */
4966 } break;
4967
4968 case BINDER_WORK_FROZEN_BINDER: {
4969 struct binder_ref_freeze *freeze;
4970 struct binder_frozen_state_info info;
4971
4972 memset(&info, 0, sizeof(info));
4973 freeze = container_of(w, struct binder_ref_freeze, work);
4974 info.is_frozen = freeze->is_frozen;
4975 info.cookie = freeze->cookie;
4976 freeze->sent = true;
4977 binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4978 binder_inner_proc_unlock(proc);
4979
4980 if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4981 return -EFAULT;
4982 ptr += sizeof(uint32_t);
4983 if (copy_to_user(ptr, &info, sizeof(info)))
4984 return -EFAULT;
4985 ptr += sizeof(info);
4986 binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4987 goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4988 } break;
4989
4990 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4991 struct binder_ref_freeze *freeze =
4992 container_of(w, struct binder_ref_freeze, work);
4993 binder_uintptr_t cookie = freeze->cookie;
4994
4995 binder_inner_proc_unlock(proc);
4996 kfree(freeze);
4997 binder_stats_deleted(BINDER_STAT_FREEZE);
4998 if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
4999 return -EFAULT;
5000 ptr += sizeof(uint32_t);
5001 if (put_user(cookie, (binder_uintptr_t __user *)ptr))
5002 return -EFAULT;
5003 ptr += sizeof(binder_uintptr_t);
5004 binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
5005 } break;
5006
5007 default:
5008 binder_inner_proc_unlock(proc);
5009 pr_err("%d:%d: bad work type %d\n",
5010 proc->pid, thread->pid, w->type);
5011 break;
5012 }
5013
5014 if (!t)
5015 continue;
5016
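/*
 * We have a transaction to deliver: build the binder_transaction_data
 * from t and its buffer, apply fd fixups now that we are in the
 * target context, then copy the command and payload to the read
 * buffer.
 */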
5017 BUG_ON(t->buffer == NULL);
5018 if (t->buffer->target_node) {
5019 struct binder_node *target_node = t->buffer->target_node;
5020
5021 trd->target.ptr = target_node->ptr;
5022 trd->cookie = target_node->cookie;
5023 t->saved_priority = task_nice(current);
5024 if (t->priority < target_node->min_priority &&
5025 !(t->flags & TF_ONE_WAY))
5026 binder_set_nice(t->priority);
5027 else if (!(t->flags & TF_ONE_WAY) ||
5028 t->saved_priority > target_node->min_priority)
5029 binder_set_nice(target_node->min_priority);
5030 cmd = BR_TRANSACTION;
5031 } else {
5032 trd->target.ptr = 0;
5033 trd->cookie = 0;
5034 cmd = BR_REPLY;
5035 }
5036 trd->code = t->code;
5037 trd->flags = t->flags;
5038 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
5039
5040 t_from = binder_get_txn_from(t);
5041 if (t_from) {
5042 struct task_struct *sender = t_from->proc->tsk;
5043
5044 trd->sender_pid =
5045 task_tgid_nr_ns(sender,
5046 task_active_pid_ns(current));
5047 } else {
5048 trd->sender_pid = 0;
5049 }
5050
5051 ret = binder_apply_fd_fixups(proc, t);
5052 if (ret) {
5053 struct binder_buffer *buffer = t->buffer;
5054 bool oneway = !!(t->flags & TF_ONE_WAY);
5055 int tid = t->debug_id;
5056
5057 if (t_from)
5058 binder_thread_dec_tmpref(t_from);
5059 buffer->transaction = NULL;
5060 binder_cleanup_transaction(t, "fd fixups failed",
5061 BR_FAILED_REPLY);
5062 binder_free_buf(proc, thread, buffer, true);
5063 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5064 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5065 proc->pid, thread->pid,
5066 oneway ? "async " :
5067 (cmd == BR_REPLY ? "reply " : ""),
5068 tid, BR_FAILED_REPLY, ret, __LINE__);
5069 if (cmd == BR_REPLY) {
5070 cmd = BR_FAILED_REPLY;
5071 if (put_user(cmd, (uint32_t __user *)ptr))
5072 return -EFAULT;
5073 ptr += sizeof(uint32_t);
5074 binder_stat_br(proc, thread, cmd);
5075 break;
5076 }
5077 continue;
5078 }
5079 trd->data_size = t->buffer->data_size;
5080 trd->offsets_size = t->buffer->offsets_size;
5081 trd->data.ptr.buffer = t->buffer->user_data;
5082 trd->data.ptr.offsets = trd->data.ptr.buffer +
5083 ALIGN(t->buffer->data_size,
5084 sizeof(void *));
5085
5086 tr.secctx = t->security_ctx;
5087 if (t->security_ctx) {
5088 cmd = BR_TRANSACTION_SEC_CTX;
5089 trsize = sizeof(tr);
5090 }
5091 if (put_user(cmd, (uint32_t __user *)ptr)) {
5092 if (t_from)
5093 binder_thread_dec_tmpref(t_from);
5094
5095 binder_cleanup_transaction(t, "put_user failed",
5096 BR_FAILED_REPLY);
5097
5098 return -EFAULT;
5099 }
5100 ptr += sizeof(uint32_t);
5101 if (copy_to_user(ptr, &tr, trsize)) {
5102 if (t_from)
5103 binder_thread_dec_tmpref(t_from);
5104
5105 binder_cleanup_transaction(t, "copy_to_user failed",
5106 BR_FAILED_REPLY);
5107
5108 return -EFAULT;
5109 }
5110 ptr += trsize;
5111
5112 trace_binder_transaction_received(t);
5113 binder_stat_br(proc, thread, cmd);
5114 binder_debug(BINDER_DEBUG_TRANSACTION,
5115 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5116 proc->pid, thread->pid,
5117 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5118 (cmd == BR_TRANSACTION_SEC_CTX) ?
5119 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5120 t->debug_id, t_from ? t_from->proc->pid : 0,
5121 t_from ? t_from->pid : 0, cmd,
5122 t->buffer->data_size, t->buffer->offsets_size);
5123
5124 if (t_from)
5125 binder_thread_dec_tmpref(t_from);
5126 t->buffer->allow_user_free = 1;
5127 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5128 binder_inner_proc_lock(thread->proc);
5129 t->to_parent = thread->transaction_stack;
5130 t->to_thread = thread;
5131 thread->transaction_stack = t;
5132 binder_inner_proc_unlock(thread->proc);
5133 } else {
5134 binder_free_transaction(t);
5135 }
5136 break;
5137 }
5138
5139 done:
5140
5141 *consumed = ptr - buffer;
5142 binder_inner_proc_lock(proc);
5143 if (proc->requested_threads == 0 &&
5144 list_empty(&thread->proc->waiting_threads) &&
5145 proc->requested_threads_started < proc->max_threads &&
5146 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5147 BINDER_LOOPER_STATE_ENTERED))
5148 /* the user-space code fails to spawn a new thread if we leave this out */) {
5149 proc->requested_threads++;
5150 binder_inner_proc_unlock(proc);
5151 binder_debug(BINDER_DEBUG_THREADS,
5152 "%d:%d BR_SPAWN_LOOPER\n",
5153 proc->pid, thread->pid);
5154 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5155 return -EFAULT;
5156 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5157 } else
5158 binder_inner_proc_unlock(proc);
5159 return 0;
5160 }
5161
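/*
 * Drain @list of all undelivered work items. Undelivered transactions
 * are failed with BR_DEAD_REPLY; completion, death and freeze records
 * are freed. Called when a thread or process is torn down and its
 * pending work can no longer be delivered.
 */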
5162 static void binder_release_work(struct binder_proc *proc,
5163 struct list_head *list)
5164 {
5165 struct binder_work *w;
5166 enum binder_work_type wtype;
5167
5168 while (1) {
5169 binder_inner_proc_lock(proc);
5170 w = binder_dequeue_work_head_ilocked(list);
5171 wtype = w ? w->type : 0;
5172 binder_inner_proc_unlock(proc);
5173 if (!w)
5174 return;
5175
5176 switch (wtype) {
5177 case BINDER_WORK_TRANSACTION: {
5178 struct binder_transaction *t;
5179
5180 t = container_of(w, struct binder_transaction, work);
5181
5182 binder_cleanup_transaction(t, "process died.",
5183 BR_DEAD_REPLY);
5184 } break;
5185 case BINDER_WORK_RETURN_ERROR: {
5186 struct binder_error *e = container_of(
5187 w, struct binder_error, work);
5188
5189 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5190 "undelivered TRANSACTION_ERROR: %u\n",
5191 e->cmd);
5192 } break;
5193 case BINDER_WORK_TRANSACTION_PENDING:
5194 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5195 case BINDER_WORK_TRANSACTION_COMPLETE: {
5196 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5197 "undelivered TRANSACTION_COMPLETE\n");
5198 kfree(w);
5199 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5200 } break;
5201 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5202 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5203 struct binder_ref_death *death;
5204
5205 death = container_of(w, struct binder_ref_death, work);
5206 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5207 "undelivered death notification, %016llx\n",
5208 (u64)death->cookie);
5209 kfree(death);
5210 binder_stats_deleted(BINDER_STAT_DEATH);
5211 } break;
5212 case BINDER_WORK_NODE:
5213 break;
5214 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5215 struct binder_ref_freeze *freeze;
5216
5217 freeze = container_of(w, struct binder_ref_freeze, work);
5218 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5219 "undelivered freeze notification, %016llx\n",
5220 (u64)freeze->cookie);
5221 kfree(freeze);
5222 binder_stats_deleted(BINDER_STAT_FREEZE);
5223 } break;
5224 default:
5225 pr_err("unexpected work type, %d, not freed\n",
5226 wtype);
5227 break;
5228 }
5229 }
5231 }
5232
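/*
 * Find the binder_thread for the current task in @proc->threads, an
 * rbtree keyed by pid. If no entry exists and @new_thread was supplied,
 * initialize and insert it; otherwise return NULL. Caller must hold the
 * proc inner lock (hence the _ilocked suffix).
 */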
5233 static struct binder_thread *binder_get_thread_ilocked(
5234 struct binder_proc *proc, struct binder_thread *new_thread)
5235 {
5236 struct binder_thread *thread = NULL;
5237 struct rb_node *parent = NULL;
5238 struct rb_node **p = &proc->threads.rb_node;
5239
5240 while (*p) {
5241 parent = *p;
5242 thread = rb_entry(parent, struct binder_thread, rb_node);
5243
5244 if (current->pid < thread->pid)
5245 p = &(*p)->rb_left;
5246 else if (current->pid > thread->pid)
5247 p = &(*p)->rb_right;
5248 else
5249 return thread;
5250 }
5251 if (!new_thread)
5252 return NULL;
5253 thread = new_thread;
5254 binder_stats_created(BINDER_STAT_THREAD);
5255 thread->proc = proc;
5256 thread->pid = current->pid;
5257 atomic_set(&thread->tmp_ref, 0);
5258 init_waitqueue_head(&thread->wait);
5259 INIT_LIST_HEAD(&thread->todo);
5260 rb_link_node(&thread->rb_node, parent, p);
5261 rb_insert_color(&thread->rb_node, &proc->threads);
5262 thread->looper_need_return = true;
5263 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5264 thread->return_error.cmd = BR_OK;
5265 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5266 thread->reply_error.cmd = BR_OK;
5267 thread->ee.command = BR_OK;
5268 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5269 return thread;
5270 }
5271
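/*
 * Return the binder_thread for the current task, creating it on first
 * use. The allocation is done with the inner lock dropped, then the
 * rbtree is re-checked under the lock; if an entry appeared in the
 * meantime, the spare allocation is freed.
 */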
5272 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5273 {
5274 struct binder_thread *thread;
5275 struct binder_thread *new_thread;
5276
5277 binder_inner_proc_lock(proc);
5278 thread = binder_get_thread_ilocked(proc, NULL);
5279 binder_inner_proc_unlock(proc);
5280 if (!thread) {
5281 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5282 if (new_thread == NULL)
5283 return NULL;
5284 binder_inner_proc_lock(proc);
5285 thread = binder_get_thread_ilocked(proc, new_thread);
5286 binder_inner_proc_unlock(proc);
5287 if (thread != new_thread)
5288 kfree(new_thread);
5289 }
5290 return thread;
5291 }
5292
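/*
 * Final teardown of a binder_proc, called once the last temporary
 * reference is dropped: release the device reference, tear down the
 * buffer allocator and drop the task and cred references.
 */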
5293 static void binder_free_proc(struct binder_proc *proc)
5294 {
5295 struct binder_device *device;
5296
5297 BUG_ON(!list_empty(&proc->todo));
5298 BUG_ON(!list_empty(&proc->delivered_death));
5299 if (proc->outstanding_txns)
5300 pr_warn("%s: Unexpected outstanding_txns %d\n",
5301 __func__, proc->outstanding_txns);
5302 device = container_of(proc->context, struct binder_device, context);
5303 if (refcount_dec_and_test(&device->ref)) {
5304 binder_remove_device(device);
5305 kfree(proc->context->name);
5306 kfree(device);
5307 }
5308 binder_alloc_deferred_release(&proc->alloc);
5309 put_task_struct(proc->tsk);
5310 put_cred(proc->cred);
5311 binder_stats_deleted(BINDER_STAT_PROC);
5312 dbitmap_free(&proc->dmap);
5313 kfree(proc);
5314 }
5315
5316 static void binder_free_thread(struct binder_thread *thread)
5317 {
5318 BUG_ON(!list_empty(&thread->todo));
5319 binder_stats_deleted(BINDER_STAT_THREAD);
5320 binder_proc_dec_tmpref(thread->proc);
5321 kfree(thread);
5322 }
5323
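/*
 * Release @thread: remove it from @proc->threads and unwind its
 * transaction stack. If the transaction on top of the stack was
 * targeted at this thread, the sender gets a BR_DEAD_REPLY. Returns
 * the number of transactions that were still active.
 */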
5324 static int binder_thread_release(struct binder_proc *proc,
5325 struct binder_thread *thread)
5326 {
5327 struct binder_transaction *t;
5328 struct binder_transaction *send_reply = NULL;
5329 int active_transactions = 0;
5330 struct binder_transaction *last_t = NULL;
5331
5332 binder_inner_proc_lock(thread->proc);
5333 /*
5334 * take a ref on the proc so it survives
5335 * after we remove this thread from proc->threads.
5336 * The corresponding dec is when we actually
5337 * free the thread in binder_free_thread()
5338 */
5339 proc->tmp_ref++;
5340 /*
5341 * take a ref on this thread to ensure it
5342 * survives while we are releasing it
5343 */
5344 atomic_inc(&thread->tmp_ref);
5345 rb_erase(&thread->rb_node, &proc->threads);
5346 t = thread->transaction_stack;
5347 if (t) {
5348 spin_lock(&t->lock);
5349 if (t->to_thread == thread)
5350 send_reply = t;
5351 } else {
5352 __acquire(&t->lock);
5353 }
5354 thread->is_dead = true;
5355
5356 while (t) {
5357 last_t = t;
5358 active_transactions++;
5359 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5360 "release %d:%d transaction %d %s, still active\n",
5361 proc->pid, thread->pid,
5362 t->debug_id,
5363 (t->to_thread == thread) ? "in" : "out");
5364
5365 if (t->to_thread == thread) {
5366 thread->proc->outstanding_txns--;
5367 t->to_proc = NULL;
5368 t->to_thread = NULL;
5369 if (t->buffer) {
5370 t->buffer->transaction = NULL;
5371 t->buffer = NULL;
5372 }
5373 t = t->to_parent;
5374 } else if (t->from == thread) {
5375 t->from = NULL;
5376 t = t->from_parent;
5377 } else
5378 BUG();
5379 spin_unlock(&last_t->lock);
5380 if (t)
5381 spin_lock(&t->lock);
5382 else
5383 __acquire(&t->lock);
5384 }
5385 /* annotation for sparse, lock not acquired in last iteration above */
5386 __release(&t->lock);
5387
5388 /*
5389 * If this thread used poll, make sure we remove the waitqueue from any
5390 * poll data structures holding it.
5391 */
5392 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5393 wake_up_pollfree(&thread->wait);
5394
5395 binder_inner_proc_unlock(thread->proc);
5396
5397 /*
5398 * This is needed to avoid races between wake_up_pollfree() above and
5399 * someone else removing the last entry from the queue for other reasons
5400 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5401 * descriptor being closed). Such other users hold an RCU read lock, so
5402 * we can be sure they're done after we call synchronize_rcu().
5403 */
5404 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5405 synchronize_rcu();
5406
5407 if (send_reply)
5408 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5409 binder_release_work(proc, &thread->todo);
5410 binder_thread_dec_tmpref(thread);
5411 return active_transactions;
5412 }
5413
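/*
 * poll() support. Threads that poll are flagged with
 * BINDER_LOOPER_STATE_POLL so that binder_thread_release() knows to
 * pollfree their waitqueue.
 */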
5414 static __poll_t binder_poll(struct file *filp,
5415 struct poll_table_struct *wait)
5416 {
5417 struct binder_proc *proc = filp->private_data;
5418 struct binder_thread *thread = NULL;
5419 bool wait_for_proc_work;
5420
5421 thread = binder_get_thread(proc);
5422 if (!thread)
5423 return EPOLLERR;
5424
5425 binder_inner_proc_lock(thread->proc);
5426 thread->looper |= BINDER_LOOPER_STATE_POLL;
5427 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5428
5429 binder_inner_proc_unlock(thread->proc);
5430
5431 poll_wait(filp, &thread->wait, wait);
5432
5433 if (binder_has_work(thread, wait_for_proc_work))
5434 return EPOLLIN;
5435
5436 return 0;
5437 }
5438
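/*
 * Handle BINDER_WRITE_READ: copy in the binder_write_read block,
 * consume the write buffer first, then service the read buffer
 * (blocking unless O_NONBLOCK), and copy the updated consumed
 * counters back to user space.
 */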
5439 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5440 struct binder_thread *thread)
5441 {
5442 int ret = 0;
5443 struct binder_proc *proc = filp->private_data;
5444 void __user *ubuf = (void __user *)arg;
5445 struct binder_write_read bwr;
5446
5447 if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
5448 return -EFAULT;
5449
5450 binder_debug(BINDER_DEBUG_READ_WRITE,
5451 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5452 proc->pid, thread->pid,
5453 (u64)bwr.write_size, (u64)bwr.write_buffer,
5454 (u64)bwr.read_size, (u64)bwr.read_buffer);
5455
5456 if (bwr.write_size > 0) {
5457 ret = binder_thread_write(proc, thread,
5458 bwr.write_buffer,
5459 bwr.write_size,
5460 &bwr.write_consumed);
5461 trace_binder_write_done(ret);
5462 if (ret < 0) {
5463 bwr.read_consumed = 0;
5464 goto out;
5465 }
5466 }
5467 if (bwr.read_size > 0) {
5468 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5469 bwr.read_size,
5470 &bwr.read_consumed,
5471 filp->f_flags & O_NONBLOCK);
5472 trace_binder_read_done(ret);
5473 binder_inner_proc_lock(proc);
5474 if (!binder_worklist_empty_ilocked(&proc->todo))
5475 binder_wakeup_proc_ilocked(proc);
5476 binder_inner_proc_unlock(proc);
5477 if (ret < 0)
5478 goto out;
5479 }
5480 binder_debug(BINDER_DEBUG_READ_WRITE,
5481 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5482 proc->pid, thread->pid,
5483 (u64)bwr.write_consumed, (u64)bwr.write_size,
5484 (u64)bwr.read_consumed, (u64)bwr.read_size);
5485 out:
5486 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5487 ret = -EFAULT;
5488 return ret;
5489 }
5490
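/*
 * Register the calling process as the context manager for this binder
 * context. Only one context manager may exist per context (-EBUSY
 * otherwise), and once an euid has claimed the role, subsequent
 * registrations must come from the same euid.
 */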
5491 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5492 struct flat_binder_object *fbo)
5493 {
5494 int ret = 0;
5495 struct binder_proc *proc = filp->private_data;
5496 struct binder_context *context = proc->context;
5497 struct binder_node *new_node;
5498 kuid_t curr_euid = current_euid();
5499
5500 guard(mutex)(&context->context_mgr_node_lock);
5501 if (context->binder_context_mgr_node) {
5502 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5503 return -EBUSY;
5504 }
5505 ret = security_binder_set_context_mgr(proc->cred);
5506 if (ret < 0)
5507 return ret;
5508 if (uid_valid(context->binder_context_mgr_uid)) {
5509 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5510 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5511 from_kuid(&init_user_ns, curr_euid),
5512 from_kuid(&init_user_ns,
5513 context->binder_context_mgr_uid));
5514 return -EPERM;
5515 }
5516 } else {
5517 context->binder_context_mgr_uid = curr_euid;
5518 }
5519 new_node = binder_new_node(proc, fbo);
5520 if (!new_node)
5521 return -ENOMEM;
5522 binder_node_lock(new_node);
5523 new_node->local_weak_refs++;
5524 new_node->local_strong_refs++;
5525 new_node->has_strong_ref = 1;
5526 new_node->has_weak_ref = 1;
5527 context->binder_context_mgr_node = new_node;
5528 binder_node_unlock(new_node);
5529 binder_put_node(new_node);
5530 return ret;
5531 }
5532
5533 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5534 struct binder_node_info_for_ref *info)
5535 {
5536 struct binder_node *node;
5537 struct binder_context *context = proc->context;
5538 __u32 handle = info->handle;
5539
5540 if (info->strong_count || info->weak_count || info->reserved1 ||
5541 info->reserved2 || info->reserved3) {
5542 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5543 proc->pid);
5544 return -EINVAL;
5545 }
5546
5547 /* This ioctl may only be used by the context manager */
5548 mutex_lock(&context->context_mgr_node_lock);
5549 if (!context->binder_context_mgr_node ||
5550 context->binder_context_mgr_node->proc != proc) {
5551 mutex_unlock(&context->context_mgr_node_lock);
5552 return -EPERM;
5553 }
5554 mutex_unlock(&context->context_mgr_node_lock);
5555
5556 node = binder_get_node_from_ref(proc, handle, true, NULL);
5557 if (!node)
5558 return -EINVAL;
5559
5560 info->strong_count = node->local_strong_refs +
5561 node->internal_strong_refs;
5562 info->weak_count = node->local_weak_refs;
5563
5564 binder_put_node(node);
5565
5566 return 0;
5567 }
5568
5569 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5570 struct binder_node_debug_info *info)
5571 {
5572 struct rb_node *n;
5573 binder_uintptr_t ptr = info->ptr;
5574
5575 memset(info, 0, sizeof(*info));
5576
5577 binder_inner_proc_lock(proc);
5578 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5579 struct binder_node *node = rb_entry(n, struct binder_node,
5580 rb_node);
5581 if (node->ptr > ptr) {
5582 info->ptr = node->ptr;
5583 info->cookie = node->cookie;
5584 info->has_strong_ref = node->has_strong_ref;
5585 info->has_weak_ref = node->has_weak_ref;
5586 break;
5587 }
5588 }
5589 binder_inner_proc_unlock(proc);
5590
5591 return 0;
5592 }
5593
5594 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5595 {
5596 struct rb_node *n;
5597 struct binder_thread *thread;
5598
5599 if (proc->outstanding_txns > 0)
5600 return true;
5601
5602 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5603 thread = rb_entry(n, struct binder_thread, rb_node);
5604 if (thread->transaction_stack)
5605 return true;
5606 }
5607 return false;
5608 }
5609
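/*
 * For each node of @proc, walk the refs that requested freeze
 * notifications and queue BINDER_WORK_FROZEN_BINDER work on the
 * ref's owning process with the new @is_frozen state. If an earlier
 * notification is still queued or in flight, it is updated in place
 * and marked for resend when the state changed after delivery.
 */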
5610 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5611 {
5612 struct binder_node *prev = NULL;
5613 struct rb_node *n;
5614 struct binder_ref *ref;
5615
5616 binder_inner_proc_lock(proc);
5617 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5618 struct binder_node *node;
5619
5620 node = rb_entry(n, struct binder_node, rb_node);
5621 binder_inc_node_tmpref_ilocked(node);
5622 binder_inner_proc_unlock(proc);
5623 if (prev)
5624 binder_put_node(prev);
5625 binder_node_lock(node);
5626 hlist_for_each_entry(ref, &node->refs, node_entry) {
5627 /*
5628 * Need the node lock to synchronize
5629 * with new notification requests and the
5630 * inner lock to synchronize with queued
5631 * freeze notifications.
5632 */
5633 binder_inner_proc_lock(ref->proc);
5634 if (!ref->freeze) {
5635 binder_inner_proc_unlock(ref->proc);
5636 continue;
5637 }
5638 ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5639 if (list_empty(&ref->freeze->work.entry)) {
5640 ref->freeze->is_frozen = is_frozen;
5641 binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5642 binder_wakeup_proc_ilocked(ref->proc);
5643 } else {
5644 if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5645 ref->freeze->resend = true;
5646 ref->freeze->is_frozen = is_frozen;
5647 }
5648 binder_inner_proc_unlock(ref->proc);
5649 }
5650 prev = node;
5651 binder_node_unlock(node);
5652 binder_inner_proc_lock(proc);
5653 if (proc->is_dead)
5654 break;
5655 }
5656 binder_inner_proc_unlock(proc);
5657 if (prev)
5658 binder_put_node(prev);
5659 }
5660
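/*
 * Freeze or unfreeze @target_proc for BINDER_FREEZE. Setting is_frozen
 * makes new transactions fail; if a timeout was given, wait for
 * outstanding transactions to drain and report -EAGAIN if some are
 * still pending. On failure the frozen state is rolled back so the
 * process is not left half-frozen.
 */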
5661 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5662 struct binder_proc *target_proc)
5663 {
5664 int ret = 0;
5665
5666 if (!info->enable) {
5667 binder_inner_proc_lock(target_proc);
5668 target_proc->sync_recv = false;
5669 target_proc->async_recv = false;
5670 target_proc->is_frozen = false;
5671 binder_inner_proc_unlock(target_proc);
5672 binder_add_freeze_work(target_proc, false);
5673 return 0;
5674 }
5675
5676 /*
5677 * Freezing the target. Prevent new transactions by
5678 * setting frozen state. If timeout specified, wait
5679 * for transactions to drain.
5680 */
5681 binder_inner_proc_lock(target_proc);
5682 target_proc->sync_recv = false;
5683 target_proc->async_recv = false;
5684 target_proc->is_frozen = true;
5685 binder_inner_proc_unlock(target_proc);
5686
5687 if (info->timeout_ms > 0)
5688 ret = wait_event_interruptible_timeout(
5689 target_proc->freeze_wait,
5690 (!target_proc->outstanding_txns),
5691 msecs_to_jiffies(info->timeout_ms));
5692
5693 /* Check pending transactions that wait for reply */
5694 if (ret >= 0) {
5695 binder_inner_proc_lock(target_proc);
5696 if (binder_txns_pending_ilocked(target_proc))
5697 ret = -EAGAIN;
5698 binder_inner_proc_unlock(target_proc);
5699 }
5700
5701 if (ret < 0) {
5702 binder_inner_proc_lock(target_proc);
5703 target_proc->is_frozen = false;
5704 binder_inner_proc_unlock(target_proc);
5705 } else {
5706 binder_add_freeze_work(target_proc, true);
5707 }
5708
5709 return ret;
5710 }
5711
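/*
 * BINDER_GET_FROZEN_INFO: aggregate the freeze receive state across
 * every binder_proc with the given pid (there is one per context).
 * Bit 1 of sync_recv additionally reports whether transactions are
 * still pending on any of them.
 */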
5712 static int binder_ioctl_get_freezer_info(
5713 struct binder_frozen_status_info *info)
5714 {
5715 struct binder_proc *target_proc;
5716 bool found = false;
5717 __u32 txns_pending;
5718
5719 info->sync_recv = 0;
5720 info->async_recv = 0;
5721
5722 mutex_lock(&binder_procs_lock);
5723 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5724 if (target_proc->pid == info->pid) {
5725 found = true;
5726 binder_inner_proc_lock(target_proc);
5727 txns_pending = binder_txns_pending_ilocked(target_proc);
5728 info->sync_recv |= target_proc->sync_recv |
5729 (txns_pending << 1);
5730 info->async_recv |= target_proc->async_recv;
5731 binder_inner_proc_unlock(target_proc);
5732 }
5733 }
5734 mutex_unlock(&binder_procs_lock);
5735
5736 if (!found)
5737 return -EINVAL;
5738
5739 return 0;
5740 }
5741
5742 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5743 void __user *ubuf)
5744 {
5745 struct binder_extended_error ee;
5746
5747 binder_inner_proc_lock(thread->proc);
5748 ee = thread->ee;
5749 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5750 binder_inner_proc_unlock(thread->proc);
5751
5752 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5753 return -EFAULT;
5754
5755 return 0;
5756 }
5757
5758 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5759 {
5760 int ret;
5761 struct binder_proc *proc = filp->private_data;
5762 struct binder_thread *thread;
5763 void __user *ubuf = (void __user *)arg;
5764
5765 trace_binder_ioctl(cmd, arg);
5766
5767 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5768 if (ret)
5769 goto err_unlocked;
5770
5771 thread = binder_get_thread(proc);
5772 if (thread == NULL) {
5773 ret = -ENOMEM;
5774 goto err;
5775 }
5776
5777 switch (cmd) {
5778 case BINDER_WRITE_READ:
5779 ret = binder_ioctl_write_read(filp, arg, thread);
5780 if (ret)
5781 goto err;
5782 break;
5783 case BINDER_SET_MAX_THREADS: {
5784 u32 max_threads;
5785
5786 if (copy_from_user(&max_threads, ubuf,
5787 sizeof(max_threads))) {
5788 ret = -EINVAL;
5789 goto err;
5790 }
5791 binder_inner_proc_lock(proc);
5792 proc->max_threads = max_threads;
5793 binder_inner_proc_unlock(proc);
5794 break;
5795 }
5796 case BINDER_SET_CONTEXT_MGR_EXT: {
5797 struct flat_binder_object fbo;
5798
5799 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5800 ret = -EINVAL;
5801 goto err;
5802 }
5803 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5804 if (ret)
5805 goto err;
5806 break;
5807 }
5808 case BINDER_SET_CONTEXT_MGR:
5809 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5810 if (ret)
5811 goto err;
5812 break;
5813 case BINDER_THREAD_EXIT:
5814 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5815 proc->pid, thread->pid);
5816 binder_thread_release(proc, thread);
5817 thread = NULL;
5818 break;
5819 case BINDER_VERSION: {
5820 struct binder_version __user *ver = ubuf;
5821
5822 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5823 &ver->protocol_version)) {
5824 ret = -EINVAL;
5825 goto err;
5826 }
5827 break;
5828 }
5829 case BINDER_GET_NODE_INFO_FOR_REF: {
5830 struct binder_node_info_for_ref info;
5831
5832 if (copy_from_user(&info, ubuf, sizeof(info))) {
5833 ret = -EFAULT;
5834 goto err;
5835 }
5836
5837 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5838 if (ret < 0)
5839 goto err;
5840
5841 if (copy_to_user(ubuf, &info, sizeof(info))) {
5842 ret = -EFAULT;
5843 goto err;
5844 }
5845
5846 break;
5847 }
5848 case BINDER_GET_NODE_DEBUG_INFO: {
5849 struct binder_node_debug_info info;
5850
5851 if (copy_from_user(&info, ubuf, sizeof(info))) {
5852 ret = -EFAULT;
5853 goto err;
5854 }
5855
5856 ret = binder_ioctl_get_node_debug_info(proc, &info);
5857 if (ret < 0)
5858 goto err;
5859
5860 if (copy_to_user(ubuf, &info, sizeof(info))) {
5861 ret = -EFAULT;
5862 goto err;
5863 }
5864 break;
5865 }
5866 case BINDER_FREEZE: {
5867 struct binder_freeze_info info;
5868 struct binder_proc **target_procs = NULL, *target_proc;
5869 int target_procs_count = 0, i = 0;
5870
5871 ret = 0;
5872
5873 if (copy_from_user(&info, ubuf, sizeof(info))) {
5874 ret = -EFAULT;
5875 goto err;
5876 }
5877
5878 mutex_lock(&binder_procs_lock);
5879 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5880 if (target_proc->pid == info.pid)
5881 target_procs_count++;
5882 }
5883
5884 if (target_procs_count == 0) {
5885 mutex_unlock(&binder_procs_lock);
5886 ret = -EINVAL;
5887 goto err;
5888 }
5889
5890 target_procs = kcalloc(target_procs_count,
5891 sizeof(struct binder_proc *),
5892 GFP_KERNEL);
5893
5894 if (!target_procs) {
5895 mutex_unlock(&binder_procs_lock);
5896 ret = -ENOMEM;
5897 goto err;
5898 }
5899
5900 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5901 if (target_proc->pid != info.pid)
5902 continue;
5903
5904 binder_inner_proc_lock(target_proc);
5905 target_proc->tmp_ref++;
5906 binder_inner_proc_unlock(target_proc);
5907
5908 target_procs[i++] = target_proc;
5909 }
5910 mutex_unlock(&binder_procs_lock);
5911
5912 for (i = 0; i < target_procs_count; i++) {
5913 if (ret >= 0)
5914 ret = binder_ioctl_freeze(&info,
5915 target_procs[i]);
5916
5917 binder_proc_dec_tmpref(target_procs[i]);
5918 }
5919
5920 kfree(target_procs);
5921
5922 if (ret < 0)
5923 goto err;
5924 break;
5925 }
5926 case BINDER_GET_FROZEN_INFO: {
5927 struct binder_frozen_status_info info;
5928
5929 if (copy_from_user(&info, ubuf, sizeof(info))) {
5930 ret = -EFAULT;
5931 goto err;
5932 }
5933
5934 ret = binder_ioctl_get_freezer_info(&info);
5935 if (ret < 0)
5936 goto err;
5937
5938 if (copy_to_user(ubuf, &info, sizeof(info))) {
5939 ret = -EFAULT;
5940 goto err;
5941 }
5942 break;
5943 }
5944 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5945 uint32_t enable;
5946
5947 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5948 ret = -EFAULT;
5949 goto err;
5950 }
5951 binder_inner_proc_lock(proc);
5952 proc->oneway_spam_detection_enabled = (bool)enable;
5953 binder_inner_proc_unlock(proc);
5954 break;
5955 }
5956 case BINDER_GET_EXTENDED_ERROR:
5957 ret = binder_ioctl_get_extended_error(thread, ubuf);
5958 if (ret < 0)
5959 goto err;
5960 break;
5961 default:
5962 ret = -EINVAL;
5963 goto err;
5964 }
5965 ret = 0;
5966 err:
5967 if (thread)
5968 thread->looper_need_return = false;
5969 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5970 if (ret && ret != -EINTR)
5971 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5972 err_unlocked:
5973 trace_binder_ioctl_done(ret);
5974 return ret;
5975 }
5976
5977 static void binder_vma_open(struct vm_area_struct *vma)
5978 {
5979 struct binder_proc *proc = vma->vm_private_data;
5980
5981 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5982 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5983 proc->pid, vma->vm_start, vma->vm_end,
5984 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5985 (unsigned long)pgprot_val(vma->vm_page_prot));
5986 }
5987
5988 static void binder_vma_close(struct vm_area_struct *vma)
5989 {
5990 struct binder_proc *proc = vma->vm_private_data;
5991
5992 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5993 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5994 proc->pid, vma->vm_start, vma->vm_end,
5995 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5996 (unsigned long)pgprot_val(vma->vm_page_prot));
5997 binder_alloc_vma_close(&proc->alloc);
5998 }
5999
6000 VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
6001 {
6002 return VM_FAULT_SIGBUS;
6003 }
6004 EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
6005
6006 static const struct vm_operations_struct binder_vm_ops = {
6007 .open = binder_vma_open,
6008 .close = binder_vma_close,
6009 .fault = binder_vm_fault,
6010 };
6011
6012 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
6013 {
6014 struct binder_proc *proc = filp->private_data;
6015
6016 if (proc->tsk != current->group_leader)
6017 return -EINVAL;
6018
6019 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6020 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
6021 __func__, proc->pid, vma->vm_start, vma->vm_end,
6022 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6023 (unsigned long)pgprot_val(vma->vm_page_prot));
6024
6025 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
6026 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
6027 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
6028 return -EPERM;
6029 }
6030 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
6031
6032 vma->vm_ops = &binder_vm_ops;
6033 vma->vm_private_data = proc;
6034
6035 return binder_alloc_mmap_handler(&proc->alloc, vma);
6036 }
6037
6038 static int binder_open(struct inode *nodp, struct file *filp)
6039 {
6040 struct binder_proc *proc, *itr;
6041 struct binder_device *binder_dev;
6042 struct binderfs_info *info;
6043 struct dentry *binder_binderfs_dir_entry_proc = NULL;
6044 bool existing_pid = false;
6045
6046 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6047 current->group_leader->pid, current->pid);
6048
6049 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6050 if (proc == NULL)
6051 return -ENOMEM;
6052
6053 dbitmap_init(&proc->dmap);
6054 spin_lock_init(&proc->inner_lock);
6055 spin_lock_init(&proc->outer_lock);
6056 get_task_struct(current->group_leader);
6057 proc->tsk = current->group_leader;
6058 proc->cred = get_cred(filp->f_cred);
6059 INIT_LIST_HEAD(&proc->todo);
6060 init_waitqueue_head(&proc->freeze_wait);
6061 proc->default_priority = task_nice(current);
6062 /* binderfs stashes devices in i_private */
6063 if (is_binderfs_device(nodp)) {
6064 binder_dev = nodp->i_private;
6065 info = nodp->i_sb->s_fs_info;
6066 binder_binderfs_dir_entry_proc = info->proc_log_dir;
6067 } else {
6068 binder_dev = container_of(filp->private_data,
6069 struct binder_device, miscdev);
6070 }
6071 refcount_inc(&binder_dev->ref);
6072 proc->context = &binder_dev->context;
6073 binder_alloc_init(&proc->alloc);
6074
6075 binder_stats_created(BINDER_STAT_PROC);
6076 proc->pid = current->group_leader->pid;
6077 INIT_LIST_HEAD(&proc->delivered_death);
6078 INIT_LIST_HEAD(&proc->delivered_freeze);
6079 INIT_LIST_HEAD(&proc->waiting_threads);
6080 filp->private_data = proc;
6081
6082 mutex_lock(&binder_procs_lock);
6083 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6084 if (itr->pid == proc->pid) {
6085 existing_pid = true;
6086 break;
6087 }
6088 }
6089 hlist_add_head(&proc->proc_node, &binder_procs);
6090 mutex_unlock(&binder_procs_lock);
6091
6092 if (binder_debugfs_dir_entry_proc && !existing_pid) {
6093 char strbuf[11];
6094
6095 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6096 /*
6097 * proc debug entries are shared between contexts.
6098 * Only create for the first PID to avoid debugfs log spamming.
6099 * The printing code will anyway print all contexts for a given
6100 * PID so this is not a problem.
6101 */
6102 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6103 binder_debugfs_dir_entry_proc,
6104 (void *)(unsigned long)proc->pid,
6105 &proc_fops);
6106 }
6107
6108 if (binder_binderfs_dir_entry_proc && !existing_pid) {
6109 char strbuf[11];
6110 struct dentry *binderfs_entry;
6111
6112 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6113 /*
6114 * Similar to debugfs, the process specific log file is shared
6115 * between contexts. Only create for the first PID.
6116 * This is ok since, as with debugfs, the log file will contain
6117 * information on all contexts of a given PID.
6118 */
6119 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6120 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6121 if (!IS_ERR(binderfs_entry)) {
6122 proc->binderfs_entry = binderfs_entry;
6123 } else {
6124 int error;
6125
6126 error = PTR_ERR(binderfs_entry);
6127 pr_warn("Unable to create file %s in binderfs (error %d)\n",
6128 strbuf, error);
6129 }
6130 }
6131
6132 return 0;
6133 }
6134
6135 static int binder_flush(struct file *filp, fl_owner_t id)
6136 {
6137 struct binder_proc *proc = filp->private_data;
6138
6139 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6140
6141 return 0;
6142 }
6143
6144 static void binder_deferred_flush(struct binder_proc *proc)
6145 {
6146 struct rb_node *n;
6147 int wake_count = 0;
6148
6149 binder_inner_proc_lock(proc);
6150 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6151 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6152
6153 thread->looper_need_return = true;
6154 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6155 wake_up_interruptible(&thread->wait);
6156 wake_count++;
6157 }
6158 }
6159 binder_inner_proc_unlock(proc);
6160
6161 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6162 "binder_flush: %d woke %d threads\n", proc->pid,
6163 wake_count);
6164 }
6165
6166 static int binder_release(struct inode *nodp, struct file *filp)
6167 {
6168 struct binder_proc *proc = filp->private_data;
6169
6170 debugfs_remove(proc->debugfs_entry);
6171
6172 if (proc->binderfs_entry) {
6173 simple_recursive_removal(proc->binderfs_entry, NULL);
6174 proc->binderfs_entry = NULL;
6175 }
6176
6177 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6178
6179 return 0;
6180 }
6181
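/*
 * Release a node belonging to a dying process: move it to the global
 * dead-nodes list and queue BINDER_WORK_DEAD_BINDER work for every
 * ref that requested a death notification. Returns @refs plus the
 * number of refs found on this node.
 */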
6182 static int binder_node_release(struct binder_node *node, int refs)
6183 {
6184 struct binder_ref *ref;
6185 int death = 0;
6186 struct binder_proc *proc = node->proc;
6187
6188 binder_release_work(proc, &node->async_todo);
6189
6190 binder_node_lock(node);
6191 binder_inner_proc_lock(proc);
6192 binder_dequeue_work_ilocked(&node->work);
6193 /*
6194 * The caller must have taken a temporary ref on the node.
6195 */
6196 BUG_ON(!node->tmp_refs);
6197 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6198 binder_inner_proc_unlock(proc);
6199 binder_node_unlock(node);
6200 binder_free_node(node);
6201
6202 return refs;
6203 }
6204
6205 node->proc = NULL;
6206 node->local_strong_refs = 0;
6207 node->local_weak_refs = 0;
6208 binder_inner_proc_unlock(proc);
6209
6210 spin_lock(&binder_dead_nodes_lock);
6211 hlist_add_head(&node->dead_node, &binder_dead_nodes);
6212 spin_unlock(&binder_dead_nodes_lock);
6213
6214 hlist_for_each_entry(ref, &node->refs, node_entry) {
6215 refs++;
6216 /*
6217 * Need the node lock to synchronize
6218 * with new notification requests and the
6219 * inner lock to synchronize with queued
6220 * death notifications.
6221 */
6222 binder_inner_proc_lock(ref->proc);
6223 if (!ref->death) {
6224 binder_inner_proc_unlock(ref->proc);
6225 continue;
6226 }
6227
6228 death++;
6229
6230 BUG_ON(!list_empty(&ref->death->work.entry));
6231 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6232 binder_enqueue_work_ilocked(&ref->death->work,
6233 &ref->proc->todo);
6234 binder_wakeup_proc_ilocked(ref->proc);
6235 binder_inner_proc_unlock(ref->proc);
6236 }
6237
6238 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6239 "node %d now dead, refs %d, death %d\n",
6240 node->debug_id, refs, death);
6241 binder_node_unlock(node);
6242 binder_put_node(node);
6243
6244 return refs;
6245 }
6246
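/*
 * Deferred part of binder_release(): mark the proc dead, then release
 * all threads, nodes, refs and undelivered work before dropping the
 * temporary reference taken here, which frees the proc once every
 * other temporary reference is gone.
 */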
6247 static void binder_deferred_release(struct binder_proc *proc)
6248 {
6249 struct binder_context *context = proc->context;
6250 struct rb_node *n;
6251 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6252
6253 mutex_lock(&binder_procs_lock);
6254 hlist_del(&proc->proc_node);
6255 mutex_unlock(&binder_procs_lock);
6256
6257 mutex_lock(&context->context_mgr_node_lock);
6258 if (context->binder_context_mgr_node &&
6259 context->binder_context_mgr_node->proc == proc) {
6260 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6261 "%s: %d context_mgr_node gone\n",
6262 __func__, proc->pid);
6263 context->binder_context_mgr_node = NULL;
6264 }
6265 mutex_unlock(&context->context_mgr_node_lock);
6266 binder_inner_proc_lock(proc);
6267 /*
6268 * Make sure proc stays alive after we
6269 * remove all the threads
6270 */
6271 proc->tmp_ref++;
6272
6273 proc->is_dead = true;
6274 proc->is_frozen = false;
6275 proc->sync_recv = false;
6276 proc->async_recv = false;
6277 threads = 0;
6278 active_transactions = 0;
6279 while ((n = rb_first(&proc->threads))) {
6280 struct binder_thread *thread;
6281
6282 thread = rb_entry(n, struct binder_thread, rb_node);
6283 binder_inner_proc_unlock(proc);
6284 threads++;
6285 active_transactions += binder_thread_release(proc, thread);
6286 binder_inner_proc_lock(proc);
6287 }
6288
6289 nodes = 0;
6290 incoming_refs = 0;
6291 while ((n = rb_first(&proc->nodes))) {
6292 struct binder_node *node;
6293
6294 node = rb_entry(n, struct binder_node, rb_node);
6295 nodes++;
6296 /*
6297 * take a temporary ref on the node before
6298 * calling binder_node_release() which will either
6299 * kfree() the node or call binder_put_node()
6300 */
6301 binder_inc_node_tmpref_ilocked(node);
6302 rb_erase(&node->rb_node, &proc->nodes);
6303 binder_inner_proc_unlock(proc);
6304 incoming_refs = binder_node_release(node, incoming_refs);
6305 binder_inner_proc_lock(proc);
6306 }
6307 binder_inner_proc_unlock(proc);
6308
6309 outgoing_refs = 0;
6310 binder_proc_lock(proc);
6311 while ((n = rb_first(&proc->refs_by_desc))) {
6312 struct binder_ref *ref;
6313
6314 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6315 outgoing_refs++;
6316 binder_cleanup_ref_olocked(ref);
6317 binder_proc_unlock(proc);
6318 binder_free_ref(ref);
6319 binder_proc_lock(proc);
6320 }
6321 binder_proc_unlock(proc);
6322
6323 binder_release_work(proc, &proc->todo);
6324 binder_release_work(proc, &proc->delivered_death);
6325 binder_release_work(proc, &proc->delivered_freeze);
6326
6327 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6328 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6329 __func__, proc->pid, threads, nodes, incoming_refs,
6330 outgoing_refs, active_transactions);
6331
6332 binder_proc_dec_tmpref(proc);
6333 }
6334
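/*
 * Workqueue handler that drains binder_deferred_list, performing the
 * flush and/or release work queued by binder_defer_work() one proc at
 * a time.
 */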
6335 static void binder_deferred_func(struct work_struct *work)
6336 {
6337 struct binder_proc *proc;
6338
6339 int defer;
6340
6341 do {
6342 mutex_lock(&binder_deferred_lock);
6343 if (!hlist_empty(&binder_deferred_list)) {
6344 proc = hlist_entry(binder_deferred_list.first,
6345 struct binder_proc, deferred_work_node);
6346 hlist_del_init(&proc->deferred_work_node);
6347 defer = proc->deferred_work;
6348 proc->deferred_work = 0;
6349 } else {
6350 proc = NULL;
6351 defer = 0;
6352 }
6353 mutex_unlock(&binder_deferred_lock);
6354
6355 if (defer & BINDER_DEFERRED_FLUSH)
6356 binder_deferred_flush(proc);
6357
6358 if (defer & BINDER_DEFERRED_RELEASE)
6359 binder_deferred_release(proc); /* frees proc */
6360 } while (proc);
6361 }
6362 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6363
6364 static void
6365 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6366 {
6367 guard(mutex)(&binder_deferred_lock);
6368 proc->deferred_work |= defer;
6369 if (hlist_unhashed(&proc->deferred_work_node)) {
6370 hlist_add_head(&proc->deferred_work_node,
6371 &binder_deferred_list);
6372 schedule_work(&binder_deferred_work);
6373 }
6374 }
6375
6376 static void print_binder_transaction_ilocked(struct seq_file *m,
6377 struct binder_proc *proc,
6378 const char *prefix,
6379 struct binder_transaction *t)
6380 {
6381 struct binder_proc *to_proc;
6382 struct binder_buffer *buffer = t->buffer;
6383 ktime_t current_time = ktime_get();
6384
6385 spin_lock(&t->lock);
6386 to_proc = t->to_proc;
6387 seq_printf(m,
6388 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
6389 prefix, t->debug_id, t,
6390 t->from_pid,
6391 t->from_tid,
6392 to_proc ? to_proc->pid : 0,
6393 t->to_thread ? t->to_thread->pid : 0,
6394 t->code, t->flags, t->priority, t->is_async, t->is_reply,
6395 ktime_ms_delta(current_time, t->start_time));
6396 spin_unlock(&t->lock);
6397
6398 if (proc != to_proc) {
6399 /*
6400 * Can only safely deref buffer if we are holding the
6401 * correct proc inner lock for this node
6402 */
6403 seq_puts(m, "\n");
6404 return;
6405 }
6406
6407 if (buffer == NULL) {
6408 seq_puts(m, " buffer free\n");
6409 return;
6410 }
6411 if (buffer->target_node)
6412 seq_printf(m, " node %d", buffer->target_node->debug_id);
6413 seq_printf(m, " size %zd:%zd offset %lx\n",
6414 buffer->data_size, buffer->offsets_size,
6415 buffer->user_data - proc->alloc.vm_start);
6416 }
6417
6418 static void print_binder_work_ilocked(struct seq_file *m,
6419 struct binder_proc *proc,
6420 const char *prefix,
6421 const char *transaction_prefix,
6422 struct binder_work *w, bool hash_ptrs)
6423 {
6424 struct binder_node *node;
6425 struct binder_transaction *t;
6426
6427 switch (w->type) {
6428 case BINDER_WORK_TRANSACTION:
6429 t = container_of(w, struct binder_transaction, work);
6430 print_binder_transaction_ilocked(
6431 m, proc, transaction_prefix, t);
6432 break;
6433 case BINDER_WORK_RETURN_ERROR: {
6434 struct binder_error *e = container_of(
6435 w, struct binder_error, work);
6436
6437 seq_printf(m, "%stransaction error: %u\n",
6438 prefix, e->cmd);
6439 } break;
6440 case BINDER_WORK_TRANSACTION_COMPLETE:
6441 seq_printf(m, "%stransaction complete\n", prefix);
6442 break;
6443 case BINDER_WORK_NODE:
6444 node = container_of(w, struct binder_node, work);
6445 if (hash_ptrs)
6446 seq_printf(m, "%snode work %d: u%p c%p\n",
6447 prefix, node->debug_id,
6448 (void *)(long)node->ptr,
6449 (void *)(long)node->cookie);
6450 else
6451 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6452 prefix, node->debug_id,
6453 (u64)node->ptr, (u64)node->cookie);
6454 break;
6455 case BINDER_WORK_DEAD_BINDER:
6456 seq_printf(m, "%shas dead binder\n", prefix);
6457 break;
6458 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6459 seq_printf(m, "%shas cleared dead binder\n", prefix);
6460 break;
6461 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6462 seq_printf(m, "%shas cleared death notification\n", prefix);
6463 break;
6464 case BINDER_WORK_FROZEN_BINDER:
6465 seq_printf(m, "%shas frozen binder\n", prefix);
6466 break;
6467 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6468 seq_printf(m, "%shas cleared freeze notification\n", prefix);
6469 break;
6470 default:
6471 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6472 break;
6473 }
6474 }
6475
6476 static void print_binder_thread_ilocked(struct seq_file *m,
6477 struct binder_thread *thread,
6478 bool print_always, bool hash_ptrs)
6479 {
6480 struct binder_transaction *t;
6481 struct binder_work *w;
6482 size_t start_pos = m->count;
6483 size_t header_pos;
6484
6485 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6486 thread->pid, thread->looper,
6487 thread->looper_need_return,
6488 atomic_read(&thread->tmp_ref));
6489 header_pos = m->count;
6490 t = thread->transaction_stack;
6491 while (t) {
6492 if (t->from == thread) {
6493 print_binder_transaction_ilocked(m, thread->proc,
6494 " outgoing transaction", t);
6495 t = t->from_parent;
6496 } else if (t->to_thread == thread) {
6497 print_binder_transaction_ilocked(m, thread->proc,
6498 " incoming transaction", t);
6499 t = t->to_parent;
6500 } else {
6501 print_binder_transaction_ilocked(m, thread->proc,
6502 " bad transaction", t);
6503 t = NULL;
6504 }
6505 }
6506 list_for_each_entry(w, &thread->todo, entry) {
6507 print_binder_work_ilocked(m, thread->proc, " ",
6508 " pending transaction",
6509 w, hash_ptrs);
6510 }
6511 if (!print_always && m->count == header_pos)
6512 m->count = start_pos;
6513 }
6514
6515 static void print_binder_node_nilocked(struct seq_file *m,
6516 struct binder_node *node,
6517 bool hash_ptrs)
6518 {
6519 struct binder_ref *ref;
6520 struct binder_work *w;
6521 int count;
6522
6523 count = hlist_count_nodes(&node->refs);
6524
6525 if (hash_ptrs)
6526 seq_printf(m, " node %d: u%p c%p", node->debug_id,
6527 (void *)(long)node->ptr, (void *)(long)node->cookie);
6528 else
6529 seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
6530 (u64)node->ptr, (u64)node->cookie);
6531 seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6532 node->has_strong_ref, node->has_weak_ref,
6533 node->local_strong_refs, node->local_weak_refs,
6534 node->internal_strong_refs, count, node->tmp_refs);
6535 if (count) {
6536 seq_puts(m, " proc");
6537 hlist_for_each_entry(ref, &node->refs, node_entry)
6538 seq_printf(m, " %d", ref->proc->pid);
6539 }
6540 seq_puts(m, "\n");
6541 if (node->proc) {
6542 list_for_each_entry(w, &node->async_todo, entry)
6543 print_binder_work_ilocked(m, node->proc, " ",
6544 " pending async transaction",
6545 w, hash_ptrs);
6546 }
6547 }
6548
6549 static void print_binder_ref_olocked(struct seq_file *m,
6550 struct binder_ref *ref)
6551 {
6552 binder_node_lock(ref->node);
6553 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6554 ref->data.debug_id, ref->data.desc,
6555 ref->node->proc ? "" : "dead ",
6556 ref->node->debug_id, ref->data.strong,
6557 ref->data.weak, ref->death);
6558 binder_node_unlock(ref->node);
6559 }
6560
6561 /**
6562 * print_next_binder_node_ilocked() - Print binder_node from a locked list
6563 * @m: struct seq_file for output via seq_printf()
6564 * @proc: struct binder_proc we hold the inner_proc_lock to (if any)
6565 * @node: struct binder_node to print fields of
6566 * @prev_node: struct binder_node we hold a temporary reference to (if any)
6567 * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
6568 *
6569 * Helper function to handle synchronization around printing a struct
6570 * binder_node while iterating through @proc->nodes or the dead nodes list.
6571 * Caller must hold either @proc->inner_lock (for live nodes) or
6572 * binder_dead_nodes_lock. This lock will be released during the body of this
6573 * function, but it will be reacquired before returning to the caller.
6574 *
6575 * Return: pointer to the struct binder_node we hold a tmpref on
6576 */
6577 static struct binder_node *
6578 print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6579 struct binder_node *node,
6580 struct binder_node *prev_node, bool hash_ptrs)
6581 {
6582 /*
6583 * Take a temporary reference on the node so that it isn't freed while
6584 * we print it.
6585 */
6586 binder_inc_node_tmpref_ilocked(node);
6587 /*
6588 * Live nodes need to drop the inner proc lock and dead nodes need to
6589 * drop the binder_dead_nodes_lock before trying to take the node lock.
6590 */
6591 if (proc)
6592 binder_inner_proc_unlock(proc);
6593 else
6594 spin_unlock(&binder_dead_nodes_lock);
6595 if (prev_node)
6596 binder_put_node(prev_node);
6597 binder_node_inner_lock(node);
6598 print_binder_node_nilocked(m, node, hash_ptrs);
6599 binder_node_inner_unlock(node);
6600 if (proc)
6601 binder_inner_proc_lock(proc);
6602 else
6603 spin_lock(&binder_dead_nodes_lock);
6604 return node;
6605 }
6606
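/*
 * Dump the state of one process for debugfs: threads, nodes (all of
 * them when @print_all, otherwise only those with pending async
 * work), refs, allocator state and the pending work lists. When
 * nothing beyond the header was printed and @print_all is false,
 * the output is rewound.
 */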
6607 static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6608 bool print_all, bool hash_ptrs)
6609 {
6610 struct binder_work *w;
6611 struct rb_node *n;
6612 size_t start_pos = m->count;
6613 size_t header_pos;
6614 struct binder_node *last_node = NULL;
6615
6616 seq_printf(m, "proc %d\n", proc->pid);
6617 seq_printf(m, "context %s\n", proc->context->name);
6618 header_pos = m->count;
6619
6620 binder_inner_proc_lock(proc);
6621 for (n = rb_first(&proc->threads); n; n = rb_next(n))
6622 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6623 rb_node), print_all, hash_ptrs);
6624
6625 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6626 struct binder_node *node = rb_entry(n, struct binder_node,
6627 rb_node);
6628 if (!print_all && !node->has_async_transaction)
6629 continue;
6630
6631 last_node = print_next_binder_node_ilocked(m, proc, node,
6632 last_node,
6633 hash_ptrs);
6634 }
6635 binder_inner_proc_unlock(proc);
6636 if (last_node)
6637 binder_put_node(last_node);
6638
6639 if (print_all) {
6640 binder_proc_lock(proc);
6641 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6642 print_binder_ref_olocked(m, rb_entry(n,
6643 struct binder_ref,
6644 rb_node_desc));
6645 binder_proc_unlock(proc);
6646 }
6647 binder_alloc_print_allocated(m, &proc->alloc);
6648 binder_inner_proc_lock(proc);
6649 list_for_each_entry(w, &proc->todo, entry)
6650 print_binder_work_ilocked(m, proc, " ",
6651 " pending transaction", w,
6652 hash_ptrs);
6653 list_for_each_entry(w, &proc->delivered_death, entry) {
6654 seq_puts(m, " has delivered dead binder\n");
6655 break;
6656 }
6657 list_for_each_entry(w, &proc->delivered_freeze, entry) {
6658 seq_puts(m, " has delivered freeze binder\n");
6659 break;
6660 }
6661 binder_inner_proc_unlock(proc);
6662 if (!print_all && m->count == header_pos)
6663 m->count = start_pos;
6664 }
6665
6666 static const char * const binder_return_strings[] = {
6667 "BR_ERROR",
6668 "BR_OK",
6669 "BR_TRANSACTION",
6670 "BR_REPLY",
6671 "BR_ACQUIRE_RESULT",
6672 "BR_DEAD_REPLY",
6673 "BR_TRANSACTION_COMPLETE",
6674 "BR_INCREFS",
6675 "BR_ACQUIRE",
6676 "BR_RELEASE",
6677 "BR_DECREFS",
6678 "BR_ATTEMPT_ACQUIRE",
6679 "BR_NOOP",
6680 "BR_SPAWN_LOOPER",
6681 "BR_FINISHED",
6682 "BR_DEAD_BINDER",
6683 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6684 "BR_FAILED_REPLY",
6685 "BR_FROZEN_REPLY",
6686 "BR_ONEWAY_SPAM_SUSPECT",
6687 "BR_TRANSACTION_PENDING_FROZEN",
6688 "BR_FROZEN_BINDER",
6689 "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6690 };
6691
6692 static const char * const binder_command_strings[] = {
6693 "BC_TRANSACTION",
6694 "BC_REPLY",
6695 "BC_ACQUIRE_RESULT",
6696 "BC_FREE_BUFFER",
6697 "BC_INCREFS",
6698 "BC_ACQUIRE",
6699 "BC_RELEASE",
6700 "BC_DECREFS",
6701 "BC_INCREFS_DONE",
6702 "BC_ACQUIRE_DONE",
6703 "BC_ATTEMPT_ACQUIRE",
6704 "BC_REGISTER_LOOPER",
6705 "BC_ENTER_LOOPER",
6706 "BC_EXIT_LOOPER",
6707 "BC_REQUEST_DEATH_NOTIFICATION",
6708 "BC_CLEAR_DEATH_NOTIFICATION",
6709 "BC_DEAD_BINDER_DONE",
6710 "BC_TRANSACTION_SG",
6711 "BC_REPLY_SG",
6712 "BC_REQUEST_FREEZE_NOTIFICATION",
6713 "BC_CLEAR_FREEZE_NOTIFICATION",
6714 "BC_FREEZE_NOTIFICATION_DONE",
6715 };
6716
6717 static const char * const binder_objstat_strings[] = {
6718 "proc",
6719 "thread",
6720 "node",
6721 "ref",
6722 "death",
6723 "transaction",
6724 "transaction_complete",
6725 "freeze",
6726 };
6727
6728 static void print_binder_stats(struct seq_file *m, const char *prefix,
6729 struct binder_stats *stats)
6730 {
6731 int i;
6732
6733 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6734 ARRAY_SIZE(binder_command_strings));
6735 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6736 int temp = atomic_read(&stats->bc[i]);
6737
6738 if (temp)
6739 seq_printf(m, "%s%s: %d\n", prefix,
6740 binder_command_strings[i], temp);
6741 }
6742
6743 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6744 ARRAY_SIZE(binder_return_strings));
6745 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6746 int temp = atomic_read(&stats->br[i]);
6747
6748 if (temp)
6749 seq_printf(m, "%s%s: %d\n", prefix,
6750 binder_return_strings[i], temp);
6751 }
6752
6753 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6754 ARRAY_SIZE(binder_objstat_strings));
6755 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6756 ARRAY_SIZE(stats->obj_deleted));
6757 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6758 int created = atomic_read(&stats->obj_created[i]);
6759 int deleted = atomic_read(&stats->obj_deleted[i]);
6760
6761 if (created || deleted)
6762 seq_printf(m, "%s%s: active %d total %d\n",
6763 prefix,
6764 binder_objstat_strings[i],
6765 created - deleted,
6766 created);
6767 }
6768 }
6769
6770 static void print_binder_proc_stats(struct seq_file *m,
6771 struct binder_proc *proc)
6772 {
6773 struct binder_work *w;
6774 struct binder_thread *thread;
6775 struct rb_node *n;
6776 int count, strong, weak, ready_threads;
6777 size_t free_async_space =
6778 binder_alloc_get_free_async_space(&proc->alloc);
6779
6780 seq_printf(m, "proc %d\n", proc->pid);
6781 seq_printf(m, "context %s\n", proc->context->name);
6782 count = 0;
6783 ready_threads = 0;
6784 binder_inner_proc_lock(proc);
6785 for (n = rb_first(&proc->threads); n; n = rb_next(n))
6786 count++;
6787
6788 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6789 ready_threads++;
6790
6791 seq_printf(m, " threads: %d\n", count);
6792 seq_printf(m, " requested threads: %d+%d/%d\n"
6793 " ready threads %d\n"
6794 " free async space %zd\n", proc->requested_threads,
6795 proc->requested_threads_started, proc->max_threads,
6796 ready_threads,
6797 free_async_space);
6798 count = 0;
6799 for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6800 count++;
6801 binder_inner_proc_unlock(proc);
6802 seq_printf(m, " nodes: %d\n", count);
6803 count = 0;
6804 strong = 0;
6805 weak = 0;
6806 binder_proc_lock(proc);
6807 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6808 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6809 rb_node_desc);
6810 count++;
6811 strong += ref->data.strong;
6812 weak += ref->data.weak;
6813 }
6814 binder_proc_unlock(proc);
6815 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6816
6817 count = binder_alloc_get_allocated_count(&proc->alloc);
6818 seq_printf(m, " buffers: %d\n", count);
6819
6820 binder_alloc_print_pages(m, &proc->alloc);
6821
6822 count = 0;
6823 binder_inner_proc_lock(proc);
6824 list_for_each_entry(w, &proc->todo, entry) {
6825 if (w->type == BINDER_WORK_TRANSACTION)
6826 count++;
6827 }
6828 binder_inner_proc_unlock(proc);
6829 seq_printf(m, " pending transactions: %d\n", count);
6830
6831 print_binder_stats(m, " ", &proc->stats);
6832 }
6833
6834 static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6835 {
6836 struct binder_proc *proc;
6837 struct binder_node *node;
6838 struct binder_node *last_node = NULL;
6839
6840 seq_puts(m, "binder state:\n");
6841
6842 spin_lock(&binder_dead_nodes_lock);
6843 if (!hlist_empty(&binder_dead_nodes))
6844 seq_puts(m, "dead nodes:\n");
6845 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6846 last_node = print_next_binder_node_ilocked(m, NULL, node,
6847 last_node,
6848 hash_ptrs);
6849 spin_unlock(&binder_dead_nodes_lock);
6850 if (last_node)
6851 binder_put_node(last_node);
6852
6853 mutex_lock(&binder_procs_lock);
6854 hlist_for_each_entry(proc, &binder_procs, proc_node)
6855 print_binder_proc(m, proc, true, hash_ptrs);
6856 mutex_unlock(&binder_procs_lock);
6857 }
6858
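/*
 * Like print_binder_state(), but passes print_all == false to
 * print_binder_proc() so only threads and nodes that currently carry
 * transactions are printed.
 */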
static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
{
        struct binder_proc *proc;

        seq_puts(m, "binder transactions:\n");
        mutex_lock(&binder_procs_lock);
        hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc(m, proc, false, hash_ptrs);
        mutex_unlock(&binder_procs_lock);
}

static int state_show(struct seq_file *m, void *unused)
{
        print_binder_state(m, false);
        return 0;
}

static int state_hashed_show(struct seq_file *m, void *unused)
{
        print_binder_state(m, true);
        return 0;
}

static int stats_show(struct seq_file *m, void *unused)
{
        struct binder_proc *proc;

        seq_puts(m, "binder stats:\n");

        print_binder_stats(m, "", &binder_stats);

        mutex_lock(&binder_procs_lock);
        hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc_stats(m, proc);
        mutex_unlock(&binder_procs_lock);

        return 0;
}

static int transactions_show(struct seq_file *m, void *unused)
{
        print_binder_transactions(m, false);
        return 0;
}

static int transactions_hashed_show(struct seq_file *m, void *unused)
{
        print_binder_transactions(m, true);
        return 0;
}

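/*
 * Backing routine for the per-pid files under the binder "proc" debugfs
 * directory; m->private holds the pid recorded when the file was
 * created. guard(mutex) drops binder_procs_lock automatically on every
 * return path. A pid can match more than one binder_proc (one per binder
 * context the process has opened), so the whole list is walked rather
 * than stopping at the first hit.
 */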
static int proc_show(struct seq_file *m, void *unused)
{
        struct binder_proc *itr;
        int pid = (unsigned long)m->private;

        guard(mutex)(&binder_procs_lock);
        hlist_for_each_entry(itr, &binder_procs, proc_node) {
                if (itr->pid == pid) {
                        seq_puts(m, "binder proc state:\n");
                        print_binder_proc(m, itr, true, false);
                }
        }

        return 0;
}

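/*
 * Log entries are written without locking: the writer fills in the
 * fields and then publishes the entry's debug_id through debug_id_done.
 * The reader below samples debug_id_done before and after copying the
 * fields out; if the entry was unfinished or the two samples disagree,
 * a writer raced with us and the line is flagged "(incomplete)". The
 * paired smp_rmb() calls keep those reads ordered.
 */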
static void print_binder_transaction_log_entry(struct seq_file *m,
                                               struct binder_transaction_log_entry *e)
{
        int debug_id = READ_ONCE(e->debug_id_done);
        /*
         * read barrier to guarantee that debug_id_done is read before
         * we print the log values
         */
        smp_rmb();
        seq_printf(m,
                   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
                   e->debug_id, (e->call_type == 2) ? "reply" :
                   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
                   e->from_thread, e->to_proc, e->to_thread, e->context_name,
                   e->to_node, e->target_handle, e->data_size, e->offsets_size,
                   e->return_error, e->return_error_param,
                   e->return_error_line);
        /*
         * read barrier to guarantee that debug_id_done is read again
         * only after we are done printing the fields of the entry
         */
        smp_rmb();
        seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
                   "\n" : " (incomplete)\n");
}

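/*
 * Print a transaction log ring buffer oldest-entry-first. log->cur is a
 * free-running index: before the first wrap (!log->full) the valid
 * entries are 0..log_cur, and afterwards the oldest entry is the one
 * just past log_cur.
 */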
static int transaction_log_show(struct seq_file *m, void *unused)
{
        struct binder_transaction_log *log = m->private;
        unsigned int log_cur = atomic_read(&log->cur);
        unsigned int count;
        unsigned int cur;
        int i;

        count = log_cur + 1;
        cur = count < ARRAY_SIZE(log->entry) && !log->full ?
                0 : count % ARRAY_SIZE(log->entry);
        if (count > ARRAY_SIZE(log->entry) || log->full)
                count = ARRAY_SIZE(log->entry);
        for (i = 0; i < count; i++) {
                unsigned int index = cur++ % ARRAY_SIZE(log->entry);

                print_binder_transaction_log_entry(m, &log->entry[index]);
        }
        return 0;
}

const struct file_operations binder_fops = {
        .owner = THIS_MODULE,
        .poll = binder_poll,
        .unlocked_ioctl = binder_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .mmap = binder_mmap,
        .open = binder_open,
        .flush = binder_flush,
        .release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

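/*
 * Table of files created in the binder debugfs directory, typically
 * visible as /sys/kernel/debug/binder/ when debugfs is mounted in the
 * usual place (e.g. "cat /sys/kernel/debug/binder/stats"). The two
 * transaction logs share transaction_log_fops and are told apart by
 * their ->data pointer.
 */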
const struct binder_debugfs_entry binder_debugfs_entries[] = {
        {
                .name = "state",
                .mode = 0444,
                .fops = &state_fops,
                .data = NULL,
        },
        {
                .name = "state_hashed",
                .mode = 0444,
                .fops = &state_hashed_fops,
                .data = NULL,
        },
        {
                .name = "stats",
                .mode = 0444,
                .fops = &stats_fops,
                .data = NULL,
        },
        {
                .name = "transactions",
                .mode = 0444,
                .fops = &transactions_fops,
                .data = NULL,
        },
        {
                .name = "transactions_hashed",
                .mode = 0444,
                .fops = &transactions_hashed_fops,
                .data = NULL,
        },
        {
                .name = "transaction_log",
                .mode = 0444,
                .fops = &transaction_log_fops,
                .data = &binder_transaction_log,
        },
        {
                .name = "failed_transaction_log",
                .mode = 0444,
                .fops = &transaction_log_fops,
                .data = &binder_transaction_log_failed,
        },
        {} /* terminator */
};

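/*
 * binder_devices tracks every registered binder device, including those
 * created dynamically through binderfs, so that they can be found and
 * torn down later. Both helpers rely on guard(spinlock) to release
 * binder_devices_lock on every return path.
 */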
void binder_add_device(struct binder_device *device)
{
        guard(spinlock)(&binder_devices_lock);
        hlist_add_head(&device->hlist, &binder_devices);
}

void binder_remove_device(struct binder_device *device)
{
        guard(spinlock)(&binder_devices_lock);
        hlist_del_init(&device->hlist);
}

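/*
 * Register one statically configured binder device (named via the
 * binder_devices_param module parameter) as a dynamic-minor misc
 * character device and add it to the global device list. The context
 * manager uid starts out invalid until a process claims the context
 * manager role via ioctl.
 */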
static int __init init_binder_device(const char *name)
{
        int ret;
        struct binder_device *binder_device;

        binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
        if (!binder_device)
                return -ENOMEM;

        binder_device->miscdev.fops = &binder_fops;
        binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
        binder_device->miscdev.name = name;

        refcount_set(&binder_device->ref, 1);
        binder_device->context.binder_context_mgr_uid = INVALID_UID;
        binder_device->context.name = name;
        mutex_init(&binder_device->context.context_mgr_node_lock);

        ret = misc_register(&binder_device->miscdev);
        if (ret < 0) {
                kfree(binder_device);
                return ret;
        }

        binder_add_device(binder_device);

        return ret;
}

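/*
 * Driver init: set up the allocator shrinker and debugfs tree, register
 * any statically named devices (skipped when binderfs provides devices
 * on demand), then register the generic netlink family and binderfs.
 * Both transaction logs start with cur == ~0U so the first increment
 * wraps the index to slot 0.
 */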
static int __init binder_init(void)
{
        int ret;
        char *device_name, *device_tmp;
        struct binder_device *device;
        struct hlist_node *tmp;
        char *device_names = NULL;
        const struct binder_debugfs_entry *db_entry;

        ret = binder_alloc_shrinker_init();
        if (ret)
                return ret;

        atomic_set(&binder_transaction_log.cur, ~0U);
        atomic_set(&binder_transaction_log_failed.cur, ~0U);

        binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

        binder_for_each_debugfs_entry(db_entry)
                debugfs_create_file(db_entry->name,
                                    db_entry->mode,
                                    binder_debugfs_dir_entry_root,
                                    db_entry->data,
                                    db_entry->fops);

        binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                                                           binder_debugfs_dir_entry_root);

        if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
            strcmp(binder_devices_param, "") != 0) {
                /*
                 * Copy the module_parameter string, because we don't want to
                 * tokenize it in-place.
                 */
                device_names = kstrdup(binder_devices_param, GFP_KERNEL);
                if (!device_names) {
                        ret = -ENOMEM;
                        goto err_alloc_device_names_failed;
                }

                device_tmp = device_names;
                while ((device_name = strsep(&device_tmp, ","))) {
                        ret = init_binder_device(device_name);
                        if (ret)
                                goto err_init_binder_device_failed;
                }
        }

        ret = genl_register_family(&binder_nl_family);
        if (ret)
                goto err_init_binder_device_failed;

        ret = init_binderfs();
        if (ret)
                goto err_init_binderfs_failed;

        return ret;

err_init_binderfs_failed:
        genl_unregister_family(&binder_nl_family);

err_init_binder_device_failed:
        hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
                misc_deregister(&device->miscdev);
                binder_remove_device(device);
                kfree(device);
        }

        kfree(device_names);

err_alloc_device_names_failed:
        debugfs_remove_recursive(binder_debugfs_dir_entry_root);
        binder_alloc_shrinker_exit();

        return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"