// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
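
/*
 * Illustrative sketch (hypothetical caller, not taken from the driver)
 * of the ordering above when both proc locks are needed. Levels may be
 * skipped, but locks must never be taken in reverse order:
 *
 *	binder_proc_lock(proc);		<- level 1: proc->outer_lock
 *	binder_inner_proc_lock(proc);	<- level 3: proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_proc_unlock(proc);
 */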

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static DEFINE_SPINLOCK(binder_devices_lock);

static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
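
/*
 * Since debug_mask is exposed with mode 0644, it can be changed at
 * runtime through sysfs (assuming the default "binder" module name),
 * e.g. to also log open/close and read/write activity:
 *
 *	echo 0x4b > /sys/module/binder/parameters/debug_mask
 *
 * where 0x4b == USER_ERROR | FAILED_TRANSACTION | OPEN_CLOSE | READ_WRITE.
 */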

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
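
/*
 * Hedged usage sketch: a caller that failed to process a command would
 * record it in the thread's extended-error state, e.g. (assuming the
 * thread's field is named "ee", with illustrative values):
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */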

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without it, threads risk
	 * waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *	   returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
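
/*
 * Typical pairing (see binder_wakeup_proc_ilocked() below): select a
 * waiting thread under the inner lock, then always wake what was
 * returned:
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_wakeup_thread_ilocked(proc, thread, sync);
 */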

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}
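
/*
 * Callers handle the -EAGAIN case by restarting the whole lookup, since
 * dropping proc->outer_lock invalidates any rb-tree state observed so
 * far. binder_get_ref_for_node_olocked() below does exactly this:
 *
 *	retry:
 *		...
 *		if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
 *			goto retry;
 */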

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}

	if (ref->freeze) {
		binder_dequeue_work(ref->proc, &ref->freeze->work);
		binder_stats_deleted(BINDER_STAT_FREEZE);
	}

	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if the ref was not found or if
 * @need_strong_ref is true and the ref holds no strong reference
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and an outstanding tmp_ref will not
		 * decrement the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * The atomic protects the counter value itself; the inner lock
	 * is needed so that the thread->is_dead check and the counter
	 * reaching zero are observed consistently with thread release.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction to clean up
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
1651
binder_txn_latency_free(struct binder_transaction * t)1652 static void binder_txn_latency_free(struct binder_transaction *t)
1653 {
1654 int from_proc, from_thread, to_proc, to_thread;
1655
1656 spin_lock(&t->lock);
1657 from_proc = t->from ? t->from->proc->pid : 0;
1658 from_thread = t->from ? t->from->pid : 0;
1659 to_proc = t->to_proc ? t->to_proc->pid : 0;
1660 to_thread = t->to_thread ? t->to_thread->pid : 0;
1661 spin_unlock(&t->lock);
1662
1663 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1664 }
1665
binder_free_transaction(struct binder_transaction * t)1666 static void binder_free_transaction(struct binder_transaction *t)
1667 {
1668 struct binder_proc *target_proc = t->to_proc;
1669
1670 if (target_proc) {
1671 binder_inner_proc_lock(target_proc);
1672 target_proc->outstanding_txns--;
1673 if (target_proc->outstanding_txns < 0)
1674 pr_warn("%s: Unexpected outstanding_txns %d\n",
1675 __func__, target_proc->outstanding_txns);
1676 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1677 wake_up_interruptible_all(&target_proc->freeze_wait);
1678 if (t->buffer)
1679 t->buffer->transaction = NULL;
1680 binder_inner_proc_unlock(target_proc);
1681 }
1682 if (trace_binder_txn_latency_free_enabled())
1683 binder_txn_latency_free(t);
1684 /*
1685 * If the transaction has no target_proc, then
1686 * t->buffer->transaction has already been cleared.
1687 */
1688 binder_free_txn_fixups(t);
1689 kfree(t);
1690 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1691 }
1692
binder_send_failed_reply(struct binder_transaction * t,uint32_t error_code)1693 static void binder_send_failed_reply(struct binder_transaction *t,
1694 uint32_t error_code)
1695 {
1696 struct binder_thread *target_thread;
1697 struct binder_transaction *next;
1698
1699 BUG_ON(t->flags & TF_ONE_WAY);
1700 while (1) {
1701 target_thread = binder_get_txn_from_and_acq_inner(t);
1702 if (target_thread) {
1703 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1704 "send failed reply for transaction %d to %d:%d\n",
1705 t->debug_id,
1706 target_thread->proc->pid,
1707 target_thread->pid);
1708
1709 binder_pop_transaction_ilocked(target_thread, t);
1710 if (target_thread->reply_error.cmd == BR_OK) {
1711 target_thread->reply_error.cmd = error_code;
1712 binder_enqueue_thread_work_ilocked(
1713 target_thread,
1714 &target_thread->reply_error.work);
1715 wake_up_interruptible(&target_thread->wait);
1716 } else {
1717 /*
1718 * Cannot get here for normal operation, but
1719 * we can if multiple synchronous transactions
1720 * are sent without blocking for responses.
1721 * Just ignore the 2nd error in this case.
1722 */
1723 pr_warn("Unexpected reply error: %u\n",
1724 target_thread->reply_error.cmd);
1725 }
1726 binder_inner_proc_unlock(target_thread->proc);
1727 binder_thread_dec_tmpref(target_thread);
1728 binder_free_transaction(t);
1729 return;
1730 }
1731 __release(&target_thread->proc->inner_lock);
1732 next = t->from_parent;
1733
1734 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1735 "send failed reply for transaction %d, target dead\n",
1736 t->debug_id);
1737
1738 binder_free_transaction(t);
1739 if (next == NULL) {
1740 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1741 "reply failed, no target thread at root\n");
1742 return;
1743 }
1744 t = next;
1745 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1746 "reply failed, no target thread -- retry %d\n",
1747 t->debug_id);
1748 }
1749 }
1750
1751 /**
1752 * binder_cleanup_transaction() - cleans up undelivered transaction
1753 * @t: transaction that needs to be cleaned up
1754 * @reason: reason the transaction wasn't delivered
1755 * @error_code: error to return to caller (if synchronous call)
1756 */
1757 static void binder_cleanup_transaction(struct binder_transaction *t,
1758 const char *reason,
1759 uint32_t error_code)
1760 {
1761 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1762 binder_send_failed_reply(t, error_code);
1763 } else {
1764 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1765 "undelivered transaction %d, %s\n",
1766 t->debug_id, reason);
1767 binder_free_transaction(t);
1768 }
1769 }
1770
1771 /**
1772 * binder_get_object() - gets object and checks for valid metadata
1773 * @proc: binder_proc owning the buffer
1774 * @u: sender's user pointer to base of buffer
1775 * @buffer: binder_buffer that we're parsing.
1776 * @offset: offset in the @buffer at which to validate an object.
1777 * @object: struct binder_object to read into
1778 *
1779 * Copy the binder object at the given offset into @object. If @u is
1780 * provided then the copy is from the sender's buffer. If not, then
1781 * it is copied from the target's @buffer.
1782 *
1783 * Return: If there's a valid metadata object at @offset, the
1784 * size of that object. Otherwise, it returns zero. The object
1785 * is read into the struct binder_object pointed to by @object.
1786 */
1787 static size_t binder_get_object(struct binder_proc *proc,
1788 const void __user *u,
1789 struct binder_buffer *buffer,
1790 unsigned long offset,
1791 struct binder_object *object)
1792 {
1793 size_t read_size;
1794 struct binder_object_header *hdr;
1795 size_t object_size = 0;
1796
1797 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1798 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1799 !IS_ALIGNED(offset, sizeof(u32)))
1800 return 0;
1801
1802 if (u) {
1803 if (copy_from_user(object, u + offset, read_size))
1804 return 0;
1805 } else {
1806 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1807 offset, read_size))
1808 return 0;
1809 }
1810
1811 /* Ok, now see if we read a complete object. */
1812 hdr = &object->hdr;
1813 switch (hdr->type) {
1814 case BINDER_TYPE_BINDER:
1815 case BINDER_TYPE_WEAK_BINDER:
1816 case BINDER_TYPE_HANDLE:
1817 case BINDER_TYPE_WEAK_HANDLE:
1818 object_size = sizeof(struct flat_binder_object);
1819 break;
1820 case BINDER_TYPE_FD:
1821 object_size = sizeof(struct binder_fd_object);
1822 break;
1823 case BINDER_TYPE_PTR:
1824 object_size = sizeof(struct binder_buffer_object);
1825 break;
1826 case BINDER_TYPE_FDA:
1827 object_size = sizeof(struct binder_fd_array_object);
1828 break;
1829 default:
1830 return 0;
1831 }
1832 if (offset <= buffer->data_size - object_size &&
1833 buffer->data_size >= object_size)
1834 return object_size;
1835 else
1836 return 0;
1837 }
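/*
 * Illustrative sketch (not part of the driver; names are illustrative):
 * the two ways callers use binder_get_object(). With a non-NULL @u the
 * object is read from the sender's user-space buffer; with @u == NULL it
 * is re-read from the target's already-copied buffer:
 *
 *	struct binder_object obj;
 *	size_t sz;
 *
 *	// validate an object still in the sender's address space
 *	sz = binder_get_object(proc, user_buffer, t->buffer, offset, &obj);
 *
 *	// re-read an object from the target buffer (e.g. during release)
 *	sz = binder_get_object(proc, NULL, buffer, object_offset, &obj);
 *	if (sz == 0)
 *		return -EINVAL;		// no valid object at @offset
 */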
1838
1839 /**
1840 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1841 * @proc: binder_proc owning the buffer
1842 * @b: binder_buffer containing the object
1843 * @object: struct binder_object to read into
1844 * @index: index in offset array at which the binder_buffer_object is
1845 * located
1846 * @start_offset: points to the start of the offset array
1847 * @object_offsetp: offset of @object read from @b
1848 * @num_valid: the number of valid offsets in the offset array
1849 *
1850 * Return: If @index is within the valid range of the offset array
1851 * described by @start_offset and @num_valid, and if there's a valid
1852 * binder_buffer_object at the offset found in index @index
1853 * of the offset array, that object is returned. Otherwise,
1854 * %NULL is returned.
1855 * Note that the offset found in index @index itself is not
1856 * verified; this function assumes that @num_valid elements
1857 * from @start_offset were previously verified to have valid offsets.
1858 * If @object_offsetp is non-NULL, then the offset within
1859 * @b is written to it.
1860 */
1861 static struct binder_buffer_object *binder_validate_ptr(
1862 struct binder_proc *proc,
1863 struct binder_buffer *b,
1864 struct binder_object *object,
1865 binder_size_t index,
1866 binder_size_t start_offset,
1867 binder_size_t *object_offsetp,
1868 binder_size_t num_valid)
1869 {
1870 size_t object_size;
1871 binder_size_t object_offset;
1872 unsigned long buffer_offset;
1873
1874 if (index >= num_valid)
1875 return NULL;
1876
1877 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1878 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1879 b, buffer_offset,
1880 sizeof(object_offset)))
1881 return NULL;
1882 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1883 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1884 return NULL;
1885 if (object_offsetp)
1886 *object_offsetp = object_offset;
1887
1888 return &object->bbo;
1889 }
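/*
 * Illustrative sketch (not part of the driver): typical use when resolving
 * a parent buffer object, as binder_fixup_parent() does below; @num_valid
 * must count only offsets that have already been verified:
 *
 *	struct binder_object object;
 *	binder_size_t parent_offset;
 *	struct binder_buffer_object *parent;
 *
 *	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
 *				     off_start_offset, &parent_offset,
 *				     num_valid);
 *	if (!parent)
 *		return -EINVAL;	// out of range or not BINDER_TYPE_PTR
 */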
1890
1891 /**
1892 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1893 * @proc: binder_proc owning the buffer
1894 * @b: transaction buffer
1895 * @objects_start_offset: offset to start of objects buffer
1896 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1897 * @fixup_offset: start offset in @buffer to fix up
1898 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1899 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1900 *
1901 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1902 * allowed.
1903 *
1904 * For safety reasons, we only allow fixups inside a buffer to happen
1905 * at increasing offsets; additionally, we only allow fixup on the last
1906 * buffer object that was verified, or one of its parents.
1907 *
1908 * Example of what is allowed:
1909 *
1910 * A
1911 * B (parent = A, offset = 0)
1912 * C (parent = A, offset = 16)
1913 * D (parent = C, offset = 0)
1914 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1915 *
1916 * Examples of what is not allowed:
1917 *
1918 * Decreasing offsets within the same parent:
1919 * A
1920 * C (parent = A, offset = 16)
1921 * B (parent = A, offset = 0) // decreasing offset within A
1922 *
1923 * Referring to a parent that wasn't the last object or any of its parents:
1924 * A
1925 * B (parent = A, offset = 0)
1926 * C (parent = A, offset = 0)
1927 * C (parent = A, offset = 16)
1928 * D (parent = B, offset = 0) // B is not A or any of A's parents
1929 */
1930 static bool binder_validate_fixup(struct binder_proc *proc,
1931 struct binder_buffer *b,
1932 binder_size_t objects_start_offset,
1933 binder_size_t buffer_obj_offset,
1934 binder_size_t fixup_offset,
1935 binder_size_t last_obj_offset,
1936 binder_size_t last_min_offset)
1937 {
1938 if (!last_obj_offset) {
1939 /* No previously verified object, so nothing to fix up into */
1940 return false;
1941 }
1942
1943 while (last_obj_offset != buffer_obj_offset) {
1944 unsigned long buffer_offset;
1945 struct binder_object last_object;
1946 struct binder_buffer_object *last_bbo;
1947 size_t object_size = binder_get_object(proc, NULL, b,
1948 last_obj_offset,
1949 &last_object);
1950 if (object_size != sizeof(*last_bbo))
1951 return false;
1952
1953 last_bbo = &last_object.bbo;
1954 /*
1955 * Safe to retrieve the parent of last_obj, since it
1956 * was already previously verified by the driver.
1957 */
1958 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1959 return false;
1960 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1961 buffer_offset = objects_start_offset +
1962 sizeof(binder_size_t) * last_bbo->parent;
1963 if (binder_alloc_copy_from_buffer(&proc->alloc,
1964 &last_obj_offset,
1965 b, buffer_offset,
1966 sizeof(last_obj_offset)))
1967 return false;
1968 }
1969 return (fixup_offset >= last_min_offset);
1970 }
1971
1972 /**
1973 * struct binder_task_work_cb - for deferred close
1974 *
1975 * @twork: callback_head for task work
1976 * @file: file to close
1977 *
1978 * Structure to pass task work to be handled after
1979 * returning from binder_ioctl() via task_work_add().
1980 */
1981 struct binder_task_work_cb {
1982 struct callback_head twork;
1983 struct file *file;
1984 };
1985
1986 /**
1987 * binder_do_fd_close() - close list of file descriptors
1988 * @twork: callback head for task work
1989 *
1990 * It is not safe to call ksys_close() during the binder_ioctl()
1991 * function if there is a chance that binder's own file descriptor
1992 * might be closed. This is to meet the requirements for using
1993 * fdget() (see comments for __fget_light()). Therefore use
1994 * task_work_add() to schedule the close operation once we have
1995 * returned from binder_ioctl(). This function is a callback
1996 * for that mechanism and does the actual ksys_close() on the
1997 * given file descriptor.
1998 */
1999 static void binder_do_fd_close(struct callback_head *twork)
2000 {
2001 struct binder_task_work_cb *twcb = container_of(twork,
2002 struct binder_task_work_cb, twork);
2003
2004 fput(twcb->file);
2005 kfree(twcb);
2006 }
2007
2008 /**
2009 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2010 * @fd: file-descriptor to close
2011 *
2012 * See comments in binder_do_fd_close(). This function is used to schedule
2013 * a file-descriptor to be closed after returning from binder_ioctl().
2014 */
2015 static void binder_deferred_fd_close(int fd)
2016 {
2017 struct binder_task_work_cb *twcb;
2018
2019 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2020 if (!twcb)
2021 return;
2022 init_task_work(&twcb->twork, binder_do_fd_close);
2023 twcb->file = file_close_fd(fd);
2024 if (twcb->file) {
2025 // pin it until binder_do_fd_close(); see comments there
2026 get_file(twcb->file);
2027 filp_close(twcb->file, current->files);
2028 task_work_add(current, &twcb->twork, TWA_RESUME);
2029 } else {
2030 kfree(twcb);
2031 }
2032 }
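/*
 * Illustrative sketch (not part of the driver): the deferred-close flow.
 * The fd slot is released immediately, but the final fput() runs from
 * task work once the thread has returned to user space:
 *
 *	binder_deferred_fd_close(fd);
 *	  file_close_fd(fd);		// fd table entry released now
 *	  get_file(file);		// pin the struct file
 *	  filp_close(file, files);	// flush and notify
 *	  task_work_add(current, ...);	// schedule binder_do_fd_close()
 *	// ... binder_ioctl() returns to user space ...
 *	binder_do_fd_close();		// fput() drops the pinned reference
 */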
2033
2034 static void binder_transaction_buffer_release(struct binder_proc *proc,
2035 struct binder_thread *thread,
2036 struct binder_buffer *buffer,
2037 binder_size_t off_end_offset,
2038 bool is_failure)
2039 {
2040 int debug_id = buffer->debug_id;
2041 binder_size_t off_start_offset, buffer_offset;
2042
2043 binder_debug(BINDER_DEBUG_TRANSACTION,
2044 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2045 proc->pid, buffer->debug_id,
2046 buffer->data_size, buffer->offsets_size,
2047 (unsigned long long)off_end_offset);
2048
2049 if (buffer->target_node)
2050 binder_dec_node(buffer->target_node, 1, 0);
2051
2052 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2053
2054 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2055 buffer_offset += sizeof(binder_size_t)) {
2056 struct binder_object_header *hdr;
2057 size_t object_size = 0;
2058 struct binder_object object;
2059 binder_size_t object_offset;
2060
2061 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2062 buffer, buffer_offset,
2063 sizeof(object_offset)))
2064 object_size = binder_get_object(proc, NULL, buffer,
2065 object_offset, &object);
2066 if (object_size == 0) {
2067 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2068 debug_id, (u64)object_offset, buffer->data_size);
2069 continue;
2070 }
2071 hdr = &object.hdr;
2072 switch (hdr->type) {
2073 case BINDER_TYPE_BINDER:
2074 case BINDER_TYPE_WEAK_BINDER: {
2075 struct flat_binder_object *fp;
2076 struct binder_node *node;
2077
2078 fp = to_flat_binder_object(hdr);
2079 node = binder_get_node(proc, fp->binder);
2080 if (node == NULL) {
2081 pr_err("transaction release %d bad node %016llx\n",
2082 debug_id, (u64)fp->binder);
2083 break;
2084 }
2085 binder_debug(BINDER_DEBUG_TRANSACTION,
2086 " node %d u%016llx\n",
2087 node->debug_id, (u64)node->ptr);
2088 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2089 0);
2090 binder_put_node(node);
2091 } break;
2092 case BINDER_TYPE_HANDLE:
2093 case BINDER_TYPE_WEAK_HANDLE: {
2094 struct flat_binder_object *fp;
2095 struct binder_ref_data rdata;
2096 int ret;
2097
2098 fp = to_flat_binder_object(hdr);
2099 ret = binder_dec_ref_for_handle(proc, fp->handle,
2100 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2101
2102 if (ret) {
2103 pr_err("transaction release %d bad handle %d, ret = %d\n",
2104 debug_id, fp->handle, ret);
2105 break;
2106 }
2107 binder_debug(BINDER_DEBUG_TRANSACTION,
2108 " ref %d desc %d\n",
2109 rdata.debug_id, rdata.desc);
2110 } break;
2111
2112 case BINDER_TYPE_FD: {
2113 /*
2114 * No need to close the file here since user-space
2115 * closes it for successfully delivered
2116 * transactions. For transactions that weren't
2117 * delivered, the new fd was never allocated so
2118 * there is no need to close and the fput on the
2119 * file is done when the transaction is torn
2120 * down.
2121 */
2122 } break;
2123 case BINDER_TYPE_PTR:
2124 /*
2125 * Nothing to do here, this will get cleaned up when the
2126 * transaction buffer gets freed
2127 */
2128 break;
2129 case BINDER_TYPE_FDA: {
2130 struct binder_fd_array_object *fda;
2131 struct binder_buffer_object *parent;
2132 struct binder_object ptr_object;
2133 binder_size_t fda_offset;
2134 size_t fd_index;
2135 binder_size_t fd_buf_size;
2136 binder_size_t num_valid;
2137
2138 if (is_failure) {
2139 /*
2140 * The fd fixups have not been applied so no
2141 * fds need to be closed.
2142 */
2143 continue;
2144 }
2145
2146 num_valid = (buffer_offset - off_start_offset) /
2147 sizeof(binder_size_t);
2148 fda = to_binder_fd_array_object(hdr);
2149 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2150 fda->parent,
2151 off_start_offset,
2152 NULL,
2153 num_valid);
2154 if (!parent) {
2155 pr_err("transaction release %d bad parent offset\n",
2156 debug_id);
2157 continue;
2158 }
2159 fd_buf_size = sizeof(u32) * fda->num_fds;
2160 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2161 pr_err("transaction release %d invalid number of fds (%lld)\n",
2162 debug_id, (u64)fda->num_fds);
2163 continue;
2164 }
2165 if (fd_buf_size > parent->length ||
2166 fda->parent_offset > parent->length - fd_buf_size) {
2167 /* No space for all file descriptors here. */
2168 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2169 debug_id, (u64)fda->num_fds);
2170 continue;
2171 }
2172 /*
2173 * the source data for binder_buffer_object is visible
2174 * to user-space and the @buffer element is the user
2175 * pointer to the buffer_object containing the fd_array.
2176 * Convert the address to an offset relative to
2177 * the base of the transaction buffer.
2178 */
2179 fda_offset = parent->buffer - buffer->user_data +
2180 fda->parent_offset;
2181 for (fd_index = 0; fd_index < fda->num_fds;
2182 fd_index++) {
2183 u32 fd;
2184 int err;
2185 binder_size_t offset = fda_offset +
2186 fd_index * sizeof(fd);
2187
2188 err = binder_alloc_copy_from_buffer(
2189 &proc->alloc, &fd, buffer,
2190 offset, sizeof(fd));
2191 WARN_ON(err);
2192 if (!err) {
2193 binder_deferred_fd_close(fd);
2194 /*
2195 * Need to make sure the thread goes
2196 * back to userspace to complete the
2197 * deferred close
2198 */
2199 if (thread)
2200 thread->looper_need_return = true;
2201 }
2202 }
2203 } break;
2204 default:
2205 pr_err("transaction release %d bad object type %x\n",
2206 debug_id, hdr->type);
2207 break;
2208 }
2209 }
2210 }
2211
2212 /* Clean up all the objects in the buffer */
2213 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2214 struct binder_thread *thread,
2215 struct binder_buffer *buffer,
2216 bool is_failure)
2217 {
2218 binder_size_t off_end_offset;
2219
2220 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2221 off_end_offset += buffer->offsets_size;
2222
2223 binder_transaction_buffer_release(proc, thread, buffer,
2224 off_end_offset, is_failure);
2225 }
2226
2227 static int binder_translate_binder(struct flat_binder_object *fp,
2228 struct binder_transaction *t,
2229 struct binder_thread *thread)
2230 {
2231 struct binder_node *node;
2232 struct binder_proc *proc = thread->proc;
2233 struct binder_proc *target_proc = t->to_proc;
2234 struct binder_ref_data rdata;
2235 int ret = 0;
2236
2237 node = binder_get_node(proc, fp->binder);
2238 if (!node) {
2239 node = binder_new_node(proc, fp);
2240 if (!node)
2241 return -ENOMEM;
2242 }
2243 if (fp->cookie != node->cookie) {
2244 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2245 proc->pid, thread->pid, (u64)fp->binder,
2246 node->debug_id, (u64)fp->cookie,
2247 (u64)node->cookie);
2248 ret = -EINVAL;
2249 goto done;
2250 }
2251 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2252 ret = -EPERM;
2253 goto done;
2254 }
2255
2256 ret = binder_inc_ref_for_node(target_proc, node,
2257 fp->hdr.type == BINDER_TYPE_BINDER,
2258 &thread->todo, &rdata);
2259 if (ret)
2260 goto done;
2261
2262 if (fp->hdr.type == BINDER_TYPE_BINDER)
2263 fp->hdr.type = BINDER_TYPE_HANDLE;
2264 else
2265 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2266 fp->binder = 0;
2267 fp->handle = rdata.desc;
2268 fp->cookie = 0;
2269
2270 trace_binder_transaction_node_to_ref(t, node, &rdata);
2271 binder_debug(BINDER_DEBUG_TRANSACTION,
2272 " node %d u%016llx -> ref %d desc %d\n",
2273 node->debug_id, (u64)node->ptr,
2274 rdata.debug_id, rdata.desc);
2275 done:
2276 binder_put_node(node);
2277 return ret;
2278 }
2279
2280 static int binder_translate_handle(struct flat_binder_object *fp,
2281 struct binder_transaction *t,
2282 struct binder_thread *thread)
2283 {
2284 struct binder_proc *proc = thread->proc;
2285 struct binder_proc *target_proc = t->to_proc;
2286 struct binder_node *node;
2287 struct binder_ref_data src_rdata;
2288 int ret = 0;
2289
2290 node = binder_get_node_from_ref(proc, fp->handle,
2291 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2292 if (!node) {
2293 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2294 proc->pid, thread->pid, fp->handle);
2295 return -EINVAL;
2296 }
2297 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2298 ret = -EPERM;
2299 goto done;
2300 }
2301
2302 binder_node_lock(node);
2303 if (node->proc == target_proc) {
2304 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2305 fp->hdr.type = BINDER_TYPE_BINDER;
2306 else
2307 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2308 fp->binder = node->ptr;
2309 fp->cookie = node->cookie;
2310 if (node->proc)
2311 binder_inner_proc_lock(node->proc);
2312 else
2313 __acquire(&node->proc->inner_lock);
2314 binder_inc_node_nilocked(node,
2315 fp->hdr.type == BINDER_TYPE_BINDER,
2316 0, NULL);
2317 if (node->proc)
2318 binder_inner_proc_unlock(node->proc);
2319 else
2320 __release(&node->proc->inner_lock);
2321 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2322 binder_debug(BINDER_DEBUG_TRANSACTION,
2323 " ref %d desc %d -> node %d u%016llx\n",
2324 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2325 (u64)node->ptr);
2326 binder_node_unlock(node);
2327 } else {
2328 struct binder_ref_data dest_rdata;
2329
2330 binder_node_unlock(node);
2331 ret = binder_inc_ref_for_node(target_proc, node,
2332 fp->hdr.type == BINDER_TYPE_HANDLE,
2333 NULL, &dest_rdata);
2334 if (ret)
2335 goto done;
2336
2337 fp->binder = 0;
2338 fp->handle = dest_rdata.desc;
2339 fp->cookie = 0;
2340 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2341 &dest_rdata);
2342 binder_debug(BINDER_DEBUG_TRANSACTION,
2343 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2344 src_rdata.debug_id, src_rdata.desc,
2345 dest_rdata.debug_id, dest_rdata.desc,
2346 node->debug_id);
2347 }
2348 done:
2349 binder_put_node(node);
2350 return ret;
2351 }
2352
2353 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2354 struct binder_transaction *t,
2355 struct binder_thread *thread,
2356 struct binder_transaction *in_reply_to)
2357 {
2358 struct binder_proc *proc = thread->proc;
2359 struct binder_proc *target_proc = t->to_proc;
2360 struct binder_txn_fd_fixup *fixup;
2361 struct file *file;
2362 int ret = 0;
2363 bool target_allows_fd;
2364
2365 if (in_reply_to)
2366 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2367 else
2368 target_allows_fd = t->buffer->target_node->accept_fds;
2369 if (!target_allows_fd) {
2370 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2371 proc->pid, thread->pid,
2372 in_reply_to ? "reply" : "transaction",
2373 fd);
2374 ret = -EPERM;
2375 goto err_fd_not_accepted;
2376 }
2377
2378 file = fget(fd);
2379 if (!file) {
2380 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2381 proc->pid, thread->pid, fd);
2382 ret = -EBADF;
2383 goto err_fget;
2384 }
2385 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2386 if (ret < 0) {
2387 ret = -EPERM;
2388 goto err_security;
2389 }
2390
2391 /*
2392 * Add fixup record for this transaction. The allocation
2393 * of the fd in the target needs to be done from a
2394 * target thread.
2395 */
2396 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2397 if (!fixup) {
2398 ret = -ENOMEM;
2399 goto err_alloc;
2400 }
2401 fixup->file = file;
2402 fixup->offset = fd_offset;
2403 fixup->target_fd = -1;
2404 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2405 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2406
2407 return ret;
2408
2409 err_alloc:
2410 err_security:
2411 fput(file);
2412 err_fget:
2413 err_fd_not_accepted:
2414 return ret;
2415 }
2416
2417 /**
2418 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2419 * @offset: offset in target buffer to fixup
2420 * @skip_size: bytes to skip in copy (fixup will be written later)
2421 * @fixup_data: data to write at fixup offset
2422 * @node: list node
2423 *
2424 * This is used for the pointer fixup list (pf) which is created and consumed
2425 * during binder_transaction() and is only accessed locally. No
2426 * locking is necessary.
2427 *
2428 * The list is ordered by @offset.
2429 */
2430 struct binder_ptr_fixup {
2431 binder_size_t offset;
2432 size_t skip_size;
2433 binder_uintptr_t fixup_data;
2434 struct list_head node;
2435 };
2436
2437 /**
2438 * struct binder_sg_copy - scatter-gather data to be copied
2439 * @offset: offset in target buffer
2440 * @sender_uaddr: user address in source buffer
2441 * @length: bytes to copy
2442 * @node: list node
2443 *
2444 * This is used for the sg copy list (sgc) which is created and consumed
2445 * during binder_transaction() and is only accessed locally. No
2446 * locking is necessary.
2447 *
2448 * The list is ordered by @offset.
2449 */
2450 struct binder_sg_copy {
2451 binder_size_t offset;
2452 const void __user *sender_uaddr;
2453 size_t length;
2454 struct list_head node;
2455 };
2456
2457 /**
2458 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2459 * @alloc: binder_alloc associated with @buffer
2460 * @buffer: binder buffer in target process
2461 * @sgc_head: list_head of scatter-gather copy list
2462 * @pf_head: list_head of pointer fixup list
2463 *
2464 * Processes all elements of @sgc_head, applying fixups from @pf_head
2465 * and copying the scatter-gather data from the source process' user
2466 * buffer to the target's buffer. It is expected that the list creation
2467 * and processing all occurs during binder_transaction() so these lists
2468 * are only accessed in local context.
2469 *
2470 * Return: 0=success, else -errno
2471 */
2472 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2473 struct binder_buffer *buffer,
2474 struct list_head *sgc_head,
2475 struct list_head *pf_head)
2476 {
2477 int ret = 0;
2478 struct binder_sg_copy *sgc, *tmpsgc;
2479 struct binder_ptr_fixup *tmppf;
2480 struct binder_ptr_fixup *pf =
2481 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2482 node);
2483
2484 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2485 size_t bytes_copied = 0;
2486
2487 while (bytes_copied < sgc->length) {
2488 size_t copy_size;
2489 size_t bytes_left = sgc->length - bytes_copied;
2490 size_t offset = sgc->offset + bytes_copied;
2491
2492 /*
2493 * We copy up to the fixup (pointed to by pf)
2494 */
2495 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2496 : bytes_left;
2497 if (!ret && copy_size)
2498 ret = binder_alloc_copy_user_to_buffer(
2499 alloc, buffer,
2500 offset,
2501 sgc->sender_uaddr + bytes_copied,
2502 copy_size);
2503 bytes_copied += copy_size;
2504 if (copy_size != bytes_left) {
2505 BUG_ON(!pf);
2506 /* we stopped at a fixup offset */
2507 if (pf->skip_size) {
2508 /*
2509 * we are just skipping. This is for
2510 * BINDER_TYPE_FDA where the translated
2511 * fds will be fixed up when we get
2512 * to target context.
2513 */
2514 bytes_copied += pf->skip_size;
2515 } else {
2516 /* apply the fixup indicated by pf */
2517 if (!ret)
2518 ret = binder_alloc_copy_to_buffer(
2519 alloc, buffer,
2520 pf->offset,
2521 &pf->fixup_data,
2522 sizeof(pf->fixup_data));
2523 bytes_copied += sizeof(pf->fixup_data);
2524 }
2525 list_del(&pf->node);
2526 kfree(pf);
2527 pf = list_first_entry_or_null(pf_head,
2528 struct binder_ptr_fixup, node);
2529 }
2530 }
2531 list_del(&sgc->node);
2532 kfree(sgc);
2533 }
2534 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2535 BUG_ON(pf->skip_size == 0);
2536 list_del(&pf->node);
2537 kfree(pf);
2538 }
2539 BUG_ON(!list_empty(sgc_head));
2540
2541 return ret > 0 ? -EINVAL : ret;
2542 }
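/*
 * Illustrative worked example (not part of the driver): one sg block of
 * 32 bytes at offset 0 with a pointer fixup at offset 8 (binder_uintptr_t
 * is 8 bytes). The loop copies up to the fixup, writes the translated
 * value, then resumes, so the untranslated sender value never lands in
 * the target buffer:
 *
 *	sgc: offset = 0, length = 32
 *	pf:  offset = 8, skip_size = 0, fixup_data = <translated pointer>
 *
 *	copy [0,8) from sgc->sender_uaddr
 *	write pf->fixup_data over [8,16)
 *	copy [16,32) from sgc->sender_uaddr + 16
 */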
2543
2544 /**
2545 * binder_cleanup_deferred_txn_lists() - free specified lists
2546 * @sgc_head: list_head of scatter-gather copy list
2547 * @pf_head: list_head of pointer fixup list
2548 *
2549 * Called to clean up @sgc_head and @pf_head if there is an
2550 * error.
2551 */
2552 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2553 struct list_head *pf_head)
2554 {
2555 struct binder_sg_copy *sgc, *tmpsgc;
2556 struct binder_ptr_fixup *pf, *tmppf;
2557
2558 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2559 list_del(&sgc->node);
2560 kfree(sgc);
2561 }
2562 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2563 list_del(&pf->node);
2564 kfree(pf);
2565 }
2566 }
2567
2568 /**
2569 * binder_defer_copy() - queue a scatter-gather buffer for copy
2570 * @sgc_head: list_head of scatter-gather copy list
2571 * @offset: binder buffer offset in target process
2572 * @sender_uaddr: user address in source process
2573 * @length: bytes to copy
2574 *
2575 * Specify a scatter-gather block to be copied. The actual copy must
2576 * be deferred until all the needed fixups are identified and queued.
2577 * Then the copy and fixups are done together so un-translated values
2578 * from the source are never visible in the target buffer.
2579 *
2580 * We are guaranteed that repeated calls to this function will have
2581 * monotonically increasing @offset values so the list will naturally
2582 * be ordered.
2583 *
2584 * Return: 0=success, else -errno
2585 */
2586 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2587 const void __user *sender_uaddr, size_t length)
2588 {
2589 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2590
2591 if (!bc)
2592 return -ENOMEM;
2593
2594 bc->offset = offset;
2595 bc->sender_uaddr = sender_uaddr;
2596 bc->length = length;
2597 INIT_LIST_HEAD(&bc->node);
2598
2599 /*
2600 * We are guaranteed that the deferred copies are in-order
2601 * so just add to the tail.
2602 */
2603 list_add_tail(&bc->node, sgc_head);
2604
2605 return 0;
2606 }
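/*
 * Illustrative sketch (not part of the driver): how a BINDER_TYPE_PTR
 * payload is queued rather than copied on the spot, as done later in
 * binder_transaction():
 *
 *	ret = binder_defer_copy(&sgc_head, sg_buf_offset,
 *				(const void __user *)(uintptr_t)bp->buffer,
 *				bp->length);
 *	// the actual copy happens together with all fixups in
 *	// binder_do_deferred_txn_copies()
 */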
2607
2608 /**
2609 * binder_add_fixup() - queue a fixup to be applied to sg copy
2610 * @pf_head: list_head of binder ptr fixup list
2611 * @offset: binder buffer offset in target process
2612 * @fixup: bytes to be copied for fixup
2613 * @skip_size: bytes to skip when copying (fixup will be applied later)
2614 *
2615 * Add the specified fixup to a list ordered by @offset. When copying
2616 * the scatter-gather buffers, the fixup will be copied instead of
2617 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2618 * will be applied later (in target process context), so we just skip
2619 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2620 * value in @fixup.
2621 *
2622 * This function is called *mostly* in @offset order, but there are
2623 * exceptions. Since out-of-order inserts are relatively uncommon,
2624 * we insert the new element by searching backward from the tail of
2625 * the list.
2626 *
2627 * Return: 0=success, else -errno
2628 */
2629 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2630 binder_uintptr_t fixup, size_t skip_size)
2631 {
2632 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2633 struct binder_ptr_fixup *tmppf;
2634
2635 if (!pf)
2636 return -ENOMEM;
2637
2638 pf->offset = offset;
2639 pf->fixup_data = fixup;
2640 pf->skip_size = skip_size;
2641 INIT_LIST_HEAD(&pf->node);
2642
2643 /* Fixups are *mostly* added in-order, but there are some
2644 * exceptions. Look backwards through list for insertion point.
2645 */
2646 list_for_each_entry_reverse(tmppf, pf_head, node) {
2647 if (tmppf->offset < pf->offset) {
2648 list_add(&pf->node, &tmppf->node);
2649 return 0;
2650 }
2651 }
2652 /*
2653 * if we get here, then the new offset is the lowest so
2654 * insert at the head
2655 */
2656 list_add(&pf->node, pf_head);
2657 return 0;
2658 }
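/*
 * Illustrative sketch (not part of the driver): the two kinds of fixups
 * queued in this file. A translated pointer is written during the
 * deferred copy; an fd array is only skipped here and patched later in
 * target context:
 *
 *	// pointer fixup: write bp->buffer at buffer_offset
 *	binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
 *
 *	// fd array: skip the fd slots; fds are fixed up in the target
 *	binder_add_fixup(pf_head, fda_offset, 0,
 *			 fda->num_fds * sizeof(u32));
 */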
2659
2660 static int binder_translate_fd_array(struct list_head *pf_head,
2661 struct binder_fd_array_object *fda,
2662 const void __user *sender_ubuffer,
2663 struct binder_buffer_object *parent,
2664 struct binder_buffer_object *sender_uparent,
2665 struct binder_transaction *t,
2666 struct binder_thread *thread,
2667 struct binder_transaction *in_reply_to)
2668 {
2669 binder_size_t fdi, fd_buf_size;
2670 binder_size_t fda_offset;
2671 const void __user *sender_ufda_base;
2672 struct binder_proc *proc = thread->proc;
2673 int ret;
2674
2675 if (fda->num_fds == 0)
2676 return 0;
2677
2678 fd_buf_size = sizeof(u32) * fda->num_fds;
2679 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2680 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2681 proc->pid, thread->pid, (u64)fda->num_fds);
2682 return -EINVAL;
2683 }
2684 if (fd_buf_size > parent->length ||
2685 fda->parent_offset > parent->length - fd_buf_size) {
2686 /* No space for all file descriptors here. */
2687 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2688 proc->pid, thread->pid, (u64)fda->num_fds);
2689 return -EINVAL;
2690 }
2691 /*
2692 * the source data for binder_buffer_object is visible
2693 * to user-space and the @buffer element is the user
2694 * pointer to the buffer_object containing the fd_array.
2695 * Convert the address to an offset relative to
2696 * the base of the transaction buffer.
2697 */
2698 fda_offset = parent->buffer - t->buffer->user_data +
2699 fda->parent_offset;
2700 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2701 fda->parent_offset;
2702
2703 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2704 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2705 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2706 proc->pid, thread->pid);
2707 return -EINVAL;
2708 }
2709 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2710 if (ret)
2711 return ret;
2712
2713 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2714 u32 fd;
2715 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2716 binder_size_t sender_uoffset = fdi * sizeof(fd);
2717
2718 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2719 if (!ret)
2720 ret = binder_translate_fd(fd, offset, t, thread,
2721 in_reply_to);
2722 if (ret)
2723 return ret > 0 ? -EINVAL : ret;
2724 }
2725 return 0;
2726 }
2727
2728 static int binder_fixup_parent(struct list_head *pf_head,
2729 struct binder_transaction *t,
2730 struct binder_thread *thread,
2731 struct binder_buffer_object *bp,
2732 binder_size_t off_start_offset,
2733 binder_size_t num_valid,
2734 binder_size_t last_fixup_obj_off,
2735 binder_size_t last_fixup_min_off)
2736 {
2737 struct binder_buffer_object *parent;
2738 struct binder_buffer *b = t->buffer;
2739 struct binder_proc *proc = thread->proc;
2740 struct binder_proc *target_proc = t->to_proc;
2741 struct binder_object object;
2742 binder_size_t buffer_offset;
2743 binder_size_t parent_offset;
2744
2745 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2746 return 0;
2747
2748 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2749 off_start_offset, &parent_offset,
2750 num_valid);
2751 if (!parent) {
2752 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2753 proc->pid, thread->pid);
2754 return -EINVAL;
2755 }
2756
2757 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2758 parent_offset, bp->parent_offset,
2759 last_fixup_obj_off,
2760 last_fixup_min_off)) {
2761 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2762 proc->pid, thread->pid);
2763 return -EINVAL;
2764 }
2765
2766 if (parent->length < sizeof(binder_uintptr_t) ||
2767 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2768 /* No space for a pointer here! */
2769 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2770 proc->pid, thread->pid);
2771 return -EINVAL;
2772 }
2773
2774 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2775
2776 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2777 }
2778
2779 /**
2780 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2781 * @t1: the pending async txn in the frozen process
2782 * @t2: the new async txn to supersede the outdated pending one
2783 *
2784 * Return: true if t2 can supersede t1
2785 * false if t2 cannot supersede t1
2786 */
2787 static bool binder_can_update_transaction(struct binder_transaction *t1,
2788 struct binder_transaction *t2)
2789 {
2790 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2791 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2792 return false;
2793 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2794 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2795 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2796 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2797 return true;
2798 return false;
2799 }
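/*
 * Illustrative sketch (not part of the driver): t2 supersedes t1 only
 * when both are one-way TF_UPDATE_TXN transactions to the same process
 * and node with matching code and flags; the frozen check itself happens
 * in binder_proc_transaction(). For example, two queued "update status"
 * async txns where only the newest matters:
 *
 *	t1: TF_ONE_WAY | TF_UPDATE_TXN, code = 42, node X	(stale)
 *	t2: TF_ONE_WAY | TF_UPDATE_TXN, code = 42, node X	(replaces t1)
 *
 *	A t2 with a different code, flags, or node leaves t1 queued.
 */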
2800
2801 /**
2802 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2803 * @t: new async transaction
2804 * @target_list: list to find outdated transaction
2805 *
2806 * Return: the outdated transaction if found
2807 * NULL if no outdated transaction can be found
2808 *
2809 * Requires the proc->inner_lock to be held.
2810 */
2811 static struct binder_transaction *
2812 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2813 struct list_head *target_list)
2814 {
2815 struct binder_work *w;
2816
2817 list_for_each_entry(w, target_list, entry) {
2818 struct binder_transaction *t_queued;
2819
2820 if (w->type != BINDER_WORK_TRANSACTION)
2821 continue;
2822 t_queued = container_of(w, struct binder_transaction, work);
2823 if (binder_can_update_transaction(t_queued, t))
2824 return t_queued;
2825 }
2826 return NULL;
2827 }
2828
2829 /**
2830 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2831 * @t: transaction to send
2832 * @proc: process to send the transaction to
2833 * @thread: thread in @proc to send the transaction to (may be NULL)
2834 *
2835 * This function queues a transaction to the specified process. It will try
2836 * to find a thread in the target process to handle the transaction and
2837 * wake it up. If no thread is found, the work is queued to the proc
2838 * waitqueue.
2839 *
2840 * If the @thread parameter is not NULL, the transaction is always queued
2841 * to the waitlist of that specific thread.
2842 *
2843 * Return: 0 if the transaction was successfully queued
2844 * BR_DEAD_REPLY if the target process or thread is dead
2845 * BR_FROZEN_REPLY if the target process or thread is frozen and
2846 * the sync transaction was rejected
2847 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2848 * and the async transaction was successfully queued
2849 */
2850 static int binder_proc_transaction(struct binder_transaction *t,
2851 struct binder_proc *proc,
2852 struct binder_thread *thread)
2853 {
2854 struct binder_node *node = t->buffer->target_node;
2855 bool oneway = !!(t->flags & TF_ONE_WAY);
2856 bool pending_async = false;
2857 struct binder_transaction *t_outdated = NULL;
2858 bool frozen = false;
2859
2860 BUG_ON(!node);
2861 binder_node_lock(node);
2862 if (oneway) {
2863 BUG_ON(thread);
2864 if (node->has_async_transaction)
2865 pending_async = true;
2866 else
2867 node->has_async_transaction = true;
2868 }
2869
2870 binder_inner_proc_lock(proc);
2871 if (proc->is_frozen) {
2872 frozen = true;
2873 proc->sync_recv |= !oneway;
2874 proc->async_recv |= oneway;
2875 }
2876
2877 if ((frozen && !oneway) || proc->is_dead ||
2878 (thread && thread->is_dead)) {
2879 binder_inner_proc_unlock(proc);
2880 binder_node_unlock(node);
2881 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2882 }
2883
2884 if (!thread && !pending_async)
2885 thread = binder_select_thread_ilocked(proc);
2886
2887 if (thread) {
2888 binder_enqueue_thread_work_ilocked(thread, &t->work);
2889 } else if (!pending_async) {
2890 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2891 } else {
2892 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2893 t_outdated = binder_find_outdated_transaction_ilocked(t,
2894 &node->async_todo);
2895 if (t_outdated) {
2896 binder_debug(BINDER_DEBUG_TRANSACTION,
2897 "txn %d supersedes %d\n",
2898 t->debug_id, t_outdated->debug_id);
2899 list_del_init(&t_outdated->work.entry);
2900 proc->outstanding_txns--;
2901 }
2902 }
2903 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2904 }
2905
2906 if (!pending_async)
2907 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2908
2909 proc->outstanding_txns++;
2910 binder_inner_proc_unlock(proc);
2911 binder_node_unlock(node);
2912
2913 /*
2914 * To reduce potential contention, free the outdated transaction and
2915 * buffer after releasing the locks.
2916 */
2917 if (t_outdated) {
2918 struct binder_buffer *buffer = t_outdated->buffer;
2919
2920 t_outdated->buffer = NULL;
2921 buffer->transaction = NULL;
2922 trace_binder_transaction_update_buffer_release(buffer);
2923 binder_release_entire_buffer(proc, NULL, buffer, false);
2924 binder_alloc_free_buf(&proc->alloc, buffer);
2925 kfree(t_outdated);
2926 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2927 }
2928
2929 if (oneway && frozen)
2930 return BR_TRANSACTION_PENDING_FROZEN;
2931
2932 return 0;
2933 }
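/*
 * Illustrative sketch (not part of the driver; caller code is
 * hypothetical): how the tri-state result is expected to be handled:
 *
 *	ret = binder_proc_transaction(t, target_proc, target_thread);
 *	switch (ret) {
 *	case 0:					// queued, target woken
 *		break;
 *	case BR_TRANSACTION_PENDING_FROZEN:	// async txn queued, but
 *		break;				// the target is frozen
 *	case BR_FROZEN_REPLY:			// sync txn rejected
 *	case BR_DEAD_REPLY:			// target died
 *		// fail the transaction back to the sender
 *		break;
 *	}
 */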
2934
2935 /**
2936 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2937 * @node: struct binder_node for which to get refs
2938 * @procp: returns @node->proc if valid
2939 * @error: if no @procp then returns BR_DEAD_REPLY
2940 *
2941 * User-space normally keeps the node alive when creating a transaction
2942 * since it has a reference to the target. The local strong ref keeps it
2943 * alive if the sending process dies before the target process processes
2944 * the transaction. If the source process is malicious or has a reference
2945 * counting bug, relying on the local strong ref can fail.
2946 *
2947 * Since user-space can cause the local strong ref to go away, we also take
2948 * a tmpref on the node to ensure it survives while we are constructing
2949 * the transaction. We also need a tmpref on the proc while we are
2950 * constructing the transaction, so we take that here as well.
2951 *
2952 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2953 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2954 * target proc has died, @error is set to BR_DEAD_REPLY.
2955 */
2956 static struct binder_node *binder_get_node_refs_for_txn(
2957 struct binder_node *node,
2958 struct binder_proc **procp,
2959 uint32_t *error)
2960 {
2961 struct binder_node *target_node = NULL;
2962
2963 binder_node_inner_lock(node);
2964 if (node->proc) {
2965 target_node = node;
2966 binder_inc_node_nilocked(node, 1, 0, NULL);
2967 binder_inc_node_tmpref_ilocked(node);
2968 node->proc->tmp_ref++;
2969 *procp = node->proc;
2970 } else
2971 *error = BR_DEAD_REPLY;
2972 binder_node_inner_unlock(node);
2973
2974 return target_node;
2975 }
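/*
 * Illustrative sketch (not part of the driver): every reference taken
 * here must be paired with a put once the transaction is constructed or
 * has failed, roughly:
 *
 *	target_node = binder_get_node_refs_for_txn(node, &target_proc, &err);
 *	if (!target_node)
 *		return err;			// BR_DEAD_REPLY
 *	... build and queue the transaction ...
 *	binder_dec_node(target_node, 1, 0);	// undo the strong inc
 *	binder_dec_node_tmpref(target_node);	// undo the node tmpref
 *	binder_proc_dec_tmpref(target_proc);	// undo proc->tmp_ref
 */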
2976
2977 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2978 uint32_t command, int32_t param)
2979 {
2980 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2981
2982 if (!from) {
2983 /* annotation for sparse */
2984 __release(&from->proc->inner_lock);
2985 return;
2986 }
2987
2988 /* don't override existing errors */
2989 if (from->ee.command == BR_OK)
2990 binder_set_extended_error(&from->ee, id, command, param);
2991 binder_inner_proc_unlock(from->proc);
2992 binder_thread_dec_tmpref(from);
2993 }
2994
2995 static void binder_transaction(struct binder_proc *proc,
2996 struct binder_thread *thread,
2997 struct binder_transaction_data *tr, int reply,
2998 binder_size_t extra_buffers_size)
2999 {
3000 int ret;
3001 struct binder_transaction *t;
3002 struct binder_work *w;
3003 struct binder_work *tcomplete;
3004 binder_size_t buffer_offset = 0;
3005 binder_size_t off_start_offset, off_end_offset;
3006 binder_size_t off_min;
3007 binder_size_t sg_buf_offset, sg_buf_end_offset;
3008 binder_size_t user_offset = 0;
3009 struct binder_proc *target_proc = NULL;
3010 struct binder_thread *target_thread = NULL;
3011 struct binder_node *target_node = NULL;
3012 struct binder_transaction *in_reply_to = NULL;
3013 struct binder_transaction_log_entry *e;
3014 uint32_t return_error = 0;
3015 uint32_t return_error_param = 0;
3016 uint32_t return_error_line = 0;
3017 binder_size_t last_fixup_obj_off = 0;
3018 binder_size_t last_fixup_min_off = 0;
3019 struct binder_context *context = proc->context;
3020 int t_debug_id = atomic_inc_return(&binder_last_id);
3021 ktime_t t_start_time = ktime_get();
3022 struct lsm_context lsmctx = { };
3023 struct list_head sgc_head;
3024 struct list_head pf_head;
3025 const void __user *user_buffer = (const void __user *)
3026 (uintptr_t)tr->data.ptr.buffer;
3027 INIT_LIST_HEAD(&sgc_head);
3028 INIT_LIST_HEAD(&pf_head);
3029
3030 e = binder_transaction_log_add(&binder_transaction_log);
3031 e->debug_id = t_debug_id;
3032 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3033 e->from_proc = proc->pid;
3034 e->from_thread = thread->pid;
3035 e->target_handle = tr->target.handle;
3036 e->data_size = tr->data_size;
3037 e->offsets_size = tr->offsets_size;
3038 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3039
3040 binder_inner_proc_lock(proc);
3041 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3042 binder_inner_proc_unlock(proc);
3043
3044 if (reply) {
3045 binder_inner_proc_lock(proc);
3046 in_reply_to = thread->transaction_stack;
3047 if (in_reply_to == NULL) {
3048 binder_inner_proc_unlock(proc);
3049 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3050 proc->pid, thread->pid);
3051 return_error = BR_FAILED_REPLY;
3052 return_error_param = -EPROTO;
3053 return_error_line = __LINE__;
3054 goto err_empty_call_stack;
3055 }
3056 if (in_reply_to->to_thread != thread) {
3057 spin_lock(&in_reply_to->lock);
3058 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3059 proc->pid, thread->pid, in_reply_to->debug_id,
3060 in_reply_to->to_proc ?
3061 in_reply_to->to_proc->pid : 0,
3062 in_reply_to->to_thread ?
3063 in_reply_to->to_thread->pid : 0);
3064 spin_unlock(&in_reply_to->lock);
3065 binder_inner_proc_unlock(proc);
3066 return_error = BR_FAILED_REPLY;
3067 return_error_param = -EPROTO;
3068 return_error_line = __LINE__;
3069 in_reply_to = NULL;
3070 goto err_bad_call_stack;
3071 }
3072 thread->transaction_stack = in_reply_to->to_parent;
3073 binder_inner_proc_unlock(proc);
3074 binder_set_nice(in_reply_to->saved_priority);
3075 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3076 if (target_thread == NULL) {
3077 /* annotation for sparse */
3078 __release(&target_thread->proc->inner_lock);
3079 binder_txn_error("%d:%d reply target not found\n",
3080 thread->pid, proc->pid);
3081 return_error = BR_DEAD_REPLY;
3082 return_error_line = __LINE__;
3083 goto err_dead_binder;
3084 }
3085 if (target_thread->transaction_stack != in_reply_to) {
3086 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3087 proc->pid, thread->pid,
3088 target_thread->transaction_stack ?
3089 target_thread->transaction_stack->debug_id : 0,
3090 in_reply_to->debug_id);
3091 binder_inner_proc_unlock(target_thread->proc);
3092 return_error = BR_FAILED_REPLY;
3093 return_error_param = -EPROTO;
3094 return_error_line = __LINE__;
3095 in_reply_to = NULL;
3096 target_thread = NULL;
3097 goto err_dead_binder;
3098 }
3099 target_proc = target_thread->proc;
3100 target_proc->tmp_ref++;
3101 binder_inner_proc_unlock(target_thread->proc);
3102 } else {
3103 if (tr->target.handle) {
3104 struct binder_ref *ref;
3105
3106 /*
3107 * There must already be a strong ref
3108 * on this node. If so, do a strong
3109 * increment on the node to ensure it
3110 * stays alive until the transaction is
3111 * done.
3112 */
3113 binder_proc_lock(proc);
3114 ref = binder_get_ref_olocked(proc, tr->target.handle,
3115 true);
3116 if (ref) {
3117 target_node = binder_get_node_refs_for_txn(
3118 ref->node, &target_proc,
3119 &return_error);
3120 } else {
3121 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3122 proc->pid, thread->pid, tr->target.handle);
3123 return_error = BR_FAILED_REPLY;
3124 }
3125 binder_proc_unlock(proc);
3126 } else {
3127 mutex_lock(&context->context_mgr_node_lock);
3128 target_node = context->binder_context_mgr_node;
3129 if (target_node)
3130 target_node = binder_get_node_refs_for_txn(
3131 target_node, &target_proc,
3132 &return_error);
3133 else
3134 return_error = BR_DEAD_REPLY;
3135 mutex_unlock(&context->context_mgr_node_lock);
3136 if (target_node && target_proc->pid == proc->pid) {
3137 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3138 proc->pid, thread->pid);
3139 return_error = BR_FAILED_REPLY;
3140 return_error_param = -EINVAL;
3141 return_error_line = __LINE__;
3142 goto err_invalid_target_handle;
3143 }
3144 }
3145 if (!target_node) {
3146 binder_txn_error("%d:%d cannot find target node\n",
3147 thread->pid, proc->pid);
3148 /*
3149 * return_error is set above
3150 */
3151 return_error_param = -EINVAL;
3152 return_error_line = __LINE__;
3153 goto err_dead_binder;
3154 }
3155 e->to_node = target_node->debug_id;
3156 if (WARN_ON(proc == target_proc)) {
3157 binder_txn_error("%d:%d self transactions not allowed\n",
3158 thread->pid, proc->pid);
3159 return_error = BR_FAILED_REPLY;
3160 return_error_param = -EINVAL;
3161 return_error_line = __LINE__;
3162 goto err_invalid_target_handle;
3163 }
3164 if (security_binder_transaction(proc->cred,
3165 target_proc->cred) < 0) {
3166 binder_txn_error("%d:%d transaction credentials failed\n",
3167 thread->pid, proc->pid);
3168 return_error = BR_FAILED_REPLY;
3169 return_error_param = -EPERM;
3170 return_error_line = __LINE__;
3171 goto err_invalid_target_handle;
3172 }
3173 binder_inner_proc_lock(proc);
3174
3175 w = list_first_entry_or_null(&thread->todo,
3176 struct binder_work, entry);
3177 if (!(tr->flags & TF_ONE_WAY) && w &&
3178 w->type == BINDER_WORK_TRANSACTION) {
3179 /*
3180 * Do not allow new outgoing transaction from a
3181 * thread that has a transaction at the head of
3182 * its todo list. Only need to check the head
3183 * because binder_select_thread_ilocked picks a
3184 * thread from proc->waiting_threads to enqueue
3185 * the transaction, and nothing is queued to the
3186 * todo list while the thread is on waiting_threads.
3187 */
3188 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3189 proc->pid, thread->pid);
3190 binder_inner_proc_unlock(proc);
3191 return_error = BR_FAILED_REPLY;
3192 return_error_param = -EPROTO;
3193 return_error_line = __LINE__;
3194 goto err_bad_todo_list;
3195 }
3196
3197 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3198 struct binder_transaction *tmp;
3199
3200 tmp = thread->transaction_stack;
3201 if (tmp->to_thread != thread) {
3202 spin_lock(&tmp->lock);
3203 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3204 proc->pid, thread->pid, tmp->debug_id,
3205 tmp->to_proc ? tmp->to_proc->pid : 0,
3206 tmp->to_thread ?
3207 tmp->to_thread->pid : 0);
3208 spin_unlock(&tmp->lock);
3209 binder_inner_proc_unlock(proc);
3210 return_error = BR_FAILED_REPLY;
3211 return_error_param = -EPROTO;
3212 return_error_line = __LINE__;
3213 goto err_bad_call_stack;
3214 }
3215 while (tmp) {
3216 struct binder_thread *from;
3217
3218 spin_lock(&tmp->lock);
3219 from = tmp->from;
3220 if (from && from->proc == target_proc) {
3221 atomic_inc(&from->tmp_ref);
3222 target_thread = from;
3223 spin_unlock(&tmp->lock);
3224 break;
3225 }
3226 spin_unlock(&tmp->lock);
3227 tmp = tmp->from_parent;
3228 }
3229 }
3230 binder_inner_proc_unlock(proc);
3231 }
3232 if (target_thread)
3233 e->to_thread = target_thread->pid;
3234 e->to_proc = target_proc->pid;
3235
3236 /* TODO: reuse incoming transaction for reply */
3237 t = kzalloc(sizeof(*t), GFP_KERNEL);
3238 if (t == NULL) {
3239 binder_txn_error("%d:%d cannot allocate transaction\n",
3240 thread->pid, proc->pid);
3241 return_error = BR_FAILED_REPLY;
3242 return_error_param = -ENOMEM;
3243 return_error_line = __LINE__;
3244 goto err_alloc_t_failed;
3245 }
3246 INIT_LIST_HEAD(&t->fd_fixups);
3247 binder_stats_created(BINDER_STAT_TRANSACTION);
3248 spin_lock_init(&t->lock);
3249
3250 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3251 if (tcomplete == NULL) {
3252 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3253 thread->pid, proc->pid);
3254 return_error = BR_FAILED_REPLY;
3255 return_error_param = -ENOMEM;
3256 return_error_line = __LINE__;
3257 goto err_alloc_tcomplete_failed;
3258 }
3259 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3260
3261 t->debug_id = t_debug_id;
3262 t->start_time = t_start_time;
3263
3264 if (reply)
3265 binder_debug(BINDER_DEBUG_TRANSACTION,
3266 "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3267 proc->pid, thread->pid, t->debug_id,
3268 target_proc->pid, target_thread->pid,
3269 (u64)tr->data_size, (u64)tr->offsets_size,
3270 (u64)extra_buffers_size);
3271 else
3272 binder_debug(BINDER_DEBUG_TRANSACTION,
3273 "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3274 proc->pid, thread->pid, t->debug_id,
3275 target_proc->pid, target_node->debug_id,
3276 (u64)tr->data_size, (u64)tr->offsets_size,
3277 (u64)extra_buffers_size);
3278
3279 if (!reply && !(tr->flags & TF_ONE_WAY))
3280 t->from = thread;
3281 else
3282 t->from = NULL;
3283 t->from_pid = proc->pid;
3284 t->from_tid = thread->pid;
3285 t->sender_euid = task_euid(proc->tsk);
3286 t->to_proc = target_proc;
3287 t->to_thread = target_thread;
3288 t->code = tr->code;
3289 t->flags = tr->flags;
3290 t->priority = task_nice(current);
3291
3292 if (target_node && target_node->txn_security_ctx) {
3293 u32 secid;
3294 size_t added_size;
3295
3296 security_cred_getsecid(proc->cred, &secid);
3297 ret = security_secid_to_secctx(secid, &lsmctx);
3298 if (ret < 0) {
3299 binder_txn_error("%d:%d failed to get security context\n",
3300 thread->pid, proc->pid);
3301 return_error = BR_FAILED_REPLY;
3302 return_error_param = ret;
3303 return_error_line = __LINE__;
3304 goto err_get_secctx_failed;
3305 }
3306 added_size = ALIGN(lsmctx.len, sizeof(u64));
3307 extra_buffers_size += added_size;
3308 if (extra_buffers_size < added_size) {
3309 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3310 thread->pid, proc->pid);
3311 return_error = BR_FAILED_REPLY;
3312 return_error_param = -EINVAL;
3313 return_error_line = __LINE__;
3314 goto err_bad_extra_size;
3315 }
3316 }
3317
3318 trace_binder_transaction(reply, t, target_node);
3319
3320 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3321 tr->offsets_size, extra_buffers_size,
3322 !reply && (t->flags & TF_ONE_WAY));
3323 if (IS_ERR(t->buffer)) {
3324 char *s;
3325
3326 ret = PTR_ERR(t->buffer);
3327 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3328 : (ret == -ENOSPC) ? ": no space left"
3329 : (ret == -ENOMEM) ? ": memory allocation failed"
3330 : "";
3331 binder_txn_error("cannot allocate buffer%s", s);
3332
3333 return_error_param = PTR_ERR(t->buffer);
3334 return_error = return_error_param == -ESRCH ?
3335 BR_DEAD_REPLY : BR_FAILED_REPLY;
3336 return_error_line = __LINE__;
3337 t->buffer = NULL;
3338 goto err_binder_alloc_buf_failed;
3339 }
3340 if (lsmctx.context) {
3341 int err;
3342 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3343 ALIGN(tr->offsets_size, sizeof(void *)) +
3344 ALIGN(extra_buffers_size, sizeof(void *)) -
3345 ALIGN(lsmctx.len, sizeof(u64));
3346
3347 t->security_ctx = t->buffer->user_data + buf_offset;
3348 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3349 t->buffer, buf_offset,
3350 lsmctx.context, lsmctx.len);
3351 if (err) {
3352 t->security_ctx = 0;
3353 WARN_ON(1);
3354 }
3355 security_release_secctx(&lsmctx);
3356 lsmctx.context = NULL;
3357 }
3358 t->buffer->debug_id = t->debug_id;
3359 t->buffer->transaction = t;
3360 t->buffer->target_node = target_node;
3361 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3362 trace_binder_transaction_alloc_buf(t->buffer);
3363
3364 if (binder_alloc_copy_user_to_buffer(
3365 &target_proc->alloc,
3366 t->buffer,
3367 ALIGN(tr->data_size, sizeof(void *)),
3368 (const void __user *)
3369 (uintptr_t)tr->data.ptr.offsets,
3370 tr->offsets_size)) {
3371 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3372 proc->pid, thread->pid);
3373 return_error = BR_FAILED_REPLY;
3374 return_error_param = -EFAULT;
3375 return_error_line = __LINE__;
3376 goto err_copy_data_failed;
3377 }
3378 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3379 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3380 proc->pid, thread->pid, (u64)tr->offsets_size);
3381 return_error = BR_FAILED_REPLY;
3382 return_error_param = -EINVAL;
3383 return_error_line = __LINE__;
3384 goto err_bad_offset;
3385 }
3386 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3387 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3388 proc->pid, thread->pid,
3389 (u64)extra_buffers_size);
3390 return_error = BR_FAILED_REPLY;
3391 return_error_param = -EINVAL;
3392 return_error_line = __LINE__;
3393 goto err_bad_offset;
3394 }
3395 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3396 buffer_offset = off_start_offset;
3397 off_end_offset = off_start_offset + tr->offsets_size;
3398 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3399 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3400 ALIGN(lsmctx.len, sizeof(u64));
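/*
 * Layout sketch (commentary only): given the offsets computed above,
 * the target-side buffer is carved up as
 *
 *   [ data             : tr->data_size                    ]
 *   [ pad              : to sizeof(void *)                ]
 *   [ offsets          : tr->offsets_size                 ]
 *   [ pad              : to sizeof(void *)                ]
 *   [ sg buffers       : extra_buffers_size minus secctx  ]
 *   [ security context : ALIGN(lsmctx.len, sizeof(u64))   ]
 *
 * sg_buf_offset..sg_buf_end_offset brackets the scatter-gather
 * region; the security context (if requested) was already copied
 * to the tail of the buffer above.
 */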
3401 off_min = 0;
3402 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3403 buffer_offset += sizeof(binder_size_t)) {
3404 struct binder_object_header *hdr;
3405 size_t object_size;
3406 struct binder_object object;
3407 binder_size_t object_offset;
3408 binder_size_t copy_size;
3409
3410 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3411 &object_offset,
3412 t->buffer,
3413 buffer_offset,
3414 sizeof(object_offset))) {
3415 binder_txn_error("%d:%d copy offset from buffer failed\n",
3416 thread->pid, proc->pid);
3417 return_error = BR_FAILED_REPLY;
3418 return_error_param = -EINVAL;
3419 return_error_line = __LINE__;
3420 goto err_bad_offset;
3421 }
3422
3423 /*
3424 * Copy the source user buffer up to the next object
3425 * that will be processed.
3426 */
3427 copy_size = object_offset - user_offset;
3428 if (copy_size && (user_offset > object_offset ||
3429 object_offset > tr->data_size ||
3430 binder_alloc_copy_user_to_buffer(
3431 &target_proc->alloc,
3432 t->buffer, user_offset,
3433 user_buffer + user_offset,
3434 copy_size))) {
3435 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3436 proc->pid, thread->pid);
3437 return_error = BR_FAILED_REPLY;
3438 return_error_param = -EFAULT;
3439 return_error_line = __LINE__;
3440 goto err_copy_data_failed;
3441 }
3442 object_size = binder_get_object(target_proc, user_buffer,
3443 t->buffer, object_offset, &object);
3444 if (object_size == 0 || object_offset < off_min) {
3445 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3446 proc->pid, thread->pid,
3447 (u64)object_offset,
3448 (u64)off_min,
3449 (u64)t->buffer->data_size);
3450 return_error = BR_FAILED_REPLY;
3451 return_error_param = -EINVAL;
3452 return_error_line = __LINE__;
3453 goto err_bad_offset;
3454 }
3455 /*
3456 * Set offset to the next buffer fragment to be
3457 * copied
3458 */
3459 user_offset = object_offset + object_size;
3460
3461 hdr = &object.hdr;
3462 off_min = object_offset + object_size;
3463 switch (hdr->type) {
3464 case BINDER_TYPE_BINDER:
3465 case BINDER_TYPE_WEAK_BINDER: {
3466 struct flat_binder_object *fp;
3467
3468 fp = to_flat_binder_object(hdr);
3469 ret = binder_translate_binder(fp, t, thread);
3470
3471 if (ret < 0 ||
3472 binder_alloc_copy_to_buffer(&target_proc->alloc,
3473 t->buffer,
3474 object_offset,
3475 fp, sizeof(*fp))) {
3476 binder_txn_error("%d:%d translate binder failed\n",
3477 thread->pid, proc->pid);
3478 return_error = BR_FAILED_REPLY;
3479 return_error_param = ret;
3480 return_error_line = __LINE__;
3481 goto err_translate_failed;
3482 }
3483 } break;
3484 case BINDER_TYPE_HANDLE:
3485 case BINDER_TYPE_WEAK_HANDLE: {
3486 struct flat_binder_object *fp;
3487
3488 fp = to_flat_binder_object(hdr);
3489 ret = binder_translate_handle(fp, t, thread);
3490 if (ret < 0 ||
3491 binder_alloc_copy_to_buffer(&target_proc->alloc,
3492 t->buffer,
3493 object_offset,
3494 fp, sizeof(*fp))) {
3495 binder_txn_error("%d:%d translate handle failed\n",
3496 thread->pid, proc->pid);
3497 return_error = BR_FAILED_REPLY;
3498 return_error_param = ret;
3499 return_error_line = __LINE__;
3500 goto err_translate_failed;
3501 }
3502 } break;
3503
3504 case BINDER_TYPE_FD: {
3505 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3506 binder_size_t fd_offset = object_offset +
3507 (uintptr_t)&fp->fd - (uintptr_t)fp;
3508 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3509 thread, in_reply_to);
3510
3511 fp->pad_binder = 0;
3512 if (ret < 0 ||
3513 binder_alloc_copy_to_buffer(&target_proc->alloc,
3514 t->buffer,
3515 object_offset,
3516 fp, sizeof(*fp))) {
3517 binder_txn_error("%d:%d translate fd failed\n",
3518 thread->pid, proc->pid);
3519 return_error = BR_FAILED_REPLY;
3520 return_error_param = ret;
3521 return_error_line = __LINE__;
3522 goto err_translate_failed;
3523 }
3524 } break;
3525 case BINDER_TYPE_FDA: {
3526 struct binder_object ptr_object;
3527 binder_size_t parent_offset;
3528 struct binder_object user_object;
3529 size_t user_parent_size;
3530 struct binder_fd_array_object *fda =
3531 to_binder_fd_array_object(hdr);
3532 size_t num_valid = (buffer_offset - off_start_offset) /
3533 sizeof(binder_size_t);
3534 struct binder_buffer_object *parent =
3535 binder_validate_ptr(target_proc, t->buffer,
3536 &ptr_object, fda->parent,
3537 off_start_offset,
3538 &parent_offset,
3539 num_valid);
3540 if (!parent) {
3541 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3542 proc->pid, thread->pid);
3543 return_error = BR_FAILED_REPLY;
3544 return_error_param = -EINVAL;
3545 return_error_line = __LINE__;
3546 goto err_bad_parent;
3547 }
3548 if (!binder_validate_fixup(target_proc, t->buffer,
3549 off_start_offset,
3550 parent_offset,
3551 fda->parent_offset,
3552 last_fixup_obj_off,
3553 last_fixup_min_off)) {
3554 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3555 proc->pid, thread->pid);
3556 return_error = BR_FAILED_REPLY;
3557 return_error_param = -EINVAL;
3558 return_error_line = __LINE__;
3559 goto err_bad_parent;
3560 }
3561 /*
3562 * We need to read the user version of the parent
3563 * object to get the original user offset
3564 */
3565 user_parent_size =
3566 binder_get_object(proc, user_buffer, t->buffer,
3567 parent_offset, &user_object);
3568 if (user_parent_size != sizeof(user_object.bbo)) {
3569 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3570 proc->pid, thread->pid,
3571 user_parent_size,
3572 sizeof(user_object.bbo));
3573 return_error = BR_FAILED_REPLY;
3574 return_error_param = -EINVAL;
3575 return_error_line = __LINE__;
3576 goto err_bad_parent;
3577 }
3578 ret = binder_translate_fd_array(&pf_head, fda,
3579 user_buffer, parent,
3580 &user_object.bbo, t,
3581 thread, in_reply_to);
3582 if (!ret)
3583 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3584 t->buffer,
3585 object_offset,
3586 fda, sizeof(*fda));
3587 if (ret) {
3588 binder_txn_error("%d:%d translate fd array failed\n",
3589 thread->pid, proc->pid);
3590 return_error = BR_FAILED_REPLY;
3591 return_error_param = ret > 0 ? -EINVAL : ret;
3592 return_error_line = __LINE__;
3593 goto err_translate_failed;
3594 }
3595 last_fixup_obj_off = parent_offset;
3596 last_fixup_min_off =
3597 fda->parent_offset + sizeof(u32) * fda->num_fds;
3598 } break;
3599 case BINDER_TYPE_PTR: {
3600 struct binder_buffer_object *bp =
3601 to_binder_buffer_object(hdr);
3602 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3603 size_t num_valid;
3604
3605 if (bp->length > buf_left) {
3606 binder_user_error("%d:%d got transaction with too large buffer\n",
3607 proc->pid, thread->pid);
3608 return_error = BR_FAILED_REPLY;
3609 return_error_param = -EINVAL;
3610 return_error_line = __LINE__;
3611 goto err_bad_offset;
3612 }
3613 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3614 (const void __user *)(uintptr_t)bp->buffer,
3615 bp->length);
3616 if (ret) {
3617 binder_txn_error("%d:%d deferred copy failed\n",
3618 thread->pid, proc->pid);
3619 return_error = BR_FAILED_REPLY;
3620 return_error_param = ret;
3621 return_error_line = __LINE__;
3622 goto err_translate_failed;
3623 }
3624 /* Fixup buffer pointer to target proc address space */
3625 bp->buffer = t->buffer->user_data + sg_buf_offset;
3626 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3627
3628 num_valid = (buffer_offset - off_start_offset) /
3629 sizeof(binder_size_t);
3630 ret = binder_fixup_parent(&pf_head, t,
3631 thread, bp,
3632 off_start_offset,
3633 num_valid,
3634 last_fixup_obj_off,
3635 last_fixup_min_off);
3636 if (ret < 0 ||
3637 binder_alloc_copy_to_buffer(&target_proc->alloc,
3638 t->buffer,
3639 object_offset,
3640 bp, sizeof(*bp))) {
3641 binder_txn_error("%d:%d failed to fixup parent\n",
3642 thread->pid, proc->pid);
3643 return_error = BR_FAILED_REPLY;
3644 return_error_param = ret;
3645 return_error_line = __LINE__;
3646 goto err_translate_failed;
3647 }
3648 last_fixup_obj_off = object_offset;
3649 last_fixup_min_off = 0;
3650 } break;
3651 default:
3652 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3653 proc->pid, thread->pid, hdr->type);
3654 return_error = BR_FAILED_REPLY;
3655 return_error_param = -EINVAL;
3656 return_error_line = __LINE__;
3657 goto err_bad_object_type;
3658 }
3659 }
3660 /* Done processing objects, copy the rest of the buffer */
3661 if (binder_alloc_copy_user_to_buffer(
3662 &target_proc->alloc,
3663 t->buffer, user_offset,
3664 user_buffer + user_offset,
3665 tr->data_size - user_offset)) {
3666 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3667 proc->pid, thread->pid);
3668 return_error = BR_FAILED_REPLY;
3669 return_error_param = -EFAULT;
3670 return_error_line = __LINE__;
3671 goto err_copy_data_failed;
3672 }
3673
3674 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3675 &sgc_head, &pf_head);
3676 if (ret) {
3677 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3678 proc->pid, thread->pid);
3679 return_error = BR_FAILED_REPLY;
3680 return_error_param = ret;
3681 return_error_line = __LINE__;
3682 goto err_copy_data_failed;
3683 }
3684 if (t->buffer->oneway_spam_suspect)
3685 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3686 else
3687 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3688 t->work.type = BINDER_WORK_TRANSACTION;
3689
3690 if (reply) {
3691 binder_enqueue_thread_work(thread, tcomplete);
3692 binder_inner_proc_lock(target_proc);
3693 if (target_thread->is_dead) {
3694 return_error = BR_DEAD_REPLY;
3695 binder_inner_proc_unlock(target_proc);
3696 goto err_dead_proc_or_thread;
3697 }
3698 BUG_ON(t->buffer->async_transaction != 0);
3699 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3700 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3701 target_proc->outstanding_txns++;
3702 binder_inner_proc_unlock(target_proc);
3703 wake_up_interruptible_sync(&target_thread->wait);
3704 binder_free_transaction(in_reply_to);
3705 } else if (!(t->flags & TF_ONE_WAY)) {
3706 BUG_ON(t->buffer->async_transaction != 0);
3707 binder_inner_proc_lock(proc);
3708 /*
3709 * Defer the TRANSACTION_COMPLETE, so we don't return to
3710 * userspace immediately; this allows the target process to
3711 * start processing this transaction right away, reducing
3712 * latency. We will then return the TRANSACTION_COMPLETE when
3713 * the target replies (or there is an error).
3714 */
3715 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3716 t->need_reply = 1;
3717 t->from_parent = thread->transaction_stack;
3718 thread->transaction_stack = t;
3719 binder_inner_proc_unlock(proc);
3720 return_error = binder_proc_transaction(t,
3721 target_proc, target_thread);
3722 if (return_error) {
3723 binder_inner_proc_lock(proc);
3724 binder_pop_transaction_ilocked(thread, t);
3725 binder_inner_proc_unlock(proc);
3726 goto err_dead_proc_or_thread;
3727 }
3728 } else {
3729 BUG_ON(target_node == NULL);
3730 BUG_ON(t->buffer->async_transaction != 1);
3731 return_error = binder_proc_transaction(t, target_proc, NULL);
3732 /*
3733 * Let the caller know when an async transaction reaches a frozen
3734 * process and is put in a pending queue, waiting for the target
3735 * process to be unfrozen.
3736 */
3737 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3738 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3739 binder_enqueue_thread_work(thread, tcomplete);
3740 if (return_error &&
3741 return_error != BR_TRANSACTION_PENDING_FROZEN)
3742 goto err_dead_proc_or_thread;
3743 }
3744 if (target_thread)
3745 binder_thread_dec_tmpref(target_thread);
3746 binder_proc_dec_tmpref(target_proc);
3747 if (target_node)
3748 binder_dec_node_tmpref(target_node);
3749 /*
3750 * write barrier to synchronize with initialization
3751 * of log entry
3752 */
3753 smp_wmb();
3754 WRITE_ONCE(e->debug_id_done, t_debug_id);
3755 return;
3756
3757 err_dead_proc_or_thread:
3758 binder_txn_error("%d:%d dead process or thread\n",
3759 thread->pid, proc->pid);
3760 return_error_line = __LINE__;
3761 binder_dequeue_work(proc, tcomplete);
3762 err_translate_failed:
3763 err_bad_object_type:
3764 err_bad_offset:
3765 err_bad_parent:
3766 err_copy_data_failed:
3767 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3768 binder_free_txn_fixups(t);
3769 trace_binder_transaction_failed_buffer_release(t->buffer);
3770 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3771 buffer_offset, true);
3772 if (target_node)
3773 binder_dec_node_tmpref(target_node);
3774 target_node = NULL;
3775 t->buffer->transaction = NULL;
3776 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3777 err_binder_alloc_buf_failed:
3778 err_bad_extra_size:
3779 if (lsmctx.context)
3780 security_release_secctx(&lsmctx);
3781 err_get_secctx_failed:
3782 kfree(tcomplete);
3783 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3784 err_alloc_tcomplete_failed:
3785 if (trace_binder_txn_latency_free_enabled())
3786 binder_txn_latency_free(t);
3787 kfree(t);
3788 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3789 err_alloc_t_failed:
3790 err_bad_todo_list:
3791 err_bad_call_stack:
3792 err_empty_call_stack:
3793 err_dead_binder:
3794 err_invalid_target_handle:
3795 if (target_node) {
3796 binder_dec_node(target_node, 1, 0);
3797 binder_dec_node_tmpref(target_node);
3798 }
3799
3800 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3801 "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3802 proc->pid, thread->pid, reply ? "reply" :
3803 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3804 target_proc ? target_proc->pid : 0,
3805 target_thread ? target_thread->pid : 0,
3806 t_debug_id, return_error, return_error_param,
3807 tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3808 return_error_line);
3809
3810 if (target_thread)
3811 binder_thread_dec_tmpref(target_thread);
3812 if (target_proc)
3813 binder_proc_dec_tmpref(target_proc);
3814
3815 {
3816 struct binder_transaction_log_entry *fe;
3817
3818 e->return_error = return_error;
3819 e->return_error_param = return_error_param;
3820 e->return_error_line = return_error_line;
3821 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3822 *fe = *e;
3823 /*
3824 * write barrier to synchronize with initialization
3825 * of log entry
3826 */
3827 smp_wmb();
3828 WRITE_ONCE(e->debug_id_done, t_debug_id);
3829 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3830 }
3831
3832 BUG_ON(thread->return_error.cmd != BR_OK);
3833 if (in_reply_to) {
3834 binder_set_txn_from_error(in_reply_to, t_debug_id,
3835 return_error, return_error_param);
3836 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3837 binder_enqueue_thread_work(thread, &thread->return_error.work);
3838 binder_send_failed_reply(in_reply_to, return_error);
3839 } else {
3840 binder_inner_proc_lock(proc);
3841 binder_set_extended_error(&thread->ee, t_debug_id,
3842 return_error, return_error_param);
3843 binder_inner_proc_unlock(proc);
3844 thread->return_error.cmd = return_error;
3845 binder_enqueue_thread_work(thread, &thread->return_error.work);
3846 }
3847 }
3848
3849 static int
3850 binder_request_freeze_notification(struct binder_proc *proc,
3851 struct binder_thread *thread,
3852 struct binder_handle_cookie *handle_cookie)
3853 {
3854 struct binder_ref_freeze *freeze;
3855 struct binder_ref *ref;
3856
3857 freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3858 if (!freeze)
3859 return -ENOMEM;
3860 binder_proc_lock(proc);
3861 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3862 if (!ref) {
3863 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3864 proc->pid, thread->pid, handle_cookie->handle);
3865 binder_proc_unlock(proc);
3866 kfree(freeze);
3867 return -EINVAL;
3868 }
3869
3870 binder_node_lock(ref->node);
3871 if (ref->freeze) {
3872 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3873 proc->pid, thread->pid);
3874 binder_node_unlock(ref->node);
3875 binder_proc_unlock(proc);
3876 kfree(freeze);
3877 return -EINVAL;
3878 }
3879
3880 binder_stats_created(BINDER_STAT_FREEZE);
3881 INIT_LIST_HEAD(&freeze->work.entry);
3882 freeze->cookie = handle_cookie->cookie;
3883 freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3884 ref->freeze = freeze;
3885
3886 if (ref->node->proc) {
3887 binder_inner_proc_lock(ref->node->proc);
3888 freeze->is_frozen = ref->node->proc->is_frozen;
3889 binder_inner_proc_unlock(ref->node->proc);
3890
3891 binder_inner_proc_lock(proc);
3892 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3893 binder_wakeup_proc_ilocked(proc);
3894 binder_inner_proc_unlock(proc);
3895 }
3896
3897 binder_node_unlock(ref->node);
3898 binder_proc_unlock(proc);
3899 return 0;
3900 }
3901
3902 static int
3903 binder_clear_freeze_notification(struct binder_proc *proc,
3904 struct binder_thread *thread,
3905 struct binder_handle_cookie *handle_cookie)
3906 {
3907 struct binder_ref_freeze *freeze;
3908 struct binder_ref *ref;
3909
3910 binder_proc_lock(proc);
3911 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3912 if (!ref) {
3913 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3914 proc->pid, thread->pid, handle_cookie->handle);
3915 binder_proc_unlock(proc);
3916 return -EINVAL;
3917 }
3918
3919 binder_node_lock(ref->node);
3920
3921 if (!ref->freeze) {
3922 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3923 proc->pid, thread->pid);
3924 binder_node_unlock(ref->node);
3925 binder_proc_unlock(proc);
3926 return -EINVAL;
3927 }
3928 freeze = ref->freeze;
3929 binder_inner_proc_lock(proc);
3930 if (freeze->cookie != handle_cookie->cookie) {
3931 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3932 proc->pid, thread->pid, (u64)freeze->cookie,
3933 (u64)handle_cookie->cookie);
3934 binder_inner_proc_unlock(proc);
3935 binder_node_unlock(ref->node);
3936 binder_proc_unlock(proc);
3937 return -EINVAL;
3938 }
3939 ref->freeze = NULL;
3940 /*
3941 * Take the existing freeze object and overwrite its work type. There are three cases here:
3942 * 1. No pending notification. In this case just add the work to the queue.
3943 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
3944 * should resend with the new work type.
3945 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
3946 * needs to be done here.
3947 */
3948 freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
3949 if (list_empty(&freeze->work.entry)) {
3950 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3951 binder_wakeup_proc_ilocked(proc);
3952 } else if (freeze->sent) {
3953 freeze->resend = true;
3954 }
3955 binder_inner_proc_unlock(proc);
3956 binder_node_unlock(ref->node);
3957 binder_proc_unlock(proc);
3958 return 0;
3959 }
3960
3961 static int
3962 binder_freeze_notification_done(struct binder_proc *proc,
3963 struct binder_thread *thread,
3964 binder_uintptr_t cookie)
3965 {
3966 struct binder_ref_freeze *freeze = NULL;
3967 struct binder_work *w;
3968
3969 binder_inner_proc_lock(proc);
3970 list_for_each_entry(w, &proc->delivered_freeze, entry) {
3971 struct binder_ref_freeze *tmp_freeze =
3972 container_of(w, struct binder_ref_freeze, work);
3973
3974 if (tmp_freeze->cookie == cookie) {
3975 freeze = tmp_freeze;
3976 break;
3977 }
3978 }
3979 if (!freeze) {
3980 binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
3981 proc->pid, thread->pid, (u64)cookie);
3982 binder_inner_proc_unlock(proc);
3983 return -EINVAL;
3984 }
3985 binder_dequeue_work_ilocked(&freeze->work);
3986 freeze->sent = false;
3987 if (freeze->resend) {
3988 freeze->resend = false;
3989 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3990 binder_wakeup_proc_ilocked(proc);
3991 }
3992 binder_inner_proc_unlock(proc);
3993 return 0;
3994 }
3995
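/*
 * Illustrative user-space sketch (commentary, not driver code) of the
 * freeze-notification handshake implemented by the three helpers
 * above; command and struct names are from uapi/linux/android/binder.h,
 * the write-buffer plumbing is assumed:
 *
 *   struct binder_handle_cookie hc = {
 *           .handle = handle,               // ref to the remote node
 *           .cookie = (binder_uintptr_t)ctx // echoed in BR_* payloads
 *   };
 *   // 1. subscribe: the driver queues BR_FROZEN_BINDER carrying a
 *   //    struct binder_frozen_state_info { cookie, is_frozen }
 *   write BC_REQUEST_FREEZE_NOTIFICATION + hc;
 *   // 2. after consuming BR_FROZEN_BINDER, ack it so a queued state
 *   //    change (or a pending clear) can be delivered next
 *   write BC_FREEZE_NOTIFICATION_DONE + hc.cookie;
 *   // 3. unsubscribe: answered with BR_CLEAR_FREEZE_NOTIFICATION_DONE
 *   write BC_CLEAR_FREEZE_NOTIFICATION + hc;
 */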
3996 /**
3997 * binder_free_buf() - free the specified buffer
3998 * @proc: binder proc that owns buffer
* @thread: binder thread performing the buffer release
3999 * @buffer: buffer to be freed
4000 * @is_failure: true if the transaction failed to send
4001 *
4002 * If the buffer is for an async transaction, enqueue the next async
4003 * transaction from the node.
4004 *
4005 * Clean up the buffer and free it.
4006 */
4007 static void
4008 binder_free_buf(struct binder_proc *proc,
4009 struct binder_thread *thread,
4010 struct binder_buffer *buffer, bool is_failure)
4011 {
4012 binder_inner_proc_lock(proc);
4013 if (buffer->transaction) {
4014 buffer->transaction->buffer = NULL;
4015 buffer->transaction = NULL;
4016 }
4017 binder_inner_proc_unlock(proc);
4018 if (buffer->async_transaction && buffer->target_node) {
4019 struct binder_node *buf_node;
4020 struct binder_work *w;
4021
4022 buf_node = buffer->target_node;
4023 binder_node_inner_lock(buf_node);
4024 BUG_ON(!buf_node->has_async_transaction);
4025 BUG_ON(buf_node->proc != proc);
4026 w = binder_dequeue_work_head_ilocked(
4027 &buf_node->async_todo);
4028 if (!w) {
4029 buf_node->has_async_transaction = false;
4030 } else {
4031 binder_enqueue_work_ilocked(
4032 w, &proc->todo);
4033 binder_wakeup_proc_ilocked(proc);
4034 }
4035 binder_node_inner_unlock(buf_node);
4036 }
4037 trace_binder_transaction_buffer_release(buffer);
4038 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4039 binder_alloc_free_buf(&proc->alloc, buffer);
4040 }
4041
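/*
 * Usage sketch (commentary, not driver code): user space hands a
 * buffer back with BC_FREE_BUFFER once it is done with a received
 * transaction; the pointer is the data.ptr.buffer value that was
 * delivered with BR_TRANSACTION/BR_REPLY. Assuming the usual
 * write-buffer plumbing:
 *
 *   binder_uintptr_t data_ptr = trd->data.ptr.buffer;
 *   write BC_FREE_BUFFER + data_ptr;  // handled below in
 *                                     // binder_thread_write(), which
 *                                     // calls binder_free_buf()
 */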
4042 static int binder_thread_write(struct binder_proc *proc,
4043 struct binder_thread *thread,
4044 binder_uintptr_t binder_buffer, size_t size,
4045 binder_size_t *consumed)
4046 {
4047 uint32_t cmd;
4048 struct binder_context *context = proc->context;
4049 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4050 void __user *ptr = buffer + *consumed;
4051 void __user *end = buffer + size;
4052
4053 while (ptr < end && thread->return_error.cmd == BR_OK) {
4054 int ret;
4055
4056 if (get_user(cmd, (uint32_t __user *)ptr))
4057 return -EFAULT;
4058 ptr += sizeof(uint32_t);
4059 trace_binder_command(cmd);
4060 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4061 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4062 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4063 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4064 }
4065 switch (cmd) {
4066 case BC_INCREFS:
4067 case BC_ACQUIRE:
4068 case BC_RELEASE:
4069 case BC_DECREFS: {
4070 uint32_t target;
4071 const char *debug_string;
4072 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4073 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4074 struct binder_ref_data rdata;
4075
4076 if (get_user(target, (uint32_t __user *)ptr))
4077 return -EFAULT;
4078
4079 ptr += sizeof(uint32_t);
4080 ret = -1;
4081 if (increment && !target) {
4082 struct binder_node *ctx_mgr_node;
4083
4084 mutex_lock(&context->context_mgr_node_lock);
4085 ctx_mgr_node = context->binder_context_mgr_node;
4086 if (ctx_mgr_node) {
4087 if (ctx_mgr_node->proc == proc) {
4088 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4089 proc->pid, thread->pid);
4090 mutex_unlock(&context->context_mgr_node_lock);
4091 return -EINVAL;
4092 }
4093 ret = binder_inc_ref_for_node(
4094 proc, ctx_mgr_node,
4095 strong, NULL, &rdata);
4096 }
4097 mutex_unlock(&context->context_mgr_node_lock);
4098 }
4099 if (ret)
4100 ret = binder_update_ref_for_handle(
4101 proc, target, increment, strong,
4102 &rdata);
4103 if (!ret && rdata.desc != target) {
4104 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4105 proc->pid, thread->pid,
4106 target, rdata.desc);
4107 }
4108 switch (cmd) {
4109 case BC_INCREFS:
4110 debug_string = "IncRefs";
4111 break;
4112 case BC_ACQUIRE:
4113 debug_string = "Acquire";
4114 break;
4115 case BC_RELEASE:
4116 debug_string = "Release";
4117 break;
4118 case BC_DECREFS:
4119 default:
4120 debug_string = "DecRefs";
4121 break;
4122 }
4123 if (ret) {
4124 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4125 proc->pid, thread->pid, debug_string,
4126 strong, target, ret);
4127 break;
4128 }
4129 binder_debug(BINDER_DEBUG_USER_REFS,
4130 "%d:%d %s ref %d desc %d s %d w %d\n",
4131 proc->pid, thread->pid, debug_string,
4132 rdata.debug_id, rdata.desc, rdata.strong,
4133 rdata.weak);
4134 break;
4135 }
4136 case BC_INCREFS_DONE:
4137 case BC_ACQUIRE_DONE: {
4138 binder_uintptr_t node_ptr;
4139 binder_uintptr_t cookie;
4140 struct binder_node *node;
4141 bool free_node;
4142
4143 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4144 return -EFAULT;
4145 ptr += sizeof(binder_uintptr_t);
4146 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4147 return -EFAULT;
4148 ptr += sizeof(binder_uintptr_t);
4149 node = binder_get_node(proc, node_ptr);
4150 if (node == NULL) {
4151 binder_user_error("%d:%d %s u%016llx no match\n",
4152 proc->pid, thread->pid,
4153 cmd == BC_INCREFS_DONE ?
4154 "BC_INCREFS_DONE" :
4155 "BC_ACQUIRE_DONE",
4156 (u64)node_ptr);
4157 break;
4158 }
4159 if (cookie != node->cookie) {
4160 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4161 proc->pid, thread->pid,
4162 cmd == BC_INCREFS_DONE ?
4163 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4164 (u64)node_ptr, node->debug_id,
4165 (u64)cookie, (u64)node->cookie);
4166 binder_put_node(node);
4167 break;
4168 }
4169 binder_node_inner_lock(node);
4170 if (cmd == BC_ACQUIRE_DONE) {
4171 if (node->pending_strong_ref == 0) {
4172 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4173 proc->pid, thread->pid,
4174 node->debug_id);
4175 binder_node_inner_unlock(node);
4176 binder_put_node(node);
4177 break;
4178 }
4179 node->pending_strong_ref = 0;
4180 } else {
4181 if (node->pending_weak_ref == 0) {
4182 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4183 proc->pid, thread->pid,
4184 node->debug_id);
4185 binder_node_inner_unlock(node);
4186 binder_put_node(node);
4187 break;
4188 }
4189 node->pending_weak_ref = 0;
4190 }
4191 free_node = binder_dec_node_nilocked(node,
4192 cmd == BC_ACQUIRE_DONE, 0);
4193 WARN_ON(free_node);
4194 binder_debug(BINDER_DEBUG_USER_REFS,
4195 "%d:%d %s node %d ls %d lw %d tr %d\n",
4196 proc->pid, thread->pid,
4197 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4198 node->debug_id, node->local_strong_refs,
4199 node->local_weak_refs, node->tmp_refs);
4200 binder_node_inner_unlock(node);
4201 binder_put_node(node);
4202 break;
4203 }
4204 case BC_ATTEMPT_ACQUIRE:
4205 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4206 return -EINVAL;
4207 case BC_ACQUIRE_RESULT:
4208 pr_err("BC_ACQUIRE_RESULT not supported\n");
4209 return -EINVAL;
4210
4211 case BC_FREE_BUFFER: {
4212 binder_uintptr_t data_ptr;
4213 struct binder_buffer *buffer;
4214
4215 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4216 return -EFAULT;
4217 ptr += sizeof(binder_uintptr_t);
4218
4219 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4220 data_ptr);
4221 if (IS_ERR_OR_NULL(buffer)) {
4222 if (PTR_ERR(buffer) == -EPERM) {
4223 binder_user_error(
4224 "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4225 proc->pid, thread->pid,
4226 (unsigned long)data_ptr - proc->alloc.vm_start);
4227 } else {
4228 binder_user_error(
4229 "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4230 proc->pid, thread->pid,
4231 (unsigned long)data_ptr - proc->alloc.vm_start);
4232 }
4233 break;
4234 }
4235 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4236 "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4237 proc->pid, thread->pid,
4238 (unsigned long)data_ptr - proc->alloc.vm_start,
4239 buffer->debug_id,
4240 buffer->transaction ? "active" : "finished");
4241 binder_free_buf(proc, thread, buffer, false);
4242 break;
4243 }
4244
4245 case BC_TRANSACTION_SG:
4246 case BC_REPLY_SG: {
4247 struct binder_transaction_data_sg tr;
4248
4249 if (copy_from_user(&tr, ptr, sizeof(tr)))
4250 return -EFAULT;
4251 ptr += sizeof(tr);
4252 binder_transaction(proc, thread, &tr.transaction_data,
4253 cmd == BC_REPLY_SG, tr.buffers_size);
4254 break;
4255 }
4256 case BC_TRANSACTION:
4257 case BC_REPLY: {
4258 struct binder_transaction_data tr;
4259
4260 if (copy_from_user(&tr, ptr, sizeof(tr)))
4261 return -EFAULT;
4262 ptr += sizeof(tr);
4263 binder_transaction(proc, thread, &tr,
4264 cmd == BC_REPLY, 0);
4265 break;
4266 }
4267
4268 case BC_REGISTER_LOOPER:
4269 binder_debug(BINDER_DEBUG_THREADS,
4270 "%d:%d BC_REGISTER_LOOPER\n",
4271 proc->pid, thread->pid);
4272 binder_inner_proc_lock(proc);
4273 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4274 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4275 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4276 proc->pid, thread->pid);
4277 } else if (proc->requested_threads == 0) {
4278 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4279 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4280 proc->pid, thread->pid);
4281 } else {
4282 proc->requested_threads--;
4283 proc->requested_threads_started++;
4284 }
4285 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4286 binder_inner_proc_unlock(proc);
4287 break;
4288 case BC_ENTER_LOOPER:
4289 binder_debug(BINDER_DEBUG_THREADS,
4290 "%d:%d BC_ENTER_LOOPER\n",
4291 proc->pid, thread->pid);
4292 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4293 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4294 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4295 proc->pid, thread->pid);
4296 }
4297 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4298 break;
4299 case BC_EXIT_LOOPER:
4300 binder_debug(BINDER_DEBUG_THREADS,
4301 "%d:%d BC_EXIT_LOOPER\n",
4302 proc->pid, thread->pid);
4303 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4304 break;
4305
4306 case BC_REQUEST_DEATH_NOTIFICATION:
4307 case BC_CLEAR_DEATH_NOTIFICATION: {
4308 uint32_t target;
4309 binder_uintptr_t cookie;
4310 struct binder_ref *ref;
4311 struct binder_ref_death *death = NULL;
4312
4313 if (get_user(target, (uint32_t __user *)ptr))
4314 return -EFAULT;
4315 ptr += sizeof(uint32_t);
4316 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4317 return -EFAULT;
4318 ptr += sizeof(binder_uintptr_t);
4319 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4320 /*
4321 * Allocate memory for death notification
4322 * before taking lock
4323 */
4324 death = kzalloc(sizeof(*death), GFP_KERNEL);
4325 if (death == NULL) {
4326 WARN_ON(thread->return_error.cmd !=
4327 BR_OK);
4328 thread->return_error.cmd = BR_ERROR;
4329 binder_enqueue_thread_work(
4330 thread,
4331 &thread->return_error.work);
4332 binder_debug(
4333 BINDER_DEBUG_FAILED_TRANSACTION,
4334 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4335 proc->pid, thread->pid);
4336 break;
4337 }
4338 }
4339 binder_proc_lock(proc);
4340 ref = binder_get_ref_olocked(proc, target, false);
4341 if (ref == NULL) {
4342 binder_user_error("%d:%d %s invalid ref %d\n",
4343 proc->pid, thread->pid,
4344 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4345 "BC_REQUEST_DEATH_NOTIFICATION" :
4346 "BC_CLEAR_DEATH_NOTIFICATION",
4347 target);
4348 binder_proc_unlock(proc);
4349 kfree(death);
4350 break;
4351 }
4352
4353 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4354 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4355 proc->pid, thread->pid,
4356 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4357 "BC_REQUEST_DEATH_NOTIFICATION" :
4358 "BC_CLEAR_DEATH_NOTIFICATION",
4359 (u64)cookie, ref->data.debug_id,
4360 ref->data.desc, ref->data.strong,
4361 ref->data.weak, ref->node->debug_id);
4362
4363 binder_node_lock(ref->node);
4364 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4365 if (ref->death) {
4366 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4367 proc->pid, thread->pid);
4368 binder_node_unlock(ref->node);
4369 binder_proc_unlock(proc);
4370 kfree(death);
4371 break;
4372 }
4373 binder_stats_created(BINDER_STAT_DEATH);
4374 INIT_LIST_HEAD(&death->work.entry);
4375 death->cookie = cookie;
4376 ref->death = death;
4377 if (ref->node->proc == NULL) {
4378 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4379
4380 binder_inner_proc_lock(proc);
4381 binder_enqueue_work_ilocked(
4382 &ref->death->work, &proc->todo);
4383 binder_wakeup_proc_ilocked(proc);
4384 binder_inner_proc_unlock(proc);
4385 }
4386 } else {
4387 if (ref->death == NULL) {
4388 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4389 proc->pid, thread->pid);
4390 binder_node_unlock(ref->node);
4391 binder_proc_unlock(proc);
4392 break;
4393 }
4394 death = ref->death;
4395 if (death->cookie != cookie) {
4396 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4397 proc->pid, thread->pid,
4398 (u64)death->cookie,
4399 (u64)cookie);
4400 binder_node_unlock(ref->node);
4401 binder_proc_unlock(proc);
4402 break;
4403 }
4404 ref->death = NULL;
4405 binder_inner_proc_lock(proc);
4406 if (list_empty(&death->work.entry)) {
4407 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4408 if (thread->looper &
4409 (BINDER_LOOPER_STATE_REGISTERED |
4410 BINDER_LOOPER_STATE_ENTERED))
4411 binder_enqueue_thread_work_ilocked(
4412 thread,
4413 &death->work);
4414 else {
4415 binder_enqueue_work_ilocked(
4416 &death->work,
4417 &proc->todo);
4418 binder_wakeup_proc_ilocked(
4419 proc);
4420 }
4421 } else {
4422 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4423 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4424 }
4425 binder_inner_proc_unlock(proc);
4426 }
4427 binder_node_unlock(ref->node);
4428 binder_proc_unlock(proc);
4429 } break;
4430 case BC_DEAD_BINDER_DONE: {
4431 struct binder_work *w;
4432 binder_uintptr_t cookie;
4433 struct binder_ref_death *death = NULL;
4434
4435 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4436 return -EFAULT;
4437
4438 ptr += sizeof(cookie);
4439 binder_inner_proc_lock(proc);
4440 list_for_each_entry(w, &proc->delivered_death,
4441 entry) {
4442 struct binder_ref_death *tmp_death =
4443 container_of(w,
4444 struct binder_ref_death,
4445 work);
4446
4447 if (tmp_death->cookie == cookie) {
4448 death = tmp_death;
4449 break;
4450 }
4451 }
4452 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4453 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4454 proc->pid, thread->pid, (u64)cookie,
4455 death);
4456 if (death == NULL) {
4457 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4458 proc->pid, thread->pid, (u64)cookie);
4459 binder_inner_proc_unlock(proc);
4460 break;
4461 }
4462 binder_dequeue_work_ilocked(&death->work);
4463 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4464 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4465 if (thread->looper &
4466 (BINDER_LOOPER_STATE_REGISTERED |
4467 BINDER_LOOPER_STATE_ENTERED))
4468 binder_enqueue_thread_work_ilocked(
4469 thread, &death->work);
4470 else {
4471 binder_enqueue_work_ilocked(
4472 &death->work,
4473 &proc->todo);
4474 binder_wakeup_proc_ilocked(proc);
4475 }
4476 }
4477 binder_inner_proc_unlock(proc);
4478 } break;
4479
4480 case BC_REQUEST_FREEZE_NOTIFICATION: {
4481 struct binder_handle_cookie handle_cookie;
4482 int error;
4483
4484 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4485 return -EFAULT;
4486 ptr += sizeof(handle_cookie);
4487 error = binder_request_freeze_notification(proc, thread,
4488 &handle_cookie);
4489 if (error)
4490 return error;
4491 } break;
4492
4493 case BC_CLEAR_FREEZE_NOTIFICATION: {
4494 struct binder_handle_cookie handle_cookie;
4495 int error;
4496
4497 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4498 return -EFAULT;
4499 ptr += sizeof(handle_cookie);
4500 error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4501 if (error)
4502 return error;
4503 } break;
4504
4505 case BC_FREEZE_NOTIFICATION_DONE: {
4506 binder_uintptr_t cookie;
4507 int error;
4508
4509 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4510 return -EFAULT;
4511
4512 ptr += sizeof(cookie);
4513 error = binder_freeze_notification_done(proc, thread, cookie);
4514 if (error)
4515 return error;
4516 } break;
4517
4518 default:
4519 pr_err("%d:%d unknown command %u\n",
4520 proc->pid, thread->pid, cmd);
4521 return -EINVAL;
4522 }
4523 *consumed = ptr - buffer;
4524 }
4525 return 0;
4526 }
4527
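/*
 * Usage sketch (commentary, not driver code): binder_thread_write()
 * above consumes the write half of a BINDER_WRITE_READ ioctl. A
 * minimal caller, assuming an already-open binder fd:
 *
 *   uint32_t cmd = BC_ENTER_LOOPER;
 *   struct binder_write_read bwr = {
 *           .write_buffer = (binder_uintptr_t)&cmd,
 *           .write_size   = sizeof(cmd),
 *   };
 *   ioctl(fd, BINDER_WRITE_READ, &bwr);
 *   // on return, bwr.write_consumed reports how many bytes the
 *   // driver parsed out of the write buffer
 */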
4528 static void binder_stat_br(struct binder_proc *proc,
4529 struct binder_thread *thread, uint32_t cmd)
4530 {
4531 trace_binder_return(cmd);
4532 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4533 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4534 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4535 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4536 }
4537 }
4538
4539 static int binder_put_node_cmd(struct binder_proc *proc,
4540 struct binder_thread *thread,
4541 void __user **ptrp,
4542 binder_uintptr_t node_ptr,
4543 binder_uintptr_t node_cookie,
4544 int node_debug_id,
4545 uint32_t cmd, const char *cmd_name)
4546 {
4547 void __user *ptr = *ptrp;
4548
4549 if (put_user(cmd, (uint32_t __user *)ptr))
4550 return -EFAULT;
4551 ptr += sizeof(uint32_t);
4552
4553 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4554 return -EFAULT;
4555 ptr += sizeof(binder_uintptr_t);
4556
4557 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4558 return -EFAULT;
4559 ptr += sizeof(binder_uintptr_t);
4560
4561 binder_stat_br(proc, thread, cmd);
4562 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4563 proc->pid, thread->pid, cmd_name, node_debug_id,
4564 (u64)node_ptr, (u64)node_cookie);
4565
4566 *ptrp = ptr;
4567 return 0;
4568 }
4569
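/*
 * Wire-format note (commentary): each record emitted by
 * binder_put_node_cmd() is
 *
 *   u32 cmd | binder_uintptr_t node_ptr | binder_uintptr_t node_cookie
 *
 * which is the layout user space parses for BR_INCREFS, BR_ACQUIRE,
 * BR_RELEASE and BR_DECREFS.
 */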
4570 static int binder_wait_for_work(struct binder_thread *thread,
4571 bool do_proc_work)
4572 {
4573 DEFINE_WAIT(wait);
4574 struct binder_proc *proc = thread->proc;
4575 int ret = 0;
4576
4577 binder_inner_proc_lock(proc);
4578 for (;;) {
4579 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4580 if (binder_has_work_ilocked(thread, do_proc_work))
4581 break;
4582 if (do_proc_work)
4583 list_add(&thread->waiting_thread_node,
4584 &proc->waiting_threads);
4585 binder_inner_proc_unlock(proc);
4586 schedule();
4587 binder_inner_proc_lock(proc);
4588 list_del_init(&thread->waiting_thread_node);
4589 if (signal_pending(current)) {
4590 ret = -EINTR;
4591 break;
4592 }
4593 }
4594 finish_wait(&thread->wait, &wait);
4595 binder_inner_proc_unlock(proc);
4596
4597 return ret;
4598 }
4599
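/*
 * The loop above is the standard wait-queue idiom: prepare_to_wait()
 * must precede the condition check so that a wakeup arriving between
 * the check and schedule() is not lost. Generic shape of the same
 * pattern (illustrative only):
 *
 *   for (;;) {
 *           prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *           if (condition)
 *                   break;
 *           schedule();
 *           if (signal_pending(current)) {
 *                   ret = -EINTR;
 *                   break;
 *           }
 *   }
 *   finish_wait(&wq, &wait);
 */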
4600 /**
4601 * binder_apply_fd_fixups() - finish fd translation
4602 * @proc: binder_proc associated with @t->buffer
4603 * @t: binder transaction with list of fd fixups
4604 *
4605 * Now that we are in the context of the transaction target
4606 * process, we can allocate and install fds. Process the
4607 * list of fds to translate and fix up the buffer with the
4608 * new fds first and only then install the files.
4609 *
4610 * If we fail to allocate an fd, skip the install and release
4611 * any fds that have already been allocated.
4612 */
4613 static int binder_apply_fd_fixups(struct binder_proc *proc,
4614 struct binder_transaction *t)
4615 {
4616 struct binder_txn_fd_fixup *fixup, *tmp;
4617 int ret = 0;
4618
4619 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4620 int fd = get_unused_fd_flags(O_CLOEXEC);
4621
4622 if (fd < 0) {
4623 binder_debug(BINDER_DEBUG_TRANSACTION,
4624 "failed fd fixup txn %d fd %d\n",
4625 t->debug_id, fd);
4626 ret = -ENOMEM;
4627 goto err;
4628 }
4629 binder_debug(BINDER_DEBUG_TRANSACTION,
4630 "fd fixup txn %d fd %d\n",
4631 t->debug_id, fd);
4632 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4633 fixup->target_fd = fd;
4634 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4635 fixup->offset, &fd,
4636 sizeof(u32))) {
4637 ret = -EINVAL;
4638 goto err;
4639 }
4640 }
4641 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4642 fd_install(fixup->target_fd, fixup->file);
4643 list_del(&fixup->fixup_entry);
4644 kfree(fixup);
4645 }
4646
4647 return ret;
4648
4649 err:
4650 binder_free_txn_fixups(t);
4651 return ret;
4652 }
4653
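/*
 * The two-phase pattern above (reserve every fd and patch the buffer
 * first, fd_install() only once nothing can fail) matters because
 * fd_install() cannot be undone, while a reserved-but-uninstalled fd
 * can still be returned with put_unused_fd(). Minimal sketch of the
 * idiom (illustrative; record_fd() is a hypothetical fallible step):
 *
 *   int fd = get_unused_fd_flags(O_CLOEXEC);
 *   if (fd < 0)
 *           return fd;             // nothing to undo yet
 *   if (record_fd(fd)) {           // may fail
 *           put_unused_fd(fd);     // reservation is still revocable
 *           return -EINVAL;
 *   }
 *   fd_install(fd, file);          // point of no return
 */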
4654 static int binder_thread_read(struct binder_proc *proc,
4655 struct binder_thread *thread,
4656 binder_uintptr_t binder_buffer, size_t size,
4657 binder_size_t *consumed, int non_block)
4658 {
4659 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4660 void __user *ptr = buffer + *consumed;
4661 void __user *end = buffer + size;
4662
4663 int ret = 0;
4664 int wait_for_proc_work;
4665
4666 if (*consumed == 0) {
4667 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4668 return -EFAULT;
4669 ptr += sizeof(uint32_t);
4670 }
4671
4672 retry:
4673 binder_inner_proc_lock(proc);
4674 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4675 binder_inner_proc_unlock(proc);
4676
4677 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4678
4679 trace_binder_wait_for_work(wait_for_proc_work,
4680 !!thread->transaction_stack,
4681 !binder_worklist_empty(proc, &thread->todo));
4682 if (wait_for_proc_work) {
4683 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4684 BINDER_LOOPER_STATE_ENTERED))) {
4685 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4686 proc->pid, thread->pid, thread->looper);
4687 wait_event_interruptible(binder_user_error_wait,
4688 binder_stop_on_user_error < 2);
4689 }
4690 binder_set_nice(proc->default_priority);
4691 }
4692
4693 if (non_block) {
4694 if (!binder_has_work(thread, wait_for_proc_work))
4695 ret = -EAGAIN;
4696 } else {
4697 ret = binder_wait_for_work(thread, wait_for_proc_work);
4698 }
4699
4700 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4701
4702 if (ret)
4703 return ret;
4704
4705 while (1) {
4706 uint32_t cmd;
4707 struct binder_transaction_data_secctx tr;
4708 struct binder_transaction_data *trd = &tr.transaction_data;
4709 struct binder_work *w = NULL;
4710 struct list_head *list = NULL;
4711 struct binder_transaction *t = NULL;
4712 struct binder_thread *t_from;
4713 size_t trsize = sizeof(*trd);
4714
4715 binder_inner_proc_lock(proc);
4716 if (!binder_worklist_empty_ilocked(&thread->todo))
4717 list = &thread->todo;
4718 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4719 wait_for_proc_work)
4720 list = &proc->todo;
4721 else {
4722 binder_inner_proc_unlock(proc);
4723
4724 /* no data added beyond the initial BR_NOOP */
4725 if (ptr - buffer == 4 && !thread->looper_need_return)
4726 goto retry;
4727 break;
4728 }
4729
4730 if (end - ptr < sizeof(tr) + 4) {
4731 binder_inner_proc_unlock(proc);
4732 break;
4733 }
4734 w = binder_dequeue_work_head_ilocked(list);
4735 if (binder_worklist_empty_ilocked(&thread->todo))
4736 thread->process_todo = false;
4737
4738 switch (w->type) {
4739 case BINDER_WORK_TRANSACTION: {
4740 binder_inner_proc_unlock(proc);
4741 t = container_of(w, struct binder_transaction, work);
4742 } break;
4743 case BINDER_WORK_RETURN_ERROR: {
4744 struct binder_error *e = container_of(
4745 w, struct binder_error, work);
4746
4747 WARN_ON(e->cmd == BR_OK);
4748 binder_inner_proc_unlock(proc);
4749 if (put_user(e->cmd, (uint32_t __user *)ptr))
4750 return -EFAULT;
4751 cmd = e->cmd;
4752 e->cmd = BR_OK;
4753 ptr += sizeof(uint32_t);
4754
4755 binder_stat_br(proc, thread, cmd);
4756 } break;
4757 case BINDER_WORK_TRANSACTION_COMPLETE:
4758 case BINDER_WORK_TRANSACTION_PENDING:
4759 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4760 if (proc->oneway_spam_detection_enabled &&
4761 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4762 cmd = BR_ONEWAY_SPAM_SUSPECT;
4763 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4764 cmd = BR_TRANSACTION_PENDING_FROZEN;
4765 else
4766 cmd = BR_TRANSACTION_COMPLETE;
4767 binder_inner_proc_unlock(proc);
4768 kfree(w);
4769 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4770 if (put_user(cmd, (uint32_t __user *)ptr))
4771 return -EFAULT;
4772 ptr += sizeof(uint32_t);
4773
4774 binder_stat_br(proc, thread, cmd);
4775 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4776 "%d:%d BR_TRANSACTION_COMPLETE\n",
4777 proc->pid, thread->pid);
4778 } break;
4779 case BINDER_WORK_NODE: {
4780 struct binder_node *node = container_of(w, struct binder_node, work);
4781 int strong, weak;
4782 binder_uintptr_t node_ptr = node->ptr;
4783 binder_uintptr_t node_cookie = node->cookie;
4784 int node_debug_id = node->debug_id;
4785 int has_weak_ref;
4786 int has_strong_ref;
4787 void __user *orig_ptr = ptr;
4788
4789 BUG_ON(proc != node->proc);
4790 strong = node->internal_strong_refs ||
4791 node->local_strong_refs;
4792 weak = !hlist_empty(&node->refs) ||
4793 node->local_weak_refs ||
4794 node->tmp_refs || strong;
4795 has_strong_ref = node->has_strong_ref;
4796 has_weak_ref = node->has_weak_ref;
4797
4798 if (weak && !has_weak_ref) {
4799 node->has_weak_ref = 1;
4800 node->pending_weak_ref = 1;
4801 node->local_weak_refs++;
4802 }
4803 if (strong && !has_strong_ref) {
4804 node->has_strong_ref = 1;
4805 node->pending_strong_ref = 1;
4806 node->local_strong_refs++;
4807 }
4808 if (!strong && has_strong_ref)
4809 node->has_strong_ref = 0;
4810 if (!weak && has_weak_ref)
4811 node->has_weak_ref = 0;
4812 if (!weak && !strong) {
4813 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4814 "%d:%d node %d u%016llx c%016llx deleted\n",
4815 proc->pid, thread->pid,
4816 node_debug_id,
4817 (u64)node_ptr,
4818 (u64)node_cookie);
4819 rb_erase(&node->rb_node, &proc->nodes);
4820 binder_inner_proc_unlock(proc);
4821 binder_node_lock(node);
4822 /*
4823 * Acquire the node lock before freeing the
4824 * node to serialize with other threads that
4825 * may have been holding the node lock while
4826 * decrementing this node (avoids race where
4827 * this thread frees while the other thread
4828 * is unlocking the node after the final
4829 * decrement)
4830 */
4831 binder_node_unlock(node);
4832 binder_free_node(node);
4833 } else
4834 binder_inner_proc_unlock(proc);
4835
4836 if (weak && !has_weak_ref)
4837 ret = binder_put_node_cmd(
4838 proc, thread, &ptr, node_ptr,
4839 node_cookie, node_debug_id,
4840 BR_INCREFS, "BR_INCREFS");
4841 if (!ret && strong && !has_strong_ref)
4842 ret = binder_put_node_cmd(
4843 proc, thread, &ptr, node_ptr,
4844 node_cookie, node_debug_id,
4845 BR_ACQUIRE, "BR_ACQUIRE");
4846 if (!ret && !strong && has_strong_ref)
4847 ret = binder_put_node_cmd(
4848 proc, thread, &ptr, node_ptr,
4849 node_cookie, node_debug_id,
4850 BR_RELEASE, "BR_RELEASE");
4851 if (!ret && !weak && has_weak_ref)
4852 ret = binder_put_node_cmd(
4853 proc, thread, &ptr, node_ptr,
4854 node_cookie, node_debug_id,
4855 BR_DECREFS, "BR_DECREFS");
4856 if (orig_ptr == ptr)
4857 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4858 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4859 proc->pid, thread->pid,
4860 node_debug_id,
4861 (u64)node_ptr,
4862 (u64)node_cookie);
4863 if (ret)
4864 return ret;
4865 } break;
4866 case BINDER_WORK_DEAD_BINDER:
4867 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4868 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4869 struct binder_ref_death *death;
4870 uint32_t cmd;
4871 binder_uintptr_t cookie;
4872
4873 death = container_of(w, struct binder_ref_death, work);
4874 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4875 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4876 else
4877 cmd = BR_DEAD_BINDER;
4878 cookie = death->cookie;
4879
4880 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4881 "%d:%d %s %016llx\n",
4882 proc->pid, thread->pid,
4883 cmd == BR_DEAD_BINDER ?
4884 "BR_DEAD_BINDER" :
4885 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4886 (u64)cookie);
4887 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4888 binder_inner_proc_unlock(proc);
4889 kfree(death);
4890 binder_stats_deleted(BINDER_STAT_DEATH);
4891 } else {
4892 binder_enqueue_work_ilocked(
4893 w, &proc->delivered_death);
4894 binder_inner_proc_unlock(proc);
4895 }
4896 if (put_user(cmd, (uint32_t __user *)ptr))
4897 return -EFAULT;
4898 ptr += sizeof(uint32_t);
4899 if (put_user(cookie,
4900 (binder_uintptr_t __user *)ptr))
4901 return -EFAULT;
4902 ptr += sizeof(binder_uintptr_t);
4903 binder_stat_br(proc, thread, cmd);
4904 if (cmd == BR_DEAD_BINDER)
4905 goto done; /* DEAD_BINDER notifications can cause transactions */
4906 } break;
4907
4908 case BINDER_WORK_FROZEN_BINDER: {
4909 struct binder_ref_freeze *freeze;
4910 struct binder_frozen_state_info info;
4911
4912 memset(&info, 0, sizeof(info));
4913 freeze = container_of(w, struct binder_ref_freeze, work);
4914 info.is_frozen = freeze->is_frozen;
4915 info.cookie = freeze->cookie;
4916 freeze->sent = true;
4917 binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4918 binder_inner_proc_unlock(proc);
4919
4920 if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4921 return -EFAULT;
4922 ptr += sizeof(uint32_t);
4923 if (copy_to_user(ptr, &info, sizeof(info)))
4924 return -EFAULT;
4925 ptr += sizeof(info);
4926 binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4927 goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4928 } break;
4929
4930 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4931 struct binder_ref_freeze *freeze =
4932 container_of(w, struct binder_ref_freeze, work);
4933 binder_uintptr_t cookie = freeze->cookie;
4934
4935 binder_inner_proc_unlock(proc);
4936 kfree(freeze);
4937 binder_stats_deleted(BINDER_STAT_FREEZE);
4938 if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
4939 return -EFAULT;
4940 ptr += sizeof(uint32_t);
4941 if (put_user(cookie, (binder_uintptr_t __user *)ptr))
4942 return -EFAULT;
4943 ptr += sizeof(binder_uintptr_t);
4944 binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
4945 } break;
4946
4947 default:
4948 binder_inner_proc_unlock(proc);
4949 pr_err("%d:%d: bad work type %d\n",
4950 proc->pid, thread->pid, w->type);
4951 break;
4952 }
4953
4954 if (!t)
4955 continue;
4956
4957 BUG_ON(t->buffer == NULL);
4958 if (t->buffer->target_node) {
4959 struct binder_node *target_node = t->buffer->target_node;
4960
4961 trd->target.ptr = target_node->ptr;
4962 trd->cookie = target_node->cookie;
4963 t->saved_priority = task_nice(current);
4964 if (t->priority < target_node->min_priority &&
4965 !(t->flags & TF_ONE_WAY))
4966 binder_set_nice(t->priority);
4967 else if (!(t->flags & TF_ONE_WAY) ||
4968 t->saved_priority > target_node->min_priority)
4969 binder_set_nice(target_node->min_priority);
4970 cmd = BR_TRANSACTION;
4971 } else {
4972 trd->target.ptr = 0;
4973 trd->cookie = 0;
4974 cmd = BR_REPLY;
4975 }
4976 trd->code = t->code;
4977 trd->flags = t->flags;
4978 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4979
4980 t_from = binder_get_txn_from(t);
4981 if (t_from) {
4982 struct task_struct *sender = t_from->proc->tsk;
4983
4984 trd->sender_pid =
4985 task_tgid_nr_ns(sender,
4986 task_active_pid_ns(current));
4987 } else {
4988 trd->sender_pid = 0;
4989 }
4990
4991 ret = binder_apply_fd_fixups(proc, t);
4992 if (ret) {
4993 struct binder_buffer *buffer = t->buffer;
4994 bool oneway = !!(t->flags & TF_ONE_WAY);
4995 int tid = t->debug_id;
4996
4997 if (t_from)
4998 binder_thread_dec_tmpref(t_from);
4999 buffer->transaction = NULL;
5000 binder_cleanup_transaction(t, "fd fixups failed",
5001 BR_FAILED_REPLY);
5002 binder_free_buf(proc, thread, buffer, true);
5003 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5004 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5005 proc->pid, thread->pid,
5006 oneway ? "async " :
5007 (cmd == BR_REPLY ? "reply " : ""),
5008 tid, BR_FAILED_REPLY, ret, __LINE__);
5009 if (cmd == BR_REPLY) {
5010 cmd = BR_FAILED_REPLY;
5011 if (put_user(cmd, (uint32_t __user *)ptr))
5012 return -EFAULT;
5013 ptr += sizeof(uint32_t);
5014 binder_stat_br(proc, thread, cmd);
5015 break;
5016 }
5017 continue;
5018 }
5019 trd->data_size = t->buffer->data_size;
5020 trd->offsets_size = t->buffer->offsets_size;
5021 trd->data.ptr.buffer = t->buffer->user_data;
5022 trd->data.ptr.offsets = trd->data.ptr.buffer +
5023 ALIGN(t->buffer->data_size,
5024 sizeof(void *));
5025
5026 tr.secctx = t->security_ctx;
5027 if (t->security_ctx) {
5028 cmd = BR_TRANSACTION_SEC_CTX;
5029 trsize = sizeof(tr);
5030 }
5031 if (put_user(cmd, (uint32_t __user *)ptr)) {
5032 if (t_from)
5033 binder_thread_dec_tmpref(t_from);
5034
5035 binder_cleanup_transaction(t, "put_user failed",
5036 BR_FAILED_REPLY);
5037
5038 return -EFAULT;
5039 }
5040 ptr += sizeof(uint32_t);
5041 if (copy_to_user(ptr, &tr, trsize)) {
5042 if (t_from)
5043 binder_thread_dec_tmpref(t_from);
5044
5045 binder_cleanup_transaction(t, "copy_to_user failed",
5046 BR_FAILED_REPLY);
5047
5048 return -EFAULT;
5049 }
5050 ptr += trsize;
5051
5052 trace_binder_transaction_received(t);
5053 binder_stat_br(proc, thread, cmd);
5054 binder_debug(BINDER_DEBUG_TRANSACTION,
5055 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5056 proc->pid, thread->pid,
5057 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5058 (cmd == BR_TRANSACTION_SEC_CTX) ?
5059 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5060 t->debug_id, t_from ? t_from->proc->pid : 0,
5061 t_from ? t_from->pid : 0, cmd,
5062 t->buffer->data_size, t->buffer->offsets_size);
5063
5064 if (t_from)
5065 binder_thread_dec_tmpref(t_from);
5066 t->buffer->allow_user_free = 1;
5067 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5068 binder_inner_proc_lock(thread->proc);
5069 t->to_parent = thread->transaction_stack;
5070 t->to_thread = thread;
5071 thread->transaction_stack = t;
5072 binder_inner_proc_unlock(thread->proc);
5073 } else {
5074 binder_free_transaction(t);
5075 }
5076 break;
5077 }
5078
5079 done:
5080
5081 *consumed = ptr - buffer;
5082 binder_inner_proc_lock(proc);
5083 if (proc->requested_threads == 0 &&
5084 list_empty(&thread->proc->waiting_threads) &&
5085 proc->requested_threads_started < proc->max_threads &&
5086 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5087 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
5088 /* spawn a new thread if we leave this out */) {
5089 proc->requested_threads++;
5090 binder_inner_proc_unlock(proc);
5091 binder_debug(BINDER_DEBUG_THREADS,
5092 "%d:%d BR_SPAWN_LOOPER\n",
5093 proc->pid, thread->pid);
5094 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5095 return -EFAULT;
5096 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5097 } else
5098 binder_inner_proc_unlock(proc);
5099 return 0;
5100 }
5101
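/**
 * binder_release_work() - flush a todo list that will never be delivered
 * @proc:	owning process, used only to take the inner lock
 * @list:	work list being torn down (proc, thread, or node todo)
 *
 * Dequeues every pending binder_work under proc->inner_lock and disposes
 * of it by type: queued transactions are aborted with BR_DEAD_REPLY,
 * while undelivered completion, death, and freeze notifications are
 * freed and accounted as deleted in binder_stats.
 */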
5102 static void binder_release_work(struct binder_proc *proc,
5103 struct list_head *list)
5104 {
5105 struct binder_work *w;
5106 enum binder_work_type wtype;
5107
5108 while (1) {
5109 binder_inner_proc_lock(proc);
5110 w = binder_dequeue_work_head_ilocked(list);
5111 wtype = w ? w->type : 0;
5112 binder_inner_proc_unlock(proc);
5113 if (!w)
5114 return;
5115
5116 switch (wtype) {
5117 case BINDER_WORK_TRANSACTION: {
5118 struct binder_transaction *t;
5119
5120 t = container_of(w, struct binder_transaction, work);
5121
5122 binder_cleanup_transaction(t, "process died.",
5123 BR_DEAD_REPLY);
5124 } break;
5125 case BINDER_WORK_RETURN_ERROR: {
5126 struct binder_error *e = container_of(
5127 w, struct binder_error, work);
5128
5129 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5130 "undelivered TRANSACTION_ERROR: %u\n",
5131 e->cmd);
5132 } break;
5133 case BINDER_WORK_TRANSACTION_PENDING:
5134 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5135 case BINDER_WORK_TRANSACTION_COMPLETE: {
5136 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5137 "undelivered TRANSACTION_COMPLETE\n");
5138 kfree(w);
5139 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5140 } break;
5141 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5142 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5143 struct binder_ref_death *death;
5144
5145 death = container_of(w, struct binder_ref_death, work);
5146 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5147 "undelivered death notification, %016llx\n",
5148 (u64)death->cookie);
5149 kfree(death);
5150 binder_stats_deleted(BINDER_STAT_DEATH);
5151 } break;
5152 case BINDER_WORK_NODE:
5153 break;
5154 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5155 struct binder_ref_freeze *freeze;
5156
5157 freeze = container_of(w, struct binder_ref_freeze, work);
5158 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5159 "undelivered freeze notification, %016llx\n",
5160 (u64)freeze->cookie);
5161 kfree(freeze);
5162 binder_stats_deleted(BINDER_STAT_FREEZE);
5163 } break;
5164 default:
5165 pr_err("unexpected work type, %d, not freed\n",
5166 wtype);
5167 break;
5168 }
5169 }
5170
5171 }
5172
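/*
 * Find the binder_thread for current in proc->threads, an rb_tree keyed
 * by pid. If no entry exists and @new_thread is non-NULL, initialize
 * and insert @new_thread in its place. Must hold proc->inner_lock.
 */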
5173 static struct binder_thread *binder_get_thread_ilocked(
5174 struct binder_proc *proc, struct binder_thread *new_thread)
5175 {
5176 struct binder_thread *thread = NULL;
5177 struct rb_node *parent = NULL;
5178 struct rb_node **p = &proc->threads.rb_node;
5179
5180 while (*p) {
5181 parent = *p;
5182 thread = rb_entry(parent, struct binder_thread, rb_node);
5183
5184 if (current->pid < thread->pid)
5185 p = &(*p)->rb_left;
5186 else if (current->pid > thread->pid)
5187 p = &(*p)->rb_right;
5188 else
5189 return thread;
5190 }
5191 if (!new_thread)
5192 return NULL;
5193 thread = new_thread;
5194 binder_stats_created(BINDER_STAT_THREAD);
5195 thread->proc = proc;
5196 thread->pid = current->pid;
5197 atomic_set(&thread->tmp_ref, 0);
5198 init_waitqueue_head(&thread->wait);
5199 INIT_LIST_HEAD(&thread->todo);
5200 rb_link_node(&thread->rb_node, parent, p);
5201 rb_insert_color(&thread->rb_node, &proc->threads);
5202 thread->looper_need_return = true;
5203 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5204 thread->return_error.cmd = BR_OK;
5205 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5206 thread->reply_error.cmd = BR_OK;
5207 thread->ee.command = BR_OK;
5208 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5209 return thread;
5210 }
5211
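/*
 * binder_get_thread() cannot allocate under the inner lock, so it does
 * a locked lookup first, then a GFP_KERNEL allocation and a second
 * locked lookup; if an entry for this pid appeared in the meantime,
 * the spare allocation is freed.
 */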
5212 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5213 {
5214 struct binder_thread *thread;
5215 struct binder_thread *new_thread;
5216
5217 binder_inner_proc_lock(proc);
5218 thread = binder_get_thread_ilocked(proc, NULL);
5219 binder_inner_proc_unlock(proc);
5220 if (!thread) {
5221 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5222 if (new_thread == NULL)
5223 return NULL;
5224 binder_inner_proc_lock(proc);
5225 thread = binder_get_thread_ilocked(proc, new_thread);
5226 binder_inner_proc_unlock(proc);
5227 if (thread != new_thread)
5228 kfree(new_thread);
5229 }
5230 return thread;
5231 }
5232
5233 static void binder_free_proc(struct binder_proc *proc)
5234 {
5235 struct binder_device *device;
5236
5237 BUG_ON(!list_empty(&proc->todo));
5238 BUG_ON(!list_empty(&proc->delivered_death));
5239 if (proc->outstanding_txns)
5240 pr_warn("%s: Unexpected outstanding_txns %d\n",
5241 __func__, proc->outstanding_txns);
5242 device = container_of(proc->context, struct binder_device, context);
5243 if (refcount_dec_and_test(&device->ref)) {
5244 binder_remove_device(device);
5245 kfree(proc->context->name);
5246 kfree(device);
5247 }
5248 binder_alloc_deferred_release(&proc->alloc);
5249 put_task_struct(proc->tsk);
5250 put_cred(proc->cred);
5251 binder_stats_deleted(BINDER_STAT_PROC);
5252 dbitmap_free(&proc->dmap);
5253 kfree(proc);
5254 }
5255
5256 static void binder_free_thread(struct binder_thread *thread)
5257 {
5258 BUG_ON(!list_empty(&thread->todo));
5259 binder_stats_deleted(BINDER_STAT_THREAD);
5260 binder_proc_dec_tmpref(thread->proc);
5261 kfree(thread);
5262 }
5263
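/*
 * Detach @thread from @proc and unwind its transaction stack: if an
 * incoming transaction is still waiting on our reply it is failed with
 * BR_DEAD_REPLY, and every stack entry referencing this thread is
 * unlinked so the remote side never sees a dangling thread pointer.
 * Pollers are kicked out via wake_up_pollfree() before the thread is
 * freed.
 */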
5264 static int binder_thread_release(struct binder_proc *proc,
5265 struct binder_thread *thread)
5266 {
5267 struct binder_transaction *t;
5268 struct binder_transaction *send_reply = NULL;
5269 int active_transactions = 0;
5270 struct binder_transaction *last_t = NULL;
5271
5272 binder_inner_proc_lock(thread->proc);
5273 /*
5274 * take a ref on the proc so it survives
5275 * after we remove this thread from proc->threads.
5276 * The corresponding dec is when we actually
5277 * free the thread in binder_free_thread()
5278 */
5279 proc->tmp_ref++;
5280 /*
5281 * take a ref on this thread to ensure it
5282 * survives while we are releasing it
5283 */
5284 atomic_inc(&thread->tmp_ref);
5285 rb_erase(&thread->rb_node, &proc->threads);
5286 t = thread->transaction_stack;
5287 if (t) {
5288 spin_lock(&t->lock);
5289 if (t->to_thread == thread)
5290 send_reply = t;
5291 } else {
5292 __acquire(&t->lock);
5293 }
5294 thread->is_dead = true;
5295
5296 while (t) {
5297 last_t = t;
5298 active_transactions++;
5299 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5300 "release %d:%d transaction %d %s, still active\n",
5301 proc->pid, thread->pid,
5302 t->debug_id,
5303 (t->to_thread == thread) ? "in" : "out");
5304
5305 if (t->to_thread == thread) {
5306 thread->proc->outstanding_txns--;
5307 t->to_proc = NULL;
5308 t->to_thread = NULL;
5309 if (t->buffer) {
5310 t->buffer->transaction = NULL;
5311 t->buffer = NULL;
5312 }
5313 t = t->to_parent;
5314 } else if (t->from == thread) {
5315 t->from = NULL;
5316 t = t->from_parent;
5317 } else
5318 BUG();
5319 spin_unlock(&last_t->lock);
5320 if (t)
5321 spin_lock(&t->lock);
5322 else
5323 __acquire(&t->lock);
5324 }
5325 /* annotation for sparse, lock not acquired in last iteration above */
5326 __release(&t->lock);
5327
5328 /*
5329 * If this thread used poll, make sure we remove the waitqueue from any
5330 * poll data structures holding it.
5331 */
5332 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5333 wake_up_pollfree(&thread->wait);
5334
5335 binder_inner_proc_unlock(thread->proc);
5336
5337 /*
5338 * This is needed to avoid races between wake_up_pollfree() above and
5339 * someone else removing the last entry from the queue for other reasons
5340 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5341 * descriptor being closed). Such other users hold an RCU read lock, so
5342 * we can be sure they're done after we call synchronize_rcu().
5343 */
5344 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5345 synchronize_rcu();
5346
5347 if (send_reply)
5348 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5349 binder_release_work(proc, &thread->todo);
5350 binder_thread_dec_tmpref(thread);
5351 return active_transactions;
5352 }
5353
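/*
 * poll() support: the fd is readable when the calling thread has queued
 * work, or proc-wide work if this thread is available to handle it.
 */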
5354 static __poll_t binder_poll(struct file *filp,
5355 struct poll_table_struct *wait)
5356 {
5357 struct binder_proc *proc = filp->private_data;
5358 struct binder_thread *thread = NULL;
5359 bool wait_for_proc_work;
5360
5361 thread = binder_get_thread(proc);
5362 if (!thread)
5363 return EPOLLERR;
5364
5365 binder_inner_proc_lock(thread->proc);
5366 thread->looper |= BINDER_LOOPER_STATE_POLL;
5367 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5368
5369 binder_inner_proc_unlock(thread->proc);
5370
5371 poll_wait(filp, &thread->wait, wait);
5372
5373 if (binder_has_work(thread, wait_for_proc_work))
5374 return EPOLLIN;
5375
5376 return 0;
5377 }
5378
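/*
 * BINDER_WRITE_READ is the main data-path ioctl: userspace submits a
 * buffer of BC_* commands and receives a buffer of BR_* returns in one
 * call. A minimal (hypothetical) userspace sketch, with parse_returns()
 * standing in for the caller's BR_* dispatch loop:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_cmds,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		err(1, "binder");
 *	parse_returns(in_buf, bwr.read_consumed);
 *
 * Note that write_consumed/read_consumed are copied back even on error,
 * which is why the out: path in this function unconditionally copies
 * bwr back to userspace.
 */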
5379 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5380 struct binder_thread *thread)
5381 {
5382 int ret = 0;
5383 struct binder_proc *proc = filp->private_data;
5384 void __user *ubuf = (void __user *)arg;
5385 struct binder_write_read bwr;
5386
5387 if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
5388 return -EFAULT;
5389
5390 binder_debug(BINDER_DEBUG_READ_WRITE,
5391 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5392 proc->pid, thread->pid,
5393 (u64)bwr.write_size, (u64)bwr.write_buffer,
5394 (u64)bwr.read_size, (u64)bwr.read_buffer);
5395
5396 if (bwr.write_size > 0) {
5397 ret = binder_thread_write(proc, thread,
5398 bwr.write_buffer,
5399 bwr.write_size,
5400 &bwr.write_consumed);
5401 trace_binder_write_done(ret);
5402 if (ret < 0) {
5403 bwr.read_consumed = 0;
5404 goto out;
5405 }
5406 }
5407 if (bwr.read_size > 0) {
5408 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5409 bwr.read_size,
5410 &bwr.read_consumed,
5411 filp->f_flags & O_NONBLOCK);
5412 trace_binder_read_done(ret);
5413 binder_inner_proc_lock(proc);
5414 if (!binder_worklist_empty_ilocked(&proc->todo))
5415 binder_wakeup_proc_ilocked(proc);
5416 binder_inner_proc_unlock(proc);
5417 if (ret < 0)
5418 goto out;
5419 }
5420 binder_debug(BINDER_DEBUG_READ_WRITE,
5421 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5422 proc->pid, thread->pid,
5423 (u64)bwr.write_consumed, (u64)bwr.write_size,
5424 (u64)bwr.read_consumed, (u64)bwr.read_size);
5425 out:
5426 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5427 ret = -EFAULT;
5428 return ret;
5429 }
5430
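/*
 * BINDER_SET_CONTEXT_MGR(_EXT) registers the caller as the context
 * manager, i.e. the node that handle 0 resolves to for every process
 * in this binder context. Only one manager may exist at a time, and
 * the request is gated by an LSM hook and a uid check.
 */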
5431 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5432 struct flat_binder_object *fbo)
5433 {
5434 int ret = 0;
5435 struct binder_proc *proc = filp->private_data;
5436 struct binder_context *context = proc->context;
5437 struct binder_node *new_node;
5438 kuid_t curr_euid = current_euid();
5439
5440 mutex_lock(&context->context_mgr_node_lock);
5441 if (context->binder_context_mgr_node) {
5442 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5443 ret = -EBUSY;
5444 goto out;
5445 }
5446 ret = security_binder_set_context_mgr(proc->cred);
5447 if (ret < 0)
5448 goto out;
5449 if (uid_valid(context->binder_context_mgr_uid)) {
5450 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5451 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5452 from_kuid(&init_user_ns, curr_euid),
5453 from_kuid(&init_user_ns,
5454 context->binder_context_mgr_uid));
5455 ret = -EPERM;
5456 goto out;
5457 }
5458 } else {
5459 context->binder_context_mgr_uid = curr_euid;
5460 }
5461 new_node = binder_new_node(proc, fbo);
5462 if (!new_node) {
5463 ret = -ENOMEM;
5464 goto out;
5465 }
5466 binder_node_lock(new_node);
5467 new_node->local_weak_refs++;
5468 new_node->local_strong_refs++;
5469 new_node->has_strong_ref = 1;
5470 new_node->has_weak_ref = 1;
5471 context->binder_context_mgr_node = new_node;
5472 binder_node_unlock(new_node);
5473 binder_put_node(new_node);
5474 out:
5475 mutex_unlock(&context->context_mgr_node_lock);
5476 return ret;
5477 }
5478
5479 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5480 struct binder_node_info_for_ref *info)
5481 {
5482 struct binder_node *node;
5483 struct binder_context *context = proc->context;
5484 __u32 handle = info->handle;
5485
5486 if (info->strong_count || info->weak_count || info->reserved1 ||
5487 info->reserved2 || info->reserved3) {
5488 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5489 proc->pid);
5490 return -EINVAL;
5491 }
5492
5493 /* This ioctl may only be used by the context manager */
5494 mutex_lock(&context->context_mgr_node_lock);
5495 if (!context->binder_context_mgr_node ||
5496 context->binder_context_mgr_node->proc != proc) {
5497 mutex_unlock(&context->context_mgr_node_lock);
5498 return -EPERM;
5499 }
5500 mutex_unlock(&context->context_mgr_node_lock);
5501
5502 node = binder_get_node_from_ref(proc, handle, true, NULL);
5503 if (!node)
5504 return -EINVAL;
5505
5506 info->strong_count = node->local_strong_refs +
5507 node->internal_strong_refs;
5508 info->weak_count = node->local_weak_refs;
5509
5510 binder_put_node(node);
5511
5512 return 0;
5513 }
5514
5515 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5516 struct binder_node_debug_info *info)
5517 {
5518 struct rb_node *n;
5519 binder_uintptr_t ptr = info->ptr;
5520
5521 memset(info, 0, sizeof(*info));
5522
5523 binder_inner_proc_lock(proc);
5524 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5525 struct binder_node *node = rb_entry(n, struct binder_node,
5526 rb_node);
5527 if (node->ptr > ptr) {
5528 info->ptr = node->ptr;
5529 info->cookie = node->cookie;
5530 info->has_strong_ref = node->has_strong_ref;
5531 info->has_weak_ref = node->has_weak_ref;
5532 break;
5533 }
5534 }
5535 binder_inner_proc_unlock(proc);
5536
5537 return 0;
5538 }
5539
5540 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5541 {
5542 struct rb_node *n;
5543 struct binder_thread *thread;
5544
5545 if (proc->outstanding_txns > 0)
5546 return true;
5547
5548 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5549 thread = rb_entry(n, struct binder_thread, rb_node);
5550 if (thread->transaction_stack)
5551 return true;
5552 }
5553 return false;
5554 }
5555
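/*
 * Walk every node owned by @proc and, for each remote ref that has
 * registered a freeze notification, queue a BINDER_WORK_FROZEN_BINDER
 * item on that ref's process so it learns that @proc froze or unfroze.
 * A node tmpref is held while the inner lock is dropped to walk the
 * node's refs.
 */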
5556 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5557 {
5558 struct binder_node *prev = NULL;
5559 struct rb_node *n;
5560 struct binder_ref *ref;
5561
5562 binder_inner_proc_lock(proc);
5563 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5564 struct binder_node *node;
5565
5566 node = rb_entry(n, struct binder_node, rb_node);
5567 binder_inc_node_tmpref_ilocked(node);
5568 binder_inner_proc_unlock(proc);
5569 if (prev)
5570 binder_put_node(prev);
5571 binder_node_lock(node);
5572 hlist_for_each_entry(ref, &node->refs, node_entry) {
5573 /*
5574 * Need the node lock to synchronize
5575 * with new notification requests and the
5576 * inner lock to synchronize with queued
5577 * freeze notifications.
5578 */
5579 binder_inner_proc_lock(ref->proc);
5580 if (!ref->freeze) {
5581 binder_inner_proc_unlock(ref->proc);
5582 continue;
5583 }
5584 ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5585 if (list_empty(&ref->freeze->work.entry)) {
5586 ref->freeze->is_frozen = is_frozen;
5587 binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5588 binder_wakeup_proc_ilocked(ref->proc);
5589 } else {
5590 if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5591 ref->freeze->resend = true;
5592 ref->freeze->is_frozen = is_frozen;
5593 }
5594 binder_inner_proc_unlock(ref->proc);
5595 }
5596 prev = node;
5597 binder_node_unlock(node);
5598 binder_inner_proc_lock(proc);
5599 if (proc->is_dead)
5600 break;
5601 }
5602 binder_inner_proc_unlock(proc);
5603 if (prev)
5604 binder_put_node(prev);
5605 }
5606
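/*
 * Hypothetical userspace sketch of BINDER_FREEZE (not part of this
 * file): a freezer typically quiesces binder before cgroup-freezing
 * the target and retries on EAGAIN while transactions drain:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid, .enable = 1, .timeout_ms = 100,
 *	};
 *	while (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 &&
 *	       errno == EAGAIN)
 *		usleep(1000);
 */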
5607 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5608 struct binder_proc *target_proc)
5609 {
5610 int ret = 0;
5611
5612 if (!info->enable) {
5613 binder_inner_proc_lock(target_proc);
5614 target_proc->sync_recv = false;
5615 target_proc->async_recv = false;
5616 target_proc->is_frozen = false;
5617 binder_inner_proc_unlock(target_proc);
5618 binder_add_freeze_work(target_proc, false);
5619 return 0;
5620 }
5621
5622 /*
5623 * Freezing the target. Prevent new transactions by
5624 * setting frozen state. If timeout specified, wait
5625 * for transactions to drain.
5626 */
5627 binder_inner_proc_lock(target_proc);
5628 target_proc->sync_recv = false;
5629 target_proc->async_recv = false;
5630 target_proc->is_frozen = true;
5631 binder_inner_proc_unlock(target_proc);
5632
5633 if (info->timeout_ms > 0)
5634 ret = wait_event_interruptible_timeout(
5635 target_proc->freeze_wait,
5636 (!target_proc->outstanding_txns),
5637 msecs_to_jiffies(info->timeout_ms));
5638
5639 /* Check pending transactions that wait for reply */
5640 if (ret >= 0) {
5641 binder_inner_proc_lock(target_proc);
5642 if (binder_txns_pending_ilocked(target_proc))
5643 ret = -EAGAIN;
5644 binder_inner_proc_unlock(target_proc);
5645 }
5646
5647 if (ret < 0) {
5648 binder_inner_proc_lock(target_proc);
5649 target_proc->is_frozen = false;
5650 binder_inner_proc_unlock(target_proc);
5651 } else {
5652 binder_add_freeze_work(target_proc, true);
5653 }
5654
5655 return ret;
5656 }
5657
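/*
 * BINDER_GET_FROZEN_INFO aggregates across all procs sharing the pid:
 * bit 0 of sync_recv means a sync transaction arrived while frozen and
 * bit 1 means transactions are still pending; async_recv is set if an
 * async transaction arrived while frozen.
 */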
5658 static int binder_ioctl_get_freezer_info(
5659 struct binder_frozen_status_info *info)
5660 {
5661 struct binder_proc *target_proc;
5662 bool found = false;
5663 __u32 txns_pending;
5664
5665 info->sync_recv = 0;
5666 info->async_recv = 0;
5667
5668 mutex_lock(&binder_procs_lock);
5669 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5670 if (target_proc->pid == info->pid) {
5671 found = true;
5672 binder_inner_proc_lock(target_proc);
5673 txns_pending = binder_txns_pending_ilocked(target_proc);
5674 info->sync_recv |= target_proc->sync_recv |
5675 (txns_pending << 1);
5676 info->async_recv |= target_proc->async_recv;
5677 binder_inner_proc_unlock(target_proc);
5678 }
5679 }
5680 mutex_unlock(&binder_procs_lock);
5681
5682 if (!found)
5683 return -EINVAL;
5684
5685 return 0;
5686 }
5687
5688 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5689 void __user *ubuf)
5690 {
5691 struct binder_extended_error ee;
5692
5693 binder_inner_proc_lock(thread->proc);
5694 ee = thread->ee;
5695 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5696 binder_inner_proc_unlock(thread->proc);
5697
5698 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5699 return -EFAULT;
5700
5701 return 0;
5702 }
5703
5704 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5705 {
5706 int ret;
5707 struct binder_proc *proc = filp->private_data;
5708 struct binder_thread *thread;
5709 void __user *ubuf = (void __user *)arg;
5710
5711 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5712 proc->pid, current->pid, cmd, arg);*/
5713
5714 binder_selftest_alloc(&proc->alloc);
5715
5716 trace_binder_ioctl(cmd, arg);
5717
5718 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5719 if (ret)
5720 goto err_unlocked;
5721
5722 thread = binder_get_thread(proc);
5723 if (thread == NULL) {
5724 ret = -ENOMEM;
5725 goto err;
5726 }
5727
5728 switch (cmd) {
5729 case BINDER_WRITE_READ:
5730 ret = binder_ioctl_write_read(filp, arg, thread);
5731 if (ret)
5732 goto err;
5733 break;
5734 case BINDER_SET_MAX_THREADS: {
5735 u32 max_threads;
5736
5737 if (copy_from_user(&max_threads, ubuf,
5738 sizeof(max_threads))) {
5739 ret = -EINVAL;
5740 goto err;
5741 }
5742 binder_inner_proc_lock(proc);
5743 proc->max_threads = max_threads;
5744 binder_inner_proc_unlock(proc);
5745 break;
5746 }
5747 case BINDER_SET_CONTEXT_MGR_EXT: {
5748 struct flat_binder_object fbo;
5749
5750 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5751 ret = -EINVAL;
5752 goto err;
5753 }
5754 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5755 if (ret)
5756 goto err;
5757 break;
5758 }
5759 case BINDER_SET_CONTEXT_MGR:
5760 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5761 if (ret)
5762 goto err;
5763 break;
5764 case BINDER_THREAD_EXIT:
5765 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5766 proc->pid, thread->pid);
5767 binder_thread_release(proc, thread);
5768 thread = NULL;
5769 break;
5770 case BINDER_VERSION: {
5771 struct binder_version __user *ver = ubuf;
5772
5773 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5774 &ver->protocol_version)) {
5775 ret = -EINVAL;
5776 goto err;
5777 }
5778 break;
5779 }
5780 case BINDER_GET_NODE_INFO_FOR_REF: {
5781 struct binder_node_info_for_ref info;
5782
5783 if (copy_from_user(&info, ubuf, sizeof(info))) {
5784 ret = -EFAULT;
5785 goto err;
5786 }
5787
5788 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5789 if (ret < 0)
5790 goto err;
5791
5792 if (copy_to_user(ubuf, &info, sizeof(info))) {
5793 ret = -EFAULT;
5794 goto err;
5795 }
5796
5797 break;
5798 }
5799 case BINDER_GET_NODE_DEBUG_INFO: {
5800 struct binder_node_debug_info info;
5801
5802 if (copy_from_user(&info, ubuf, sizeof(info))) {
5803 ret = -EFAULT;
5804 goto err;
5805 }
5806
5807 ret = binder_ioctl_get_node_debug_info(proc, &info);
5808 if (ret < 0)
5809 goto err;
5810
5811 if (copy_to_user(ubuf, &info, sizeof(info))) {
5812 ret = -EFAULT;
5813 goto err;
5814 }
5815 break;
5816 }
5817 case BINDER_FREEZE: {
5818 struct binder_freeze_info info;
5819 struct binder_proc **target_procs = NULL, *target_proc;
5820 int target_procs_count = 0, i = 0;
5821
5822 ret = 0;
5823
5824 if (copy_from_user(&info, ubuf, sizeof(info))) {
5825 ret = -EFAULT;
5826 goto err;
5827 }
5828
5829 mutex_lock(&binder_procs_lock);
5830 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5831 if (target_proc->pid == info.pid)
5832 target_procs_count++;
5833 }
5834
5835 if (target_procs_count == 0) {
5836 mutex_unlock(&binder_procs_lock);
5837 ret = -EINVAL;
5838 goto err;
5839 }
5840
5841 target_procs = kcalloc(target_procs_count,
5842 sizeof(struct binder_proc *),
5843 GFP_KERNEL);
5844
5845 if (!target_procs) {
5846 mutex_unlock(&binder_procs_lock);
5847 ret = -ENOMEM;
5848 goto err;
5849 }
5850
5851 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5852 if (target_proc->pid != info.pid)
5853 continue;
5854
5855 binder_inner_proc_lock(target_proc);
5856 target_proc->tmp_ref++;
5857 binder_inner_proc_unlock(target_proc);
5858
5859 target_procs[i++] = target_proc;
5860 }
5861 mutex_unlock(&binder_procs_lock);
5862
5863 for (i = 0; i < target_procs_count; i++) {
5864 if (ret >= 0)
5865 ret = binder_ioctl_freeze(&info,
5866 target_procs[i]);
5867
5868 binder_proc_dec_tmpref(target_procs[i]);
5869 }
5870
5871 kfree(target_procs);
5872
5873 if (ret < 0)
5874 goto err;
5875 break;
5876 }
5877 case BINDER_GET_FROZEN_INFO: {
5878 struct binder_frozen_status_info info;
5879
5880 if (copy_from_user(&info, ubuf, sizeof(info))) {
5881 ret = -EFAULT;
5882 goto err;
5883 }
5884
5885 ret = binder_ioctl_get_freezer_info(&info);
5886 if (ret < 0)
5887 goto err;
5888
5889 if (copy_to_user(ubuf, &info, sizeof(info))) {
5890 ret = -EFAULT;
5891 goto err;
5892 }
5893 break;
5894 }
5895 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5896 uint32_t enable;
5897
5898 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5899 ret = -EFAULT;
5900 goto err;
5901 }
5902 binder_inner_proc_lock(proc);
5903 proc->oneway_spam_detection_enabled = (bool)enable;
5904 binder_inner_proc_unlock(proc);
5905 break;
5906 }
5907 case BINDER_GET_EXTENDED_ERROR:
5908 ret = binder_ioctl_get_extended_error(thread, ubuf);
5909 if (ret < 0)
5910 goto err;
5911 break;
5912 default:
5913 ret = -EINVAL;
5914 goto err;
5915 }
5916 ret = 0;
5917 err:
5918 if (thread)
5919 thread->looper_need_return = false;
5920 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5921 if (ret && ret != -EINTR)
5922 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5923 err_unlocked:
5924 trace_binder_ioctl_done(ret);
5925 return ret;
5926 }
5927
5928 static void binder_vma_open(struct vm_area_struct *vma)
5929 {
5930 struct binder_proc *proc = vma->vm_private_data;
5931
5932 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5933 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5934 proc->pid, vma->vm_start, vma->vm_end,
5935 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5936 (unsigned long)pgprot_val(vma->vm_page_prot));
5937 }
5938
5939 static void binder_vma_close(struct vm_area_struct *vma)
5940 {
5941 struct binder_proc *proc = vma->vm_private_data;
5942
5943 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5944 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5945 proc->pid, vma->vm_start, vma->vm_end,
5946 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5947 (unsigned long)pgprot_val(vma->vm_page_prot));
5948 binder_alloc_vma_close(&proc->alloc);
5949 }
5950
5951 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5952 {
5953 return VM_FAULT_SIGBUS;
5954 }
5955
5956 static const struct vm_operations_struct binder_vm_ops = {
5957 .open = binder_vma_open,
5958 .close = binder_vma_close,
5959 .fault = binder_vm_fault,
5960 };
5961
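/*
 * The binder buffer is mapped read-only: FORBIDDEN_MMAP_FLAGS rejects
 * VM_WRITE and VM_MAYWRITE is cleared below, so only the kernel writes
 * transaction data into it. A typical (hypothetical) userspace mapping,
 * where MAP_SIZE is whatever buffer size the runtime picks:
 *
 *	void *map = mmap(NULL, MAP_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
 */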
5962 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5963 {
5964 struct binder_proc *proc = filp->private_data;
5965
5966 if (proc->tsk != current->group_leader)
5967 return -EINVAL;
5968
5969 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5970 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5971 __func__, proc->pid, vma->vm_start, vma->vm_end,
5972 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5973 (unsigned long)pgprot_val(vma->vm_page_prot));
5974
5975 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5976 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5977 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5978 return -EPERM;
5979 }
5980 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5981
5982 vma->vm_ops = &binder_vm_ops;
5983 vma->vm_private_data = proc;
5984
5985 return binder_alloc_mmap_handler(&proc->alloc, vma);
5986 }
5987
5988 static int binder_open(struct inode *nodp, struct file *filp)
5989 {
5990 struct binder_proc *proc, *itr;
5991 struct binder_device *binder_dev;
5992 struct binderfs_info *info;
5993 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5994 bool existing_pid = false;
5995
5996 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5997 current->group_leader->pid, current->pid);
5998
5999 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6000 if (proc == NULL)
6001 return -ENOMEM;
6002
6003 dbitmap_init(&proc->dmap);
6004 spin_lock_init(&proc->inner_lock);
6005 spin_lock_init(&proc->outer_lock);
6006 get_task_struct(current->group_leader);
6007 proc->tsk = current->group_leader;
6008 proc->cred = get_cred(filp->f_cred);
6009 INIT_LIST_HEAD(&proc->todo);
6010 init_waitqueue_head(&proc->freeze_wait);
6011 proc->default_priority = task_nice(current);
6012 /* binderfs stashes devices in i_private */
6013 if (is_binderfs_device(nodp)) {
6014 binder_dev = nodp->i_private;
6015 info = nodp->i_sb->s_fs_info;
6016 binder_binderfs_dir_entry_proc = info->proc_log_dir;
6017 } else {
6018 binder_dev = container_of(filp->private_data,
6019 struct binder_device, miscdev);
6020 }
6021 refcount_inc(&binder_dev->ref);
6022 proc->context = &binder_dev->context;
6023 binder_alloc_init(&proc->alloc);
6024
6025 binder_stats_created(BINDER_STAT_PROC);
6026 proc->pid = current->group_leader->pid;
6027 INIT_LIST_HEAD(&proc->delivered_death);
6028 INIT_LIST_HEAD(&proc->delivered_freeze);
6029 INIT_LIST_HEAD(&proc->waiting_threads);
6030 filp->private_data = proc;
6031
6032 mutex_lock(&binder_procs_lock);
6033 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6034 if (itr->pid == proc->pid) {
6035 existing_pid = true;
6036 break;
6037 }
6038 }
6039 hlist_add_head(&proc->proc_node, &binder_procs);
6040 mutex_unlock(&binder_procs_lock);
6041
6042 if (binder_debugfs_dir_entry_proc && !existing_pid) {
6043 char strbuf[11];
6044
6045 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6046 /*
6047 * proc debug entries are shared between contexts.
6048 * Only create for the first PID to avoid debugfs log spamming.
6049 * The printing code will print all contexts for a given PID
6050 * anyway, so this is not a problem.
6051 */
6052 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6053 binder_debugfs_dir_entry_proc,
6054 (void *)(unsigned long)proc->pid,
6055 &proc_fops);
6056 }
6057
6058 if (binder_binderfs_dir_entry_proc && !existing_pid) {
6059 char strbuf[11];
6060 struct dentry *binderfs_entry;
6061
6062 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6063 /*
6064 * Similar to debugfs, the process specific log file is shared
6065 * between contexts. Only create for the first PID.
6066 * This is ok since same as debugfs, the log file will contain
6067 * information on all contexts of a given PID.
6068 */
6069 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6070 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6071 if (!IS_ERR(binderfs_entry)) {
6072 proc->binderfs_entry = binderfs_entry;
6073 } else {
6074 int error;
6075
6076 error = PTR_ERR(binderfs_entry);
6077 pr_warn("Unable to create file %s in binderfs (error %d)\n",
6078 strbuf, error);
6079 }
6080 }
6081
6082 return 0;
6083 }
6084
6085 static int binder_flush(struct file *filp, fl_owner_t id)
6086 {
6087 struct binder_proc *proc = filp->private_data;
6088
6089 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6090
6091 return 0;
6092 }
6093
6094 static void binder_deferred_flush(struct binder_proc *proc)
6095 {
6096 struct rb_node *n;
6097 int wake_count = 0;
6098
6099 binder_inner_proc_lock(proc);
6100 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6101 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6102
6103 thread->looper_need_return = true;
6104 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6105 wake_up_interruptible(&thread->wait);
6106 wake_count++;
6107 }
6108 }
6109 binder_inner_proc_unlock(proc);
6110
6111 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6112 "binder_flush: %d woke %d threads\n", proc->pid,
6113 wake_count);
6114 }
6115
6116 static int binder_release(struct inode *nodp, struct file *filp)
6117 {
6118 struct binder_proc *proc = filp->private_data;
6119
6120 debugfs_remove(proc->debugfs_entry);
6121
6122 if (proc->binderfs_entry) {
6123 simple_recursive_removal(proc->binderfs_entry, NULL);
6124 proc->binderfs_entry = NULL;
6125 }
6126
6127 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6128
6129 return 0;
6130 }
6131
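/*
 * Called during process teardown for each node the dying proc still
 * owns. If other processes hold references, the node is moved onto
 * binder_dead_nodes and a BINDER_WORK_DEAD_BINDER item is queued for
 * every ref that asked for a death notification; otherwise the node
 * is freed.
 */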
6132 static int binder_node_release(struct binder_node *node, int refs)
6133 {
6134 struct binder_ref *ref;
6135 int death = 0;
6136 struct binder_proc *proc = node->proc;
6137
6138 binder_release_work(proc, &node->async_todo);
6139
6140 binder_node_lock(node);
6141 binder_inner_proc_lock(proc);
6142 binder_dequeue_work_ilocked(&node->work);
6143 /*
6144 * The caller must have taken a temporary ref on the node.
6145 */
6146 BUG_ON(!node->tmp_refs);
6147 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6148 binder_inner_proc_unlock(proc);
6149 binder_node_unlock(node);
6150 binder_free_node(node);
6151
6152 return refs;
6153 }
6154
6155 node->proc = NULL;
6156 node->local_strong_refs = 0;
6157 node->local_weak_refs = 0;
6158 binder_inner_proc_unlock(proc);
6159
6160 spin_lock(&binder_dead_nodes_lock);
6161 hlist_add_head(&node->dead_node, &binder_dead_nodes);
6162 spin_unlock(&binder_dead_nodes_lock);
6163
6164 hlist_for_each_entry(ref, &node->refs, node_entry) {
6165 refs++;
6166 /*
6167 * Need the node lock to synchronize
6168 * with new notification requests and the
6169 * inner lock to synchronize with queued
6170 * death notifications.
6171 */
6172 binder_inner_proc_lock(ref->proc);
6173 if (!ref->death) {
6174 binder_inner_proc_unlock(ref->proc);
6175 continue;
6176 }
6177
6178 death++;
6179
6180 BUG_ON(!list_empty(&ref->death->work.entry));
6181 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6182 binder_enqueue_work_ilocked(&ref->death->work,
6183 &ref->proc->todo);
6184 binder_wakeup_proc_ilocked(ref->proc);
6185 binder_inner_proc_unlock(ref->proc);
6186 }
6187
6188 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6189 "node %d now dead, refs %d, death %d\n",
6190 node->debug_id, refs, death);
6191 binder_node_unlock(node);
6192 binder_put_node(node);
6193
6194 return refs;
6195 }
6196
6197 static void binder_deferred_release(struct binder_proc *proc)
6198 {
6199 struct binder_context *context = proc->context;
6200 struct rb_node *n;
6201 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6202
6203 mutex_lock(&binder_procs_lock);
6204 hlist_del(&proc->proc_node);
6205 mutex_unlock(&binder_procs_lock);
6206
6207 mutex_lock(&context->context_mgr_node_lock);
6208 if (context->binder_context_mgr_node &&
6209 context->binder_context_mgr_node->proc == proc) {
6210 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6211 "%s: %d context_mgr_node gone\n",
6212 __func__, proc->pid);
6213 context->binder_context_mgr_node = NULL;
6214 }
6215 mutex_unlock(&context->context_mgr_node_lock);
6216 binder_inner_proc_lock(proc);
6217 /*
6218 * Make sure proc stays alive after we
6219 * remove all the threads
6220 */
6221 proc->tmp_ref++;
6222
6223 proc->is_dead = true;
6224 proc->is_frozen = false;
6225 proc->sync_recv = false;
6226 proc->async_recv = false;
6227 threads = 0;
6228 active_transactions = 0;
6229 while ((n = rb_first(&proc->threads))) {
6230 struct binder_thread *thread;
6231
6232 thread = rb_entry(n, struct binder_thread, rb_node);
6233 binder_inner_proc_unlock(proc);
6234 threads++;
6235 active_transactions += binder_thread_release(proc, thread);
6236 binder_inner_proc_lock(proc);
6237 }
6238
6239 nodes = 0;
6240 incoming_refs = 0;
6241 while ((n = rb_first(&proc->nodes))) {
6242 struct binder_node *node;
6243
6244 node = rb_entry(n, struct binder_node, rb_node);
6245 nodes++;
6246 /*
6247 * take a temporary ref on the node before
6248 * calling binder_node_release() which will either
6249 * kfree() the node or call binder_put_node()
6250 */
6251 binder_inc_node_tmpref_ilocked(node);
6252 rb_erase(&node->rb_node, &proc->nodes);
6253 binder_inner_proc_unlock(proc);
6254 incoming_refs = binder_node_release(node, incoming_refs);
6255 binder_inner_proc_lock(proc);
6256 }
6257 binder_inner_proc_unlock(proc);
6258
6259 outgoing_refs = 0;
6260 binder_proc_lock(proc);
6261 while ((n = rb_first(&proc->refs_by_desc))) {
6262 struct binder_ref *ref;
6263
6264 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6265 outgoing_refs++;
6266 binder_cleanup_ref_olocked(ref);
6267 binder_proc_unlock(proc);
6268 binder_free_ref(ref);
6269 binder_proc_lock(proc);
6270 }
6271 binder_proc_unlock(proc);
6272
6273 binder_release_work(proc, &proc->todo);
6274 binder_release_work(proc, &proc->delivered_death);
6275 binder_release_work(proc, &proc->delivered_freeze);
6276
6277 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6278 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6279 __func__, proc->pid, threads, nodes, incoming_refs,
6280 outgoing_refs, active_transactions);
6281
6282 binder_proc_dec_tmpref(proc);
6283 }
6284
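/*
 * flush and release are not processed inline: binder_defer_work()
 * records the pending operation and schedules this worker, which
 * drains binder_deferred_list one proc at a time so heavyweight
 * teardown runs in workqueue context.
 */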
6285 static void binder_deferred_func(struct work_struct *work)
6286 {
6287 struct binder_proc *proc;
6288
6289 int defer;
6290
6291 do {
6292 mutex_lock(&binder_deferred_lock);
6293 if (!hlist_empty(&binder_deferred_list)) {
6294 proc = hlist_entry(binder_deferred_list.first,
6295 struct binder_proc, deferred_work_node);
6296 hlist_del_init(&proc->deferred_work_node);
6297 defer = proc->deferred_work;
6298 proc->deferred_work = 0;
6299 } else {
6300 proc = NULL;
6301 defer = 0;
6302 }
6303 mutex_unlock(&binder_deferred_lock);
6304
6305 if (defer & BINDER_DEFERRED_FLUSH)
6306 binder_deferred_flush(proc);
6307
6308 if (defer & BINDER_DEFERRED_RELEASE)
6309 binder_deferred_release(proc); /* frees proc */
6310 } while (proc);
6311 }
6312 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6313
6314 static void
6315 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6316 {
6317 mutex_lock(&binder_deferred_lock);
6318 proc->deferred_work |= defer;
6319 if (hlist_unhashed(&proc->deferred_work_node)) {
6320 hlist_add_head(&proc->deferred_work_node,
6321 &binder_deferred_list);
6322 schedule_work(&binder_deferred_work);
6323 }
6324 mutex_unlock(&binder_deferred_lock);
6325 }
6326
6327 static void print_binder_transaction_ilocked(struct seq_file *m,
6328 struct binder_proc *proc,
6329 const char *prefix,
6330 struct binder_transaction *t)
6331 {
6332 struct binder_proc *to_proc;
6333 struct binder_buffer *buffer = t->buffer;
6334 ktime_t current_time = ktime_get();
6335
6336 spin_lock(&t->lock);
6337 to_proc = t->to_proc;
6338 seq_printf(m,
6339 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6340 prefix, t->debug_id, t,
6341 t->from_pid,
6342 t->from_tid,
6343 to_proc ? to_proc->pid : 0,
6344 t->to_thread ? t->to_thread->pid : 0,
6345 t->code, t->flags, t->priority, t->need_reply,
6346 ktime_ms_delta(current_time, t->start_time));
6347 spin_unlock(&t->lock);
6348
6349 if (proc != to_proc) {
6350 /*
6351 * Can only safely deref buffer if we are holding the
6352 * correct proc inner lock for this node
6353 */
6354 seq_puts(m, "\n");
6355 return;
6356 }
6357
6358 if (buffer == NULL) {
6359 seq_puts(m, " buffer free\n");
6360 return;
6361 }
6362 if (buffer->target_node)
6363 seq_printf(m, " node %d", buffer->target_node->debug_id);
6364 seq_printf(m, " size %zd:%zd offset %lx\n",
6365 buffer->data_size, buffer->offsets_size,
6366 buffer->user_data - proc->alloc.vm_start);
6367 }
6368
6369 static void print_binder_work_ilocked(struct seq_file *m,
6370 struct binder_proc *proc,
6371 const char *prefix,
6372 const char *transaction_prefix,
6373 struct binder_work *w, bool hash_ptrs)
6374 {
6375 struct binder_node *node;
6376 struct binder_transaction *t;
6377
6378 switch (w->type) {
6379 case BINDER_WORK_TRANSACTION:
6380 t = container_of(w, struct binder_transaction, work);
6381 print_binder_transaction_ilocked(
6382 m, proc, transaction_prefix, t);
6383 break;
6384 case BINDER_WORK_RETURN_ERROR: {
6385 struct binder_error *e = container_of(
6386 w, struct binder_error, work);
6387
6388 seq_printf(m, "%stransaction error: %u\n",
6389 prefix, e->cmd);
6390 } break;
6391 case BINDER_WORK_TRANSACTION_COMPLETE:
6392 seq_printf(m, "%stransaction complete\n", prefix);
6393 break;
6394 case BINDER_WORK_NODE:
6395 node = container_of(w, struct binder_node, work);
6396 if (hash_ptrs)
6397 seq_printf(m, "%snode work %d: u%p c%p\n",
6398 prefix, node->debug_id,
6399 (void *)(long)node->ptr,
6400 (void *)(long)node->cookie);
6401 else
6402 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6403 prefix, node->debug_id,
6404 (u64)node->ptr, (u64)node->cookie);
6405 break;
6406 case BINDER_WORK_DEAD_BINDER:
6407 seq_printf(m, "%shas dead binder\n", prefix);
6408 break;
6409 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6410 seq_printf(m, "%shas cleared dead binder\n", prefix);
6411 break;
6412 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6413 seq_printf(m, "%shas cleared death notification\n", prefix);
6414 break;
6415 case BINDER_WORK_FROZEN_BINDER:
6416 seq_printf(m, "%shas frozen binder\n", prefix);
6417 break;
6418 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6419 seq_printf(m, "%shas cleared freeze notification\n", prefix);
6420 break;
6421 default:
6422 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6423 break;
6424 }
6425 }
6426
6427 static void print_binder_thread_ilocked(struct seq_file *m,
6428 struct binder_thread *thread,
6429 bool print_always, bool hash_ptrs)
6430 {
6431 struct binder_transaction *t;
6432 struct binder_work *w;
6433 size_t start_pos = m->count;
6434 size_t header_pos;
6435
6436 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6437 thread->pid, thread->looper,
6438 thread->looper_need_return,
6439 atomic_read(&thread->tmp_ref));
6440 header_pos = m->count;
6441 t = thread->transaction_stack;
6442 while (t) {
6443 if (t->from == thread) {
6444 print_binder_transaction_ilocked(m, thread->proc,
6445 " outgoing transaction", t);
6446 t = t->from_parent;
6447 } else if (t->to_thread == thread) {
6448 print_binder_transaction_ilocked(m, thread->proc,
6449 " incoming transaction", t);
6450 t = t->to_parent;
6451 } else {
6452 print_binder_transaction_ilocked(m, thread->proc,
6453 " bad transaction", t);
6454 t = NULL;
6455 }
6456 }
6457 list_for_each_entry(w, &thread->todo, entry) {
6458 print_binder_work_ilocked(m, thread->proc, " ",
6459 " pending transaction",
6460 w, hash_ptrs);
6461 }
6462 if (!print_always && m->count == header_pos)
6463 m->count = start_pos;
6464 }
6465
6466 static void print_binder_node_nilocked(struct seq_file *m,
6467 struct binder_node *node,
6468 bool hash_ptrs)
6469 {
6470 struct binder_ref *ref;
6471 struct binder_work *w;
6472 int count;
6473
6474 count = hlist_count_nodes(&node->refs);
6475
6476 if (hash_ptrs)
6477 seq_printf(m, " node %d: u%p c%p", node->debug_id,
6478 (void *)(long)node->ptr, (void *)(long)node->cookie);
6479 else
6480 seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
6481 (u64)node->ptr, (u64)node->cookie);
6482 seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6483 node->has_strong_ref, node->has_weak_ref,
6484 node->local_strong_refs, node->local_weak_refs,
6485 node->internal_strong_refs, count, node->tmp_refs);
6486 if (count) {
6487 seq_puts(m, " proc");
6488 hlist_for_each_entry(ref, &node->refs, node_entry)
6489 seq_printf(m, " %d", ref->proc->pid);
6490 }
6491 seq_puts(m, "\n");
6492 if (node->proc) {
6493 list_for_each_entry(w, &node->async_todo, entry)
6494 print_binder_work_ilocked(m, node->proc, " ",
6495 " pending async transaction",
6496 w, hash_ptrs);
6497 }
6498 }
6499
6500 static void print_binder_ref_olocked(struct seq_file *m,
6501 struct binder_ref *ref)
6502 {
6503 binder_node_lock(ref->node);
6504 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6505 ref->data.debug_id, ref->data.desc,
6506 ref->node->proc ? "" : "dead ",
6507 ref->node->debug_id, ref->data.strong,
6508 ref->data.weak, ref->death);
6509 binder_node_unlock(ref->node);
6510 }
6511
6512 /**
6513 * print_next_binder_node_ilocked() - Print binder_node from a locked list
6514 * @m: struct seq_file for output via seq_printf()
6515 * @proc: struct binder_proc we hold the inner_proc_lock to (if any)
6516 * @node: struct binder_node to print fields of
6517 * @prev_node: struct binder_node we hold a temporary reference to (if any)
6518 * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
6519 *
6520 * Helper function to handle synchronization around printing a struct
6521 * binder_node while iterating through @proc->nodes or the dead nodes list.
6522 * Caller must hold either @proc->inner_lock (for live nodes) or
6523 * binder_dead_nodes_lock. This lock will be released during the body of this
6524 * function, but it will be reacquired before returning to the caller.
6525 *
6526 * Return: pointer to the struct binder_node we hold a tmpref on
6527 */
6528 static struct binder_node *
6529 print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6530 struct binder_node *node,
6531 struct binder_node *prev_node, bool hash_ptrs)
6532 {
6533 /*
6534 * Take a temporary reference on the node so that it isn't freed while
6535 * we print it.
6536 */
6537 binder_inc_node_tmpref_ilocked(node);
6538 /*
6539 * Live nodes need to drop the inner proc lock and dead nodes need to
6540 * drop the binder_dead_nodes_lock before trying to take the node lock.
6541 */
6542 if (proc)
6543 binder_inner_proc_unlock(proc);
6544 else
6545 spin_unlock(&binder_dead_nodes_lock);
6546 if (prev_node)
6547 binder_put_node(prev_node);
6548 binder_node_inner_lock(node);
6549 print_binder_node_nilocked(m, node, hash_ptrs);
6550 binder_node_inner_unlock(node);
6551 if (proc)
6552 binder_inner_proc_lock(proc);
6553 else
6554 spin_lock(&binder_dead_nodes_lock);
6555 return node;
6556 }
6557
6558 static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6559 bool print_all, bool hash_ptrs)
6560 {
6561 struct binder_work *w;
6562 struct rb_node *n;
6563 size_t start_pos = m->count;
6564 size_t header_pos;
6565 struct binder_node *last_node = NULL;
6566
6567 seq_printf(m, "proc %d\n", proc->pid);
6568 seq_printf(m, "context %s\n", proc->context->name);
6569 header_pos = m->count;
6570
6571 binder_inner_proc_lock(proc);
6572 for (n = rb_first(&proc->threads); n; n = rb_next(n))
6573 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6574 rb_node), print_all, hash_ptrs);
6575
6576 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6577 struct binder_node *node = rb_entry(n, struct binder_node,
6578 rb_node);
6579 if (!print_all && !node->has_async_transaction)
6580 continue;
6581
6582 last_node = print_next_binder_node_ilocked(m, proc, node,
6583 last_node,
6584 hash_ptrs);
6585 }
6586 binder_inner_proc_unlock(proc);
6587 if (last_node)
6588 binder_put_node(last_node);
6589
6590 if (print_all) {
6591 binder_proc_lock(proc);
6592 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6593 print_binder_ref_olocked(m, rb_entry(n,
6594 struct binder_ref,
6595 rb_node_desc));
6596 binder_proc_unlock(proc);
6597 }
6598 binder_alloc_print_allocated(m, &proc->alloc);
6599 binder_inner_proc_lock(proc);
6600 list_for_each_entry(w, &proc->todo, entry)
6601 print_binder_work_ilocked(m, proc, " ",
6602 " pending transaction", w,
6603 hash_ptrs);
6604 list_for_each_entry(w, &proc->delivered_death, entry) {
6605 seq_puts(m, " has delivered dead binder\n");
6606 break;
6607 }
6608 list_for_each_entry(w, &proc->delivered_freeze, entry) {
6609 seq_puts(m, " has delivered freeze binder\n");
6610 break;
6611 }
6612 binder_inner_proc_unlock(proc);
6613 if (!print_all && m->count == header_pos)
6614 m->count = start_pos;
6615 }
6616
6617 static const char * const binder_return_strings[] = {
6618 "BR_ERROR",
6619 "BR_OK",
6620 "BR_TRANSACTION",
6621 "BR_REPLY",
6622 "BR_ACQUIRE_RESULT",
6623 "BR_DEAD_REPLY",
6624 "BR_TRANSACTION_COMPLETE",
6625 "BR_INCREFS",
6626 "BR_ACQUIRE",
6627 "BR_RELEASE",
6628 "BR_DECREFS",
6629 "BR_ATTEMPT_ACQUIRE",
6630 "BR_NOOP",
6631 "BR_SPAWN_LOOPER",
6632 "BR_FINISHED",
6633 "BR_DEAD_BINDER",
6634 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6635 "BR_FAILED_REPLY",
6636 "BR_FROZEN_REPLY",
6637 "BR_ONEWAY_SPAM_SUSPECT",
6638 "BR_TRANSACTION_PENDING_FROZEN",
6639 "BR_FROZEN_BINDER",
6640 "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6641 };
6642
6643 static const char * const binder_command_strings[] = {
6644 "BC_TRANSACTION",
6645 "BC_REPLY",
6646 "BC_ACQUIRE_RESULT",
6647 "BC_FREE_BUFFER",
6648 "BC_INCREFS",
6649 "BC_ACQUIRE",
6650 "BC_RELEASE",
6651 "BC_DECREFS",
6652 "BC_INCREFS_DONE",
6653 "BC_ACQUIRE_DONE",
6654 "BC_ATTEMPT_ACQUIRE",
6655 "BC_REGISTER_LOOPER",
6656 "BC_ENTER_LOOPER",
6657 "BC_EXIT_LOOPER",
6658 "BC_REQUEST_DEATH_NOTIFICATION",
6659 "BC_CLEAR_DEATH_NOTIFICATION",
6660 "BC_DEAD_BINDER_DONE",
6661 "BC_TRANSACTION_SG",
6662 "BC_REPLY_SG",
6663 "BC_REQUEST_FREEZE_NOTIFICATION",
6664 "BC_CLEAR_FREEZE_NOTIFICATION",
6665 "BC_FREEZE_NOTIFICATION_DONE",
6666 };
6667
6668 static const char * const binder_objstat_strings[] = {
6669 "proc",
6670 "thread",
6671 "node",
6672 "ref",
6673 "death",
6674 "transaction",
6675 "transaction_complete",
6676 "freeze",
6677 };
6678
6679 static void print_binder_stats(struct seq_file *m, const char *prefix,
6680 struct binder_stats *stats)
6681 {
6682 int i;
6683
6684 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6685 ARRAY_SIZE(binder_command_strings));
6686 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6687 int temp = atomic_read(&stats->bc[i]);
6688
6689 if (temp)
6690 seq_printf(m, "%s%s: %d\n", prefix,
6691 binder_command_strings[i], temp);
6692 }
6693
6694 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6695 ARRAY_SIZE(binder_return_strings));
6696 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6697 int temp = atomic_read(&stats->br[i]);
6698
6699 if (temp)
6700 seq_printf(m, "%s%s: %d\n", prefix,
6701 binder_return_strings[i], temp);
6702 }
6703
6704 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6705 ARRAY_SIZE(binder_objstat_strings));
6706 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6707 ARRAY_SIZE(stats->obj_deleted));
6708 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6709 int created = atomic_read(&stats->obj_created[i]);
6710 int deleted = atomic_read(&stats->obj_deleted[i]);
6711
6712 if (created || deleted)
6713 seq_printf(m, "%s%s: active %d total %d\n",
6714 prefix,
6715 binder_objstat_strings[i],
6716 created - deleted,
6717 created);
6718 }
6719 }
6720
6721 static void print_binder_proc_stats(struct seq_file *m,
6722 struct binder_proc *proc)
6723 {
6724 struct binder_work *w;
6725 struct binder_thread *thread;
6726 struct rb_node *n;
6727 int count, strong, weak, ready_threads;
6728 size_t free_async_space =
6729 binder_alloc_get_free_async_space(&proc->alloc);
6730
6731 seq_printf(m, "proc %d\n", proc->pid);
6732 seq_printf(m, "context %s\n", proc->context->name);
6733 count = 0;
6734 ready_threads = 0;
6735 binder_inner_proc_lock(proc);
6736 for (n = rb_first(&proc->threads); n; n = rb_next(n))
6737 count++;
6738
6739 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6740 ready_threads++;
6741
6742 seq_printf(m, " threads: %d\n", count);
6743 seq_printf(m, " requested threads: %d+%d/%d\n"
6744 " ready threads %d\n"
6745 " free async space %zd\n", proc->requested_threads,
6746 proc->requested_threads_started, proc->max_threads,
6747 ready_threads,
6748 free_async_space);
6749 count = 0;
6750 for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6751 count++;
6752 binder_inner_proc_unlock(proc);
6753 seq_printf(m, " nodes: %d\n", count);
6754 count = 0;
6755 strong = 0;
6756 weak = 0;
6757 binder_proc_lock(proc);
6758 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6759 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6760 rb_node_desc);
6761 count++;
6762 strong += ref->data.strong;
6763 weak += ref->data.weak;
6764 }
6765 binder_proc_unlock(proc);
6766 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6767
6768 count = binder_alloc_get_allocated_count(&proc->alloc);
6769 seq_printf(m, " buffers: %d\n", count);
6770
6771 binder_alloc_print_pages(m, &proc->alloc);
6772
6773 count = 0;
6774 binder_inner_proc_lock(proc);
6775 list_for_each_entry(w, &proc->todo, entry) {
6776 if (w->type == BINDER_WORK_TRANSACTION)
6777 count++;
6778 }
6779 binder_inner_proc_unlock(proc);
6780 seq_printf(m, " pending transactions: %d\n", count);
6781
6782 print_binder_stats(m, " ", &proc->stats);
6783 }
6784
6785 static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6786 {
6787 struct binder_proc *proc;
6788 struct binder_node *node;
6789 struct binder_node *last_node = NULL;
6790
6791 seq_puts(m, "binder state:\n");
6792
6793 spin_lock(&binder_dead_nodes_lock);
6794 if (!hlist_empty(&binder_dead_nodes))
6795 seq_puts(m, "dead nodes:\n");
6796 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6797 last_node = print_next_binder_node_ilocked(m, NULL, node,
6798 last_node,
6799 hash_ptrs);
6800 spin_unlock(&binder_dead_nodes_lock);
6801 if (last_node)
6802 binder_put_node(last_node);
6803
6804 mutex_lock(&binder_procs_lock);
6805 hlist_for_each_entry(proc, &binder_procs, proc_node)
6806 print_binder_proc(m, proc, true, hash_ptrs);
6807 mutex_unlock(&binder_procs_lock);
6808 }
6809
6810 static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6811 {
6812 struct binder_proc *proc;
6813
6814 seq_puts(m, "binder transactions:\n");
6815 mutex_lock(&binder_procs_lock);
6816 hlist_for_each_entry(proc, &binder_procs, proc_node)
6817 print_binder_proc(m, proc, false, hash_ptrs);
6818 mutex_unlock(&binder_procs_lock);
6819 }
6820
6821 static int state_show(struct seq_file *m, void *unused)
6822 {
6823 print_binder_state(m, false);
6824 return 0;
6825 }
6826
6827 static int state_hashed_show(struct seq_file *m, void *unused)
6828 {
6829 print_binder_state(m, true);
6830 return 0;
6831 }
6832
6833 static int stats_show(struct seq_file *m, void *unused)
6834 {
6835 struct binder_proc *proc;
6836
6837 seq_puts(m, "binder stats:\n");
6838
6839 print_binder_stats(m, "", &binder_stats);
6840
6841 mutex_lock(&binder_procs_lock);
6842 hlist_for_each_entry(proc, &binder_procs, proc_node)
6843 print_binder_proc_stats(m, proc);
6844 mutex_unlock(&binder_procs_lock);
6845
6846 return 0;
6847 }
6848
6849 static int transactions_show(struct seq_file *m, void *unused)
6850 {
6851 print_binder_transactions(m, false);
6852 return 0;
6853 }
6854
transactions_hashed_show(struct seq_file * m,void * unused)6855 static int transactions_hashed_show(struct seq_file *m, void *unused)
6856 {
6857 print_binder_transactions(m, true);
6858 return 0;
6859 }
6860
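/*
 * Backs the per-process files in binder's debugfs "proc/" directory;
 * m->private carries the pid the file was created for. A single pid
 * can match more than one binder_proc when the process has opened
 * several binder devices (contexts), so every match is printed.
 */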
static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, true, false);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

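/*
 * Log entries are written without holding a lock, so a reader can
 * race with a writer that is recycling the same slot. The writer
 * clears debug_id_done before it starts filling in an entry and
 * stores the entry's debug_id there only once every field has been
 * written, with write barriers in between. The reader below samples
 * debug_id_done before and after dumping the fields: the entry is
 * complete only if the first sample was non-zero and both samples
 * match. The smp_rmb() calls pair with the writer's smp_wmb().
 */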
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

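/*
 * The log is a fixed-size ring indexed by the free-running counter
 * log->cur (32 entries at the time of writing; a power-of-two size
 * keeps the modulo consistent when the 32-bit counter wraps). Until
 * the ring first fills, entries 0..cur are printed in order; once it
 * is full, printing starts at the oldest slot, (cur + 1) % size, and
 * walks all size entries. For example, with size 32 and cur == 40,
 * count becomes 32 and the walk starts at index 41 % 32 == 9, the
 * slot that would be overwritten next.
 */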
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

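/* Userspace entry points for /dev/binder* and binderfs device nodes. */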
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

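/*
 * DEFINE_SHOW_ATTRIBUTE(name) generates name_open() and name_fops
 * around the name_show() functions above, using single_open(); this
 * is what lets the debugfs table below reference state_fops,
 * stats_fops, etc.
 */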
DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
		.data = NULL,
	},
	{
		.name = "state_hashed",
		.mode = 0444,
		.fops = &state_hashed_fops,
		.data = NULL,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
		.data = NULL,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
		.data = NULL,
	},
	{
		.name = "transactions_hashed",
		.mode = 0444,
		.fops = &transactions_hashed_fops,
		.data = NULL,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};

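/*
 * The table above is walked at init time (see binder_init() below) to
 * populate /sys/kernel/debug/binder/; e.g. reading
 * /sys/kernel/debug/binder/state dumps every live proc. Note that the
 * two log files share transaction_log_fops and differ only in which
 * ring buffer their .data points at.
 */

/*
 * Binder device instances created at runtime (for example by
 * binderfs) use these helpers to join and leave the global
 * binder_devices list under binder_devices_lock.
 */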
void binder_add_device(struct binder_device *device)
{
	spin_lock(&binder_devices_lock);
	hlist_add_head(&device->hlist, &binder_devices);
	spin_unlock(&binder_devices_lock);
}

void binder_remove_device(struct binder_device *device)
{
	spin_lock(&binder_devices_lock);
	hlist_del_init(&device->hlist);
	spin_unlock(&binder_devices_lock);
}

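/*
 * Register one binder misc character device under the given name.
 * MISC_DYNAMIC_MINOR lets the misc core pick a free minor, so the
 * device node (e.g. /dev/binder) appears without reserving a fixed
 * dev_t.
 */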
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	binder_add_device(binder_device);

	return ret;
}

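/*
 * Boot-time initialization. When binderfs is enabled, static device
 * creation is skipped entirely and left to binderfs; otherwise each
 * comma-separated name in binder_devices_param gets its own misc
 * device (the usual default, taken from CONFIG_ANDROID_BINDER_DEVICES,
 * is "binder,hwbinder,vndbinder"). Errors unwind in reverse order of
 * setup.
 */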
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

	binder_for_each_debugfs_entry(db_entry)
		debugfs_create_file(db_entry->name,
				    db_entry->mode,
				    binder_debugfs_dir_entry_root,
				    db_entry->data,
				    db_entry->fops);

	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		binder_remove_device(device);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

device_initcall(binder_init);

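/*
 * CREATE_TRACE_POINTS must be defined in exactly one translation unit
 * before including the trace header; this second include of
 * binder_trace.h instantiates the tracepoints that the include at the
 * top of this file only declared.
 */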
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");