// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
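
/*
 * Illustrative sketch of the rules above: when several of these locks
 * are needed at once, they are acquired in the documented order and
 * released in reverse:
 *
 *	binder_proc_lock(proc);
 *	binder_node_lock(node);
 *	binder_inner_proc_lock(proc);
 *	... touch state protected by all three locks ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */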

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
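
/*
 * Usage sketch for the macro above (thread->ee and t_debug_id are
 * assumed here, following how extended errors are recorded elsewhere
 * in this driver):
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */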

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
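
/*
 * The smp_wmb() above is assumed to pair with an smp_rmb() on the read
 * side (where log entries are printed), so that a reader observing a
 * non-zero debug_id_done also observes fully written entry fields.
 */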

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
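
/*
 * Rough mapping of the looper states above (an assumption based on the
 * userspace protocol, not stated in this file): REGISTERED and ENTERED
 * are set in response to the BC_REGISTER_LOOPER and BC_ENTER_LOOPER
 * commands, EXITED on BC_EXIT_LOOPER, INVALID on a bad combination of
 * those commands, WAITING while a thread is blocked waiting for work,
 * and POLL once a thread has waited via the driver's poll() hook.
 */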

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields.
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such a
	 * thread risks waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
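
/*
 * Sketch of the intended select-then-wakeup pattern described above
 * (under proc->inner_lock):
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	... queue work on thread->todo (or proc->todo if thread is NULL) ...
 *	binder_wakeup_thread_ilocked(proc, thread, sync);
 */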

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
					struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node_nilocked() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}
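
/*
 * Worked example for slow_desc_lookup_olocked(): with descriptors
 * {1, 2, 4} in refs_by_desc and offset == 1, desc advances to 2, then 3;
 * the next entry (4) is greater than 3, so the loop breaks and 3, the
 * smallest unused descriptor >= offset, is returned.
 */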

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}
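
/*
 * Note on the -EAGAIN contract above: once proc->outer_lock has been
 * dropped and retaken, any rb-tree position computed earlier may be
 * stale, so callers are expected to restart their lookup from scratch
 * (see the retry label in binder_get_ref_for_node_olocked() below).
 */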

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different than the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction being torn down
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
1742
1743 /**
1744 * binder_cleanup_transaction() - cleans up undelivered transaction
1745 * @t: transaction that needs to be cleaned up
1746 * @reason: reason the transaction wasn't delivered
1747 * @error_code: error to return to caller (if synchronous call)
1748 */
1749 static void binder_cleanup_transaction(struct binder_transaction *t,
1750 const char *reason,
1751 uint32_t error_code)
1752 {
1753 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1754 binder_send_failed_reply(t, error_code);
1755 } else {
1756 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1757 "undelivered transaction %d, %s\n",
1758 t->debug_id, reason);
1759 binder_free_transaction(t);
1760 }
1761 }
1762
1763 /**
1764 * binder_get_object() - gets object and checks for valid metadata
1765 * @proc: binder_proc owning the buffer
1766 * @u: sender's user pointer to base of buffer
1767 * @buffer: binder_buffer that we're parsing.
1768 * @offset: offset in the @buffer at which to validate an object.
1769 * @object: struct binder_object to read into
1770 *
1771 * Copy the binder object at the given offset into @object. If @u is
1772 * provided then the copy is from the sender's buffer. If not, then
1773 * it is copied from the target's @buffer.
1774 *
1775 * Return: If there's a valid metadata object at @offset, the
1776 * size of that object. Otherwise, it returns zero. The object
1777 * is read into the struct binder_object pointed to by @object.
1778 */
1779 static size_t binder_get_object(struct binder_proc *proc,
1780 const void __user *u,
1781 struct binder_buffer *buffer,
1782 unsigned long offset,
1783 struct binder_object *object)
1784 {
1785 size_t read_size;
1786 struct binder_object_header *hdr;
1787 size_t object_size = 0;
1788
1789 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1790 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1791 !IS_ALIGNED(offset, sizeof(u32)))
1792 return 0;
1793
1794 if (u) {
1795 if (copy_from_user(object, u + offset, read_size))
1796 return 0;
1797 } else {
1798 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1799 offset, read_size))
1800 return 0;
1801 }
1802
1803 /* Ok, now see if we read a complete object. */
1804 hdr = &object->hdr;
1805 switch (hdr->type) {
1806 case BINDER_TYPE_BINDER:
1807 case BINDER_TYPE_WEAK_BINDER:
1808 case BINDER_TYPE_HANDLE:
1809 case BINDER_TYPE_WEAK_HANDLE:
1810 object_size = sizeof(struct flat_binder_object);
1811 break;
1812 case BINDER_TYPE_FD:
1813 object_size = sizeof(struct binder_fd_object);
1814 break;
1815 case BINDER_TYPE_PTR:
1816 object_size = sizeof(struct binder_buffer_object);
1817 break;
1818 case BINDER_TYPE_FDA:
1819 object_size = sizeof(struct binder_fd_array_object);
1820 break;
1821 default:
1822 return 0;
1823 }
1824 if (offset <= buffer->data_size - object_size &&
1825 buffer->data_size >= object_size)
1826 return object_size;
1827 else
1828 return 0;
1829 }
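/*
 * Editorial sketch (not part of the driver): the sender-side layout that
 * binder_get_object() later validates. Objects are embedded in the data
 * buffer and located via a u32-aligned offsets array; local_ptr and
 * local_cookie below are hypothetical placeholders.
 */
#if 0
	struct flat_binder_object obj = {
		.hdr.type = BINDER_TYPE_BINDER,
		.binder   = (binder_uintptr_t)(uintptr_t)local_ptr,
		.cookie   = (binder_uintptr_t)(uintptr_t)local_cookie,
	};
	binder_size_t offsets[] = { 0 };	/* obj sits at offset 0 */
	struct binder_transaction_data tr = {
		.data_size        = sizeof(obj),
		.offsets_size     = sizeof(offsets),
		.data.ptr.buffer  = (binder_uintptr_t)(uintptr_t)&obj,
		.data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets,
	};
#endif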
1830
1831 /**
1832 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1833 * @proc: binder_proc owning the buffer
1834 * @b: binder_buffer containing the object
1835 * @object: struct binder_object to read into
1836 * @index: index in offset array at which the binder_buffer_object is
1837 * located
1838 * @start_offset: points to the start of the offset array
1839 * @object_offsetp: offset of @object read from @b
1840 * @num_valid: the number of valid offsets in the offset array
1841 *
1842 * Return: If @index is within the valid range of the offset array
1843 * described by @start_offset and @num_valid, and if there's a valid
1844 * binder_buffer_object at the offset found in index @index
1845 * of the offset array, that object is returned. Otherwise,
1846 * %NULL is returned.
1847 * Note that the offset found in index @index itself is not
1848 * verified; this function assumes that @num_valid elements
1849 * from @start_offset were previously verified to have valid offsets.
1850 * If @object_offsetp is non-NULL, then the offset within
1851 * @b is written to it.
1852 */
1853 static struct binder_buffer_object *binder_validate_ptr(
1854 struct binder_proc *proc,
1855 struct binder_buffer *b,
1856 struct binder_object *object,
1857 binder_size_t index,
1858 binder_size_t start_offset,
1859 binder_size_t *object_offsetp,
1860 binder_size_t num_valid)
1861 {
1862 size_t object_size;
1863 binder_size_t object_offset;
1864 unsigned long buffer_offset;
1865
1866 if (index >= num_valid)
1867 return NULL;
1868
1869 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1870 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1871 b, buffer_offset,
1872 sizeof(object_offset)))
1873 return NULL;
1874 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1875 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1876 return NULL;
1877 if (object_offsetp)
1878 *object_offsetp = object_offset;
1879
1880 return &object->bbo;
1881 }
1882
1883 /**
1884 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1885 * @proc: binder_proc owning the buffer
1886 * @b: transaction buffer
1887 * @objects_start_offset: offset to start of objects buffer
1888 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1889 * @fixup_offset: start offset in @buffer to fix up
1890 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1891 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1892 *
1893 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1894 * allowed.
1895 *
1896 * For safety reasons, we only allow fixups inside a buffer to happen
1897 * at increasing offsets; additionally, we only allow fixup on the last
1898 * buffer object that was verified, or one of its parents.
1899 *
1900 * Example of what is allowed:
1901 *
1902 * A
1903 * B (parent = A, offset = 0)
1904 * C (parent = A, offset = 16)
1905 * D (parent = C, offset = 0)
1906 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1907 *
1908 * Examples of what is not allowed:
1909 *
1910 * Decreasing offsets within the same parent:
1911 * A
1912 * C (parent = A, offset = 16)
1913 * B (parent = A, offset = 0) // decreasing offset within A
1914 *
1915 * Referring to a parent that wasn't the last object or any of its parents:
1916 * A
1917 * B (parent = A, offset = 0)
1918 * C (parent = A, offset = 0)
1919 * C (parent = A, offset = 16)
1920 * D (parent = B, offset = 0) // B is not C (the last object) or any of C's parents
1921 */
1922 static bool binder_validate_fixup(struct binder_proc *proc,
1923 struct binder_buffer *b,
1924 binder_size_t objects_start_offset,
1925 binder_size_t buffer_obj_offset,
1926 binder_size_t fixup_offset,
1927 binder_size_t last_obj_offset,
1928 binder_size_t last_min_offset)
1929 {
1930 if (!last_obj_offset) {
1931 /* No previously-verified object to fix up into */
1932 return false;
1933 }
1934
1935 while (last_obj_offset != buffer_obj_offset) {
1936 unsigned long buffer_offset;
1937 struct binder_object last_object;
1938 struct binder_buffer_object *last_bbo;
1939 size_t object_size = binder_get_object(proc, NULL, b,
1940 last_obj_offset,
1941 &last_object);
1942 if (object_size != sizeof(*last_bbo))
1943 return false;
1944
1945 last_bbo = &last_object.bbo;
1946 /*
1947 * Safe to retrieve the parent of last_obj, since it
1948 * was already previously verified by the driver.
1949 */
1950 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1951 return false;
1952 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1953 buffer_offset = objects_start_offset +
1954 sizeof(binder_size_t) * last_bbo->parent;
1955 if (binder_alloc_copy_from_buffer(&proc->alloc,
1956 &last_obj_offset,
1957 b, buffer_offset,
1958 sizeof(last_obj_offset)))
1959 return false;
1960 }
1961 return (fixup_offset >= last_min_offset);
1962 }
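/*
 * Worked example (editorial): validating fixup E (parent = A, offset = 32)
 * from the "allowed" diagram above. The last verified object is D, so the
 * parent-chain walk visits D, then C, stopping at A. Visiting D
 * (parent_offset = 0) sets last_min_offset to 0 + sizeof(uintptr_t);
 * visiting C (parent_offset = 16) raises it to 16 + sizeof(uintptr_t).
 * E's fixup_offset of 32 passes the final >= check.
 */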
1963
1964 /**
1965 * struct binder_task_work_cb - for deferred close
1966 *
1967 * @twork: callback_head for task work
1968 * @file: file to fput when the task work runs
1969 *
1970 * Structure to pass task work to be handled after
1971 * returning from binder_ioctl() via task_work_add().
1972 */
1973 struct binder_task_work_cb {
1974 struct callback_head twork;
1975 struct file *file;
1976 };
1977
1978 /**
1979 * binder_do_fd_close() - close list of file descriptors
1980 * @twork: callback head for task work
1981 *
1982 * It is not safe to call ksys_close() during the binder_ioctl()
1983 * function if there is a chance that binder's own file descriptor
1984 * might be closed. This is to meet the requirements for using
1985 * fdget() (see comments for __fget_light()). Therefore use
1986 * task_work_add() to schedule the close operation once we have
1987 * returned from binder_ioctl(). This function is a callback
1988 * for that mechanism and performs the final fput() on the
1989 * file that binder_deferred_fd_close() pinned.
1990 */
1991 static void binder_do_fd_close(struct callback_head *twork)
1992 {
1993 struct binder_task_work_cb *twcb = container_of(twork,
1994 struct binder_task_work_cb, twork);
1995
1996 fput(twcb->file);
1997 kfree(twcb);
1998 }
1999
2000 /**
2001 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2002 * @fd: file-descriptor to close
2003 *
2004 * See comments in binder_do_fd_close(). This function is used to schedule
2005 * a file-descriptor to be closed after returning from binder_ioctl().
2006 */
2007 static void binder_deferred_fd_close(int fd)
2008 {
2009 struct binder_task_work_cb *twcb;
2010
2011 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2012 if (!twcb)
2013 return;
2014 init_task_work(&twcb->twork, binder_do_fd_close);
2015 twcb->file = file_close_fd(fd);
2016 if (twcb->file) {
2017 // pin it until binder_do_fd_close(); see comments there
2018 get_file(twcb->file);
2019 filp_close(twcb->file, current->files);
2020 task_work_add(current, &twcb->twork, TWA_RESUME);
2021 } else {
2022 kfree(twcb);
2023 }
2024 }
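/*
 * Editorial sketch of the task_work deferral pattern used above, with a
 * hypothetical payload. The callback runs in process context after the
 * current syscall returns, outside all of binder's locks.
 */
#if 0
struct my_deferred_free {
	struct callback_head twork;
	void *payload;
};

static void my_deferred_free_fn(struct callback_head *twork)
{
	struct my_deferred_free *w =
		container_of(twork, struct my_deferred_free, twork);

	kfree(w->payload);	/* the deferred cleanup work */
	kfree(w);
}

	/* ... in syscall context, with w allocated and filled: */
	init_task_work(&w->twork, my_deferred_free_fn);
	task_work_add(current, &w->twork, TWA_RESUME);
#endif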
2025
2026 static void binder_transaction_buffer_release(struct binder_proc *proc,
2027 struct binder_thread *thread,
2028 struct binder_buffer *buffer,
2029 binder_size_t off_end_offset,
2030 bool is_failure)
2031 {
2032 int debug_id = buffer->debug_id;
2033 binder_size_t off_start_offset, buffer_offset;
2034
2035 binder_debug(BINDER_DEBUG_TRANSACTION,
2036 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2037 proc->pid, buffer->debug_id,
2038 buffer->data_size, buffer->offsets_size,
2039 (unsigned long long)off_end_offset);
2040
2041 if (buffer->target_node)
2042 binder_dec_node(buffer->target_node, 1, 0);
2043
2044 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2045
2046 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2047 buffer_offset += sizeof(binder_size_t)) {
2048 struct binder_object_header *hdr;
2049 size_t object_size = 0;
2050 struct binder_object object;
2051 binder_size_t object_offset;
2052
2053 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2054 buffer, buffer_offset,
2055 sizeof(object_offset)))
2056 object_size = binder_get_object(proc, NULL, buffer,
2057 object_offset, &object);
2058 if (object_size == 0) {
2059 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2060 debug_id, (u64)object_offset, buffer->data_size);
2061 continue;
2062 }
2063 hdr = &object.hdr;
2064 switch (hdr->type) {
2065 case BINDER_TYPE_BINDER:
2066 case BINDER_TYPE_WEAK_BINDER: {
2067 struct flat_binder_object *fp;
2068 struct binder_node *node;
2069
2070 fp = to_flat_binder_object(hdr);
2071 node = binder_get_node(proc, fp->binder);
2072 if (node == NULL) {
2073 pr_err("transaction release %d bad node %016llx\n",
2074 debug_id, (u64)fp->binder);
2075 break;
2076 }
2077 binder_debug(BINDER_DEBUG_TRANSACTION,
2078 " node %d u%016llx\n",
2079 node->debug_id, (u64)node->ptr);
2080 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2081 0);
2082 binder_put_node(node);
2083 } break;
2084 case BINDER_TYPE_HANDLE:
2085 case BINDER_TYPE_WEAK_HANDLE: {
2086 struct flat_binder_object *fp;
2087 struct binder_ref_data rdata;
2088 int ret;
2089
2090 fp = to_flat_binder_object(hdr);
2091 ret = binder_dec_ref_for_handle(proc, fp->handle,
2092 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2093
2094 if (ret) {
2095 pr_err("transaction release %d bad handle %d, ret = %d\n",
2096 debug_id, fp->handle, ret);
2097 break;
2098 }
2099 binder_debug(BINDER_DEBUG_TRANSACTION,
2100 " ref %d desc %d\n",
2101 rdata.debug_id, rdata.desc);
2102 } break;
2103
2104 case BINDER_TYPE_FD: {
2105 /*
2106 * No need to close the file here since user-space
2107 * closes it for successfully delivered
2108 * transactions. For transactions that weren't
2109 * delivered, the new fd was never allocated so
2110 * there is no need to close and the fput on the
2111 * file is done when the transaction is torn
2112 * down.
2113 */
2114 } break;
2115 case BINDER_TYPE_PTR:
2116 /*
2117 * Nothing to do here, this will get cleaned up when the
2118 * transaction buffer gets freed
2119 */
2120 break;
2121 case BINDER_TYPE_FDA: {
2122 struct binder_fd_array_object *fda;
2123 struct binder_buffer_object *parent;
2124 struct binder_object ptr_object;
2125 binder_size_t fda_offset;
2126 size_t fd_index;
2127 binder_size_t fd_buf_size;
2128 binder_size_t num_valid;
2129
2130 if (is_failure) {
2131 /*
2132 * The fd fixups have not been applied so no
2133 * fds need to be closed.
2134 */
2135 continue;
2136 }
2137
2138 num_valid = (buffer_offset - off_start_offset) /
2139 sizeof(binder_size_t);
2140 fda = to_binder_fd_array_object(hdr);
2141 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2142 fda->parent,
2143 off_start_offset,
2144 NULL,
2145 num_valid);
2146 if (!parent) {
2147 pr_err("transaction release %d bad parent offset\n",
2148 debug_id);
2149 continue;
2150 }
2151 fd_buf_size = sizeof(u32) * fda->num_fds;
2152 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2153 pr_err("transaction release %d invalid number of fds (%lld)\n",
2154 debug_id, (u64)fda->num_fds);
2155 continue;
2156 }
2157 if (fd_buf_size > parent->length ||
2158 fda->parent_offset > parent->length - fd_buf_size) {
2159 /* No space for all file descriptors here. */
2160 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2161 debug_id, (u64)fda->num_fds);
2162 continue;
2163 }
2164 /*
2165 * the source data for binder_buffer_object is visible
2166 * to user-space and the @buffer element is the user
2167 * pointer to the buffer_object containing the fd_array.
2168 * Convert the address to an offset relative to
2169 * the base of the transaction buffer.
2170 */
2171 fda_offset = parent->buffer - buffer->user_data +
2172 fda->parent_offset;
2173 for (fd_index = 0; fd_index < fda->num_fds;
2174 fd_index++) {
2175 u32 fd;
2176 int err;
2177 binder_size_t offset = fda_offset +
2178 fd_index * sizeof(fd);
2179
2180 err = binder_alloc_copy_from_buffer(
2181 &proc->alloc, &fd, buffer,
2182 offset, sizeof(fd));
2183 WARN_ON(err);
2184 if (!err) {
2185 binder_deferred_fd_close(fd);
2186 /*
2187 * Need to make sure the thread goes
2188 * back to userspace to complete the
2189 * deferred close
2190 */
2191 if (thread)
2192 thread->looper_need_return = true;
2193 }
2194 }
2195 } break;
2196 default:
2197 pr_err("transaction release %d bad object type %x\n",
2198 debug_id, hdr->type);
2199 break;
2200 }
2201 }
2202 }
2203
2204 /* Clean up all the objects in the buffer */
2205 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2206 struct binder_thread *thread,
2207 struct binder_buffer *buffer,
2208 bool is_failure)
2209 {
2210 binder_size_t off_end_offset;
2211
2212 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2213 off_end_offset += buffer->offsets_size;
2214
2215 binder_transaction_buffer_release(proc, thread, buffer,
2216 off_end_offset, is_failure);
2217 }
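/*
 * Worked example (editorial): with data_size = 52 and offsets_size = 16 on
 * a 64-bit kernel, the offset array starts at ALIGN(52, 8) = 56 and
 * off_end_offset = 56 + 16 = 72, so the release loop above walks exactly
 * two binder_size_t entries.
 */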
2218
2219 static int binder_translate_binder(struct flat_binder_object *fp,
2220 struct binder_transaction *t,
2221 struct binder_thread *thread)
2222 {
2223 struct binder_node *node;
2224 struct binder_proc *proc = thread->proc;
2225 struct binder_proc *target_proc = t->to_proc;
2226 struct binder_ref_data rdata;
2227 int ret = 0;
2228
2229 node = binder_get_node(proc, fp->binder);
2230 if (!node) {
2231 node = binder_new_node(proc, fp);
2232 if (!node)
2233 return -ENOMEM;
2234 }
2235 if (fp->cookie != node->cookie) {
2236 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2237 proc->pid, thread->pid, (u64)fp->binder,
2238 node->debug_id, (u64)fp->cookie,
2239 (u64)node->cookie);
2240 ret = -EINVAL;
2241 goto done;
2242 }
2243 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2244 ret = -EPERM;
2245 goto done;
2246 }
2247
2248 ret = binder_inc_ref_for_node(target_proc, node,
2249 fp->hdr.type == BINDER_TYPE_BINDER,
2250 &thread->todo, &rdata);
2251 if (ret)
2252 goto done;
2253
2254 if (fp->hdr.type == BINDER_TYPE_BINDER)
2255 fp->hdr.type = BINDER_TYPE_HANDLE;
2256 else
2257 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2258 fp->binder = 0;
2259 fp->handle = rdata.desc;
2260 fp->cookie = 0;
2261
2262 trace_binder_transaction_node_to_ref(t, node, &rdata);
2263 binder_debug(BINDER_DEBUG_TRANSACTION,
2264 " node %d u%016llx -> ref %d desc %d\n",
2265 node->debug_id, (u64)node->ptr,
2266 rdata.debug_id, rdata.desc);
2267 done:
2268 binder_put_node(node);
2269 return ret;
2270 }
2271
2272 static int binder_translate_handle(struct flat_binder_object *fp,
2273 struct binder_transaction *t,
2274 struct binder_thread *thread)
2275 {
2276 struct binder_proc *proc = thread->proc;
2277 struct binder_proc *target_proc = t->to_proc;
2278 struct binder_node *node;
2279 struct binder_ref_data src_rdata;
2280 int ret = 0;
2281
2282 node = binder_get_node_from_ref(proc, fp->handle,
2283 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2284 if (!node) {
2285 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2286 proc->pid, thread->pid, fp->handle);
2287 return -EINVAL;
2288 }
2289 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2290 ret = -EPERM;
2291 goto done;
2292 }
2293
2294 binder_node_lock(node);
2295 if (node->proc == target_proc) {
2296 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2297 fp->hdr.type = BINDER_TYPE_BINDER;
2298 else
2299 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2300 fp->binder = node->ptr;
2301 fp->cookie = node->cookie;
2302 if (node->proc)
2303 binder_inner_proc_lock(node->proc);
2304 else
2305 __acquire(&node->proc->inner_lock);
2306 binder_inc_node_nilocked(node,
2307 fp->hdr.type == BINDER_TYPE_BINDER,
2308 0, NULL);
2309 if (node->proc)
2310 binder_inner_proc_unlock(node->proc);
2311 else
2312 __release(&node->proc->inner_lock);
2313 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2314 binder_debug(BINDER_DEBUG_TRANSACTION,
2315 " ref %d desc %d -> node %d u%016llx\n",
2316 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2317 (u64)node->ptr);
2318 binder_node_unlock(node);
2319 } else {
2320 struct binder_ref_data dest_rdata;
2321
2322 binder_node_unlock(node);
2323 ret = binder_inc_ref_for_node(target_proc, node,
2324 fp->hdr.type == BINDER_TYPE_HANDLE,
2325 NULL, &dest_rdata);
2326 if (ret)
2327 goto done;
2328
2329 fp->binder = 0;
2330 fp->handle = dest_rdata.desc;
2331 fp->cookie = 0;
2332 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2333 &dest_rdata);
2334 binder_debug(BINDER_DEBUG_TRANSACTION,
2335 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2336 src_rdata.debug_id, src_rdata.desc,
2337 dest_rdata.debug_id, dest_rdata.desc,
2338 node->debug_id);
2339 }
2340 done:
2341 binder_put_node(node);
2342 return ret;
2343 }
2344
2345 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2346 struct binder_transaction *t,
2347 struct binder_thread *thread,
2348 struct binder_transaction *in_reply_to)
2349 {
2350 struct binder_proc *proc = thread->proc;
2351 struct binder_proc *target_proc = t->to_proc;
2352 struct binder_txn_fd_fixup *fixup;
2353 struct file *file;
2354 int ret = 0;
2355 bool target_allows_fd;
2356
2357 if (in_reply_to)
2358 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2359 else
2360 target_allows_fd = t->buffer->target_node->accept_fds;
2361 if (!target_allows_fd) {
2362 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2363 proc->pid, thread->pid,
2364 in_reply_to ? "reply" : "transaction",
2365 fd);
2366 ret = -EPERM;
2367 goto err_fd_not_accepted;
2368 }
2369
2370 file = fget(fd);
2371 if (!file) {
2372 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2373 proc->pid, thread->pid, fd);
2374 ret = -EBADF;
2375 goto err_fget;
2376 }
2377 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2378 if (ret < 0) {
2379 ret = -EPERM;
2380 goto err_security;
2381 }
2382
2383 /*
2384 * Add fixup record for this transaction. The allocation
2385 * of the fd in the target needs to be done from a
2386 * target thread.
2387 */
2388 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2389 if (!fixup) {
2390 ret = -ENOMEM;
2391 goto err_alloc;
2392 }
2393 fixup->file = file;
2394 fixup->offset = fd_offset;
2395 fixup->target_fd = -1;
2396 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2397 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2398
2399 return ret;
2400
2401 err_alloc:
2402 err_security:
2403 fput(file);
2404 err_fget:
2405 err_fd_not_accepted:
2406 return ret;
2407 }
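/*
 * Editorial note: no fd is allocated in the target here; the fixup queued
 * above is consumed later in the target process's context, where the real
 * fd number is installed. The BINDER_TYPE_FD case in
 * binder_transaction_buffer_release() above documents the matching
 * cleanup rules.
 */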
2408
2409 /**
2410 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2411 * @offset: offset in target buffer to fixup
2412 * @skip_size: bytes to skip in copy (fixup will be written later)
2413 * @fixup_data: data to write at fixup offset
2414 * @node: list node
2415 *
2416 * This is used for the pointer fixup list (pf) which is created and consumed
2417 * during binder_transaction() and is only accessed locally. No
2418 * locking is necessary.
2419 *
2420 * The list is ordered by @offset.
2421 */
2422 struct binder_ptr_fixup {
2423 binder_size_t offset;
2424 size_t skip_size;
2425 binder_uintptr_t fixup_data;
2426 struct list_head node;
2427 };
2428
2429 /**
2430 * struct binder_sg_copy - scatter-gather data to be copied
2431 * @offset: offset in target buffer
2432 * @sender_uaddr: user address in source buffer
2433 * @length: bytes to copy
2434 * @node: list node
2435 *
2436 * This is used for the sg copy list (sgc) which is created and consumed
2437 * during binder_transaction() and is only accessed locally. No
2438 * locking is necessary.
2439 *
2440 * The list is ordered by @offset.
2441 */
2442 struct binder_sg_copy {
2443 binder_size_t offset;
2444 const void __user *sender_uaddr;
2445 size_t length;
2446 struct list_head node;
2447 };
2448
2449 /**
2450 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2451 * @alloc: binder_alloc associated with @buffer
2452 * @buffer: binder buffer in target process
2453 * @sgc_head: list_head of scatter-gather copy list
2454 * @pf_head: list_head of pointer fixup list
2455 *
2456 * Processes all elements of @sgc_head, applying fixups from @pf_head
2457 * and copying the scatter-gather data from the source process' user
2458 * buffer to the target's buffer. It is expected that the list creation
2459 * and processing all occurs during binder_transaction() so these lists
2460 * are only accessed in local context.
2461 *
2462 * Return: 0=success, else -errno
2463 */
2464 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2465 struct binder_buffer *buffer,
2466 struct list_head *sgc_head,
2467 struct list_head *pf_head)
2468 {
2469 int ret = 0;
2470 struct binder_sg_copy *sgc, *tmpsgc;
2471 struct binder_ptr_fixup *tmppf;
2472 struct binder_ptr_fixup *pf =
2473 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2474 node);
2475
2476 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2477 size_t bytes_copied = 0;
2478
2479 while (bytes_copied < sgc->length) {
2480 size_t copy_size;
2481 size_t bytes_left = sgc->length - bytes_copied;
2482 size_t offset = sgc->offset + bytes_copied;
2483
2484 /*
2485 * We copy up to the fixup (pointed to by pf)
2486 */
2487 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2488 : bytes_left;
2489 if (!ret && copy_size)
2490 ret = binder_alloc_copy_user_to_buffer(
2491 alloc, buffer,
2492 offset,
2493 sgc->sender_uaddr + bytes_copied,
2494 copy_size);
2495 bytes_copied += copy_size;
2496 if (copy_size != bytes_left) {
2497 BUG_ON(!pf);
2498 /* we stopped at a fixup offset */
2499 if (pf->skip_size) {
2500 /*
2501 * we are just skipping. This is for
2502 * BINDER_TYPE_FDA where the translated
2503 * fds will be fixed up when we get
2504 * to target context.
2505 */
2506 bytes_copied += pf->skip_size;
2507 } else {
2508 /* apply the fixup indicated by pf */
2509 if (!ret)
2510 ret = binder_alloc_copy_to_buffer(
2511 alloc, buffer,
2512 pf->offset,
2513 &pf->fixup_data,
2514 sizeof(pf->fixup_data));
2515 bytes_copied += sizeof(pf->fixup_data);
2516 }
2517 list_del(&pf->node);
2518 kfree(pf);
2519 pf = list_first_entry_or_null(pf_head,
2520 struct binder_ptr_fixup, node);
2521 }
2522 }
2523 list_del(&sgc->node);
2524 kfree(sgc);
2525 }
2526 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2527 BUG_ON(pf->skip_size == 0);
2528 list_del(&pf->node);
2529 kfree(pf);
2530 }
2531 BUG_ON(!list_empty(sgc_head));
2532
2533 return ret > 0 ? -EINVAL : ret;
2534 }
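/*
 * Worked example (editorial): one sg entry at offset 0 with length 32 and
 * one data fixup at pf->offset = 8 (skip_size = 0). The loop above copies
 * sender bytes [0, 8) into the target, writes the 8-byte fixup_data at
 * offset 8, then resumes with sender bytes [16, 32) at target offset 16.
 * The sender's untranslated pointer value never reaches the target buffer.
 */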
2535
2536 /**
2537 * binder_cleanup_deferred_txn_lists() - free specified lists
2538 * @sgc_head: list_head of scatter-gather copy list
2539 * @pf_head: list_head of pointer fixup list
2540 *
2541 * Called to clean up @sgc_head and @pf_head if there is an
2542 * error.
2543 */
2544 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2545 struct list_head *pf_head)
2546 {
2547 struct binder_sg_copy *sgc, *tmpsgc;
2548 struct binder_ptr_fixup *pf, *tmppf;
2549
2550 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2551 list_del(&sgc->node);
2552 kfree(sgc);
2553 }
2554 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2555 list_del(&pf->node);
2556 kfree(pf);
2557 }
2558 }
2559
2560 /**
2561 * binder_defer_copy() - queue a scatter-gather buffer for copy
2562 * @sgc_head: list_head of scatter-gather copy list
2563 * @offset: binder buffer offset in target process
2564 * @sender_uaddr: user address in source process
2565 * @length: bytes to copy
2566 *
2567 * Specify a scatter-gather block to be copied. The actual copy must
2568 * be deferred until all the needed fixups are identified and queued.
2569 * Then the copy and fixups are done together so un-translated values
2570 * from the source are never visible in the target buffer.
2571 *
2572 * We are guaranteed that repeated calls to this function will have
2573 * monotonically increasing @offset values so the list will naturally
2574 * be ordered.
2575 *
2576 * Return: 0=success, else -errno
2577 */
2578 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2579 const void __user *sender_uaddr, size_t length)
2580 {
2581 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2582
2583 if (!bc)
2584 return -ENOMEM;
2585
2586 bc->offset = offset;
2587 bc->sender_uaddr = sender_uaddr;
2588 bc->length = length;
2589 INIT_LIST_HEAD(&bc->node);
2590
2591 /*
2592 * We are guaranteed that the deferred copies are in-order
2593 * so just add to the tail.
2594 */
2595 list_add_tail(&bc->node, sgc_head);
2596
2597 return 0;
2598 }
2599
2600 /**
2601 * binder_add_fixup() - queue a fixup to be applied to sg copy
2602 * @pf_head: list_head of binder ptr fixup list
2603 * @offset: binder buffer offset in target process
2604 * @fixup: bytes to be copied for fixup
2605 * @skip_size: bytes to skip when copying (fixup will be applied later)
2606 *
2607 * Add the specified fixup to a list ordered by @offset. When copying
2608 * the scatter-gather buffers, the fixup will be copied instead of
2609 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2610 * will be applied later (in target process context), so we just skip
2611 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2612 * value in @fixup.
2613 *
2614 * This function is called *mostly* in @offset order, but there are
2615 * exceptions. Since out-of-order inserts are relatively uncommon,
2616 * we insert the new element by searching backward from the tail of
2617 * the list.
2618 *
2619 * Return: 0=success, else -errno
2620 */
2621 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2622 binder_uintptr_t fixup, size_t skip_size)
2623 {
2624 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2625 struct binder_ptr_fixup *tmppf;
2626
2627 if (!pf)
2628 return -ENOMEM;
2629
2630 pf->offset = offset;
2631 pf->fixup_data = fixup;
2632 pf->skip_size = skip_size;
2633 INIT_LIST_HEAD(&pf->node);
2634
2635 /* Fixups are *mostly* added in-order, but there are some
2636 * exceptions. Look backwards through list for insertion point.
2637 */
2638 list_for_each_entry_reverse(tmppf, pf_head, node) {
2639 if (tmppf->offset < pf->offset) {
2640 list_add(&pf->node, &tmppf->node);
2641 return 0;
2642 }
2643 }
2644 /*
2645 * if we get here, then the new offset is the lowest so
2646 * insert at the head
2647 */
2648 list_add(&pf->node, pf_head);
2649 return 0;
2650 }
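/*
 * Editorial note on usage in this file: binder_fixup_parent() queues data
 * fixups (skip_size == 0, fixup_data == bp->buffer), while
 * binder_translate_fd_array() queues skip-only fixups covering the whole
 * fd array (skip_size == num_fds * sizeof(u32)), since those fds are
 * rewritten later in target context.
 */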
2651
2652 static int binder_translate_fd_array(struct list_head *pf_head,
2653 struct binder_fd_array_object *fda,
2654 const void __user *sender_ubuffer,
2655 struct binder_buffer_object *parent,
2656 struct binder_buffer_object *sender_uparent,
2657 struct binder_transaction *t,
2658 struct binder_thread *thread,
2659 struct binder_transaction *in_reply_to)
2660 {
2661 binder_size_t fdi, fd_buf_size;
2662 binder_size_t fda_offset;
2663 const void __user *sender_ufda_base;
2664 struct binder_proc *proc = thread->proc;
2665 int ret;
2666
2667 if (fda->num_fds == 0)
2668 return 0;
2669
2670 fd_buf_size = sizeof(u32) * fda->num_fds;
2671 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2672 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2673 proc->pid, thread->pid, (u64)fda->num_fds);
2674 return -EINVAL;
2675 }
2676 if (fd_buf_size > parent->length ||
2677 fda->parent_offset > parent->length - fd_buf_size) {
2678 /* No space for all file descriptors here. */
2679 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2680 proc->pid, thread->pid, (u64)fda->num_fds);
2681 return -EINVAL;
2682 }
2683 /*
2684 * the source data for binder_buffer_object is visible
2685 * to user-space and the @buffer element is the user
2686 * pointer to the buffer_object containing the fd_array.
2687 * Convert the address to an offset relative to
2688 * the base of the transaction buffer.
2689 */
2690 fda_offset = parent->buffer - t->buffer->user_data +
2691 fda->parent_offset;
2692 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2693 fda->parent_offset;
2694
2695 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2696 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2697 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2698 proc->pid, thread->pid);
2699 return -EINVAL;
2700 }
2701 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2702 if (ret)
2703 return ret;
2704
2705 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2706 u32 fd;
2707 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2708 binder_size_t sender_uoffset = fdi * sizeof(fd);
2709
2710 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2711 if (!ret)
2712 ret = binder_translate_fd(fd, offset, t, thread,
2713 in_reply_to);
2714 if (ret)
2715 return ret > 0 ? -EINVAL : ret;
2716 }
2717 return 0;
2718 }
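/*
 * Editorial sketch (not part of the driver): the sender-side layout this
 * function translates. The fd array lives inside a BINDER_TYPE_PTR parent
 * buffer and is described by a BINDER_TYPE_FDA object; fd0/fd1 are
 * hypothetical descriptors.
 */
#if 0
	u32 fds[2] = { fd0, fd1 };
	struct binder_buffer_object parent = {
		.hdr.type = BINDER_TYPE_PTR,
		.buffer   = (binder_uintptr_t)(uintptr_t)fds,
		.length   = sizeof(fds),
	};
	struct binder_fd_array_object fda = {
		.hdr.type      = BINDER_TYPE_FDA,
		.num_fds       = 2,
		.parent        = 0,	/* index of parent in the offsets array */
		.parent_offset = 0,	/* fds start at the parent's base */
	};
#endif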
2719
2720 static int binder_fixup_parent(struct list_head *pf_head,
2721 struct binder_transaction *t,
2722 struct binder_thread *thread,
2723 struct binder_buffer_object *bp,
2724 binder_size_t off_start_offset,
2725 binder_size_t num_valid,
2726 binder_size_t last_fixup_obj_off,
2727 binder_size_t last_fixup_min_off)
2728 {
2729 struct binder_buffer_object *parent;
2730 struct binder_buffer *b = t->buffer;
2731 struct binder_proc *proc = thread->proc;
2732 struct binder_proc *target_proc = t->to_proc;
2733 struct binder_object object;
2734 binder_size_t buffer_offset;
2735 binder_size_t parent_offset;
2736
2737 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2738 return 0;
2739
2740 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2741 off_start_offset, &parent_offset,
2742 num_valid);
2743 if (!parent) {
2744 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2745 proc->pid, thread->pid);
2746 return -EINVAL;
2747 }
2748
2749 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2750 parent_offset, bp->parent_offset,
2751 last_fixup_obj_off,
2752 last_fixup_min_off)) {
2753 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2754 proc->pid, thread->pid);
2755 return -EINVAL;
2756 }
2757
2758 if (parent->length < sizeof(binder_uintptr_t) ||
2759 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2760 /* No space for a pointer here! */
2761 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2762 proc->pid, thread->pid);
2763 return -EINVAL;
2764 }
2765
2766 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2767
2768 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2769 }
2770
2771 /**
2772 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2773 * @t1: the pending async txn in the frozen process
2774 * @t2: the new async txn to supersede the outdated pending one
2775 *
2776 * Return: true if t2 can supersede t1
2777 * false if t2 cannot supersede t1
2778 */
2779 static bool binder_can_update_transaction(struct binder_transaction *t1,
2780 struct binder_transaction *t2)
2781 {
2782 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2783 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2784 return false;
2785 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2786 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2787 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2788 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2789 return true;
2790 return false;
2791 }
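/*
 * Example (editorial): a service streaming one-way status updates sets
 * TF_UPDATE_TXN on each send. If the target process is frozen and an older
 * update with the same code and target node is still queued, the new
 * transaction replaces it rather than accumulating on async_todo.
 */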
2792
2793 /**
2794 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2795 * @t: new async transaction
2796 * @target_list: list to find outdated transaction
2797 *
2798 * Return: the outdated transaction if found
2799 * NULL if no outdated transaction can be found
2800 *
2801 * Requires the proc->inner_lock to be held.
2802 */
2803 static struct binder_transaction *
2804 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2805 struct list_head *target_list)
2806 {
2807 struct binder_work *w;
2808
2809 list_for_each_entry(w, target_list, entry) {
2810 struct binder_transaction *t_queued;
2811
2812 if (w->type != BINDER_WORK_TRANSACTION)
2813 continue;
2814 t_queued = container_of(w, struct binder_transaction, work);
2815 if (binder_can_update_transaction(t_queued, t))
2816 return t_queued;
2817 }
2818 return NULL;
2819 }
2820
2821 /**
2822 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2823 * @t: transaction to send
2824 * @proc: process to send the transaction to
2825 * @thread: thread in @proc to send the transaction to (may be NULL)
2826 *
2827 * This function queues a transaction to the specified process. It will try
2828 * to find a thread in the target process to handle the transaction and
2829 * wake it up. If no thread is found, the work is queued to the proc
2830 * waitqueue.
2831 *
2832 * If the @thread parameter is not NULL, the transaction is always queued
2833 * to the waitlist of that specific thread.
2834 *
2835 * Return: 0 if the transaction was successfully queued
2836 * BR_DEAD_REPLY if the target process or thread is dead
2837 * BR_FROZEN_REPLY if the target process or thread is frozen and
2838 * the sync transaction was rejected
2839 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2840 * and the async transaction was successfully queued
2841 */
2842 static int binder_proc_transaction(struct binder_transaction *t,
2843 struct binder_proc *proc,
2844 struct binder_thread *thread)
2845 {
2846 struct binder_node *node = t->buffer->target_node;
2847 bool oneway = !!(t->flags & TF_ONE_WAY);
2848 bool pending_async = false;
2849 struct binder_transaction *t_outdated = NULL;
2850 bool frozen = false;
2851
2852 BUG_ON(!node);
2853 binder_node_lock(node);
2854 if (oneway) {
2855 BUG_ON(thread);
2856 if (node->has_async_transaction)
2857 pending_async = true;
2858 else
2859 node->has_async_transaction = true;
2860 }
2861
2862 binder_inner_proc_lock(proc);
2863 if (proc->is_frozen) {
2864 frozen = true;
2865 proc->sync_recv |= !oneway;
2866 proc->async_recv |= oneway;
2867 }
2868
2869 if ((frozen && !oneway) || proc->is_dead ||
2870 (thread && thread->is_dead)) {
2871 binder_inner_proc_unlock(proc);
2872 binder_node_unlock(node);
2873 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2874 }
2875
2876 if (!thread && !pending_async)
2877 thread = binder_select_thread_ilocked(proc);
2878
2879 if (thread) {
2880 binder_enqueue_thread_work_ilocked(thread, &t->work);
2881 } else if (!pending_async) {
2882 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2883 } else {
2884 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2885 t_outdated = binder_find_outdated_transaction_ilocked(t,
2886 &node->async_todo);
2887 if (t_outdated) {
2888 binder_debug(BINDER_DEBUG_TRANSACTION,
2889 "txn %d supersedes %d\n",
2890 t->debug_id, t_outdated->debug_id);
2891 list_del_init(&t_outdated->work.entry);
2892 proc->outstanding_txns--;
2893 }
2894 }
2895 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2896 }
2897
2898 if (!pending_async)
2899 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2900
2901 proc->outstanding_txns++;
2902 binder_inner_proc_unlock(proc);
2903 binder_node_unlock(node);
2904
2905 /*
2906 * To reduce potential contention, free the outdated transaction and
2907 * buffer after releasing the locks.
2908 */
2909 if (t_outdated) {
2910 struct binder_buffer *buffer = t_outdated->buffer;
2911
2912 t_outdated->buffer = NULL;
2913 buffer->transaction = NULL;
2914 trace_binder_transaction_update_buffer_release(buffer);
2915 binder_release_entire_buffer(proc, NULL, buffer, false);
2916 binder_alloc_free_buf(&proc->alloc, buffer);
2917 kfree(t_outdated);
2918 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2919 }
2920
2921 if (oneway && frozen)
2922 return BR_TRANSACTION_PENDING_FROZEN;
2923
2924 return 0;
2925 }
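/*
 * Editorial note: callers treat BR_DEAD_REPLY and BR_FROZEN_REPLY as
 * failures and abort the transaction, while BR_TRANSACTION_PENDING_FROZEN
 * still counts as a successful queue; it only changes the completion code
 * the sender eventually sees (see binder_transaction() below).
 */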
2926
2927 /**
2928 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2929 * @node: struct binder_node for which to get refs
2930 * @procp: returns @node->proc if valid
2931 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2932 *
2933 * User-space normally keeps the node alive when creating a transaction
2934 * since it has a reference to the target. The local strong ref keeps it
2935 * alive if the sending process dies before the target process processes
2936 * the transaction. If the source process is malicious or has a reference
2937 * counting bug, relying on the local strong ref can fail.
2938 *
2939 * Since user-space can cause the local strong ref to go away, we also take
2940 * a tmpref on the node to ensure it survives while we are constructing
2941 * the transaction. We also need a tmpref on the proc while we are
2942 * constructing the transaction, so we take that here as well.
2943 *
2944 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2945 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2946 * target proc has died, @error is set to BR_DEAD_REPLY.
2947 */
2948 static struct binder_node *binder_get_node_refs_for_txn(
2949 struct binder_node *node,
2950 struct binder_proc **procp,
2951 uint32_t *error)
2952 {
2953 struct binder_node *target_node = NULL;
2954
2955 binder_node_inner_lock(node);
2956 if (node->proc) {
2957 target_node = node;
2958 binder_inc_node_nilocked(node, 1, 0, NULL);
2959 binder_inc_node_tmpref_ilocked(node);
2960 node->proc->tmp_ref++;
2961 *procp = node->proc;
2962 } else
2963 *error = BR_DEAD_REPLY;
2964 binder_node_inner_unlock(node);
2965
2966 return target_node;
2967 }
2968
2969 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2970 uint32_t command, int32_t param)
2971 {
2972 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2973
2974 if (!from) {
2975 /* annotation for sparse */
2976 __release(&from->proc->inner_lock);
2977 return;
2978 }
2979
2980 /* don't override existing errors */
2981 if (from->ee.command == BR_OK)
2982 binder_set_extended_error(&from->ee, id, command, param);
2983 binder_inner_proc_unlock(from->proc);
2984 binder_thread_dec_tmpref(from);
2985 }
2986
2987 static void binder_transaction(struct binder_proc *proc,
2988 struct binder_thread *thread,
2989 struct binder_transaction_data *tr, int reply,
2990 binder_size_t extra_buffers_size)
2991 {
2992 int ret;
2993 struct binder_transaction *t;
2994 struct binder_work *w;
2995 struct binder_work *tcomplete;
2996 binder_size_t buffer_offset = 0;
2997 binder_size_t off_start_offset, off_end_offset;
2998 binder_size_t off_min;
2999 binder_size_t sg_buf_offset, sg_buf_end_offset;
3000 binder_size_t user_offset = 0;
3001 struct binder_proc *target_proc = NULL;
3002 struct binder_thread *target_thread = NULL;
3003 struct binder_node *target_node = NULL;
3004 struct binder_transaction *in_reply_to = NULL;
3005 struct binder_transaction_log_entry *e;
3006 uint32_t return_error = 0;
3007 uint32_t return_error_param = 0;
3008 uint32_t return_error_line = 0;
3009 binder_size_t last_fixup_obj_off = 0;
3010 binder_size_t last_fixup_min_off = 0;
3011 struct binder_context *context = proc->context;
3012 int t_debug_id = atomic_inc_return(&binder_last_id);
3013 ktime_t t_start_time = ktime_get();
3014 char *secctx = NULL;
3015 u32 secctx_sz = 0;
3016 struct list_head sgc_head;
3017 struct list_head pf_head;
3018 const void __user *user_buffer = (const void __user *)
3019 (uintptr_t)tr->data.ptr.buffer;
3020 INIT_LIST_HEAD(&sgc_head);
3021 INIT_LIST_HEAD(&pf_head);
3022
3023 e = binder_transaction_log_add(&binder_transaction_log);
3024 e->debug_id = t_debug_id;
3025 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3026 e->from_proc = proc->pid;
3027 e->from_thread = thread->pid;
3028 e->target_handle = tr->target.handle;
3029 e->data_size = tr->data_size;
3030 e->offsets_size = tr->offsets_size;
3031 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3032
3033 binder_inner_proc_lock(proc);
3034 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3035 binder_inner_proc_unlock(proc);
3036
3037 if (reply) {
3038 binder_inner_proc_lock(proc);
3039 in_reply_to = thread->transaction_stack;
3040 if (in_reply_to == NULL) {
3041 binder_inner_proc_unlock(proc);
3042 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3043 proc->pid, thread->pid);
3044 return_error = BR_FAILED_REPLY;
3045 return_error_param = -EPROTO;
3046 return_error_line = __LINE__;
3047 goto err_empty_call_stack;
3048 }
3049 if (in_reply_to->to_thread != thread) {
3050 spin_lock(&in_reply_to->lock);
3051 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3052 proc->pid, thread->pid, in_reply_to->debug_id,
3053 in_reply_to->to_proc ?
3054 in_reply_to->to_proc->pid : 0,
3055 in_reply_to->to_thread ?
3056 in_reply_to->to_thread->pid : 0);
3057 spin_unlock(&in_reply_to->lock);
3058 binder_inner_proc_unlock(proc);
3059 return_error = BR_FAILED_REPLY;
3060 return_error_param = -EPROTO;
3061 return_error_line = __LINE__;
3062 in_reply_to = NULL;
3063 goto err_bad_call_stack;
3064 }
3065 thread->transaction_stack = in_reply_to->to_parent;
3066 binder_inner_proc_unlock(proc);
3067 binder_set_nice(in_reply_to->saved_priority);
3068 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3069 if (target_thread == NULL) {
3070 /* annotation for sparse */
3071 __release(&target_thread->proc->inner_lock);
3072 binder_txn_error("%d:%d reply target not found\n",
3073 thread->pid, proc->pid);
3074 return_error = BR_DEAD_REPLY;
3075 return_error_line = __LINE__;
3076 goto err_dead_binder;
3077 }
3078 if (target_thread->transaction_stack != in_reply_to) {
3079 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3080 proc->pid, thread->pid,
3081 target_thread->transaction_stack ?
3082 target_thread->transaction_stack->debug_id : 0,
3083 in_reply_to->debug_id);
3084 binder_inner_proc_unlock(target_thread->proc);
3085 return_error = BR_FAILED_REPLY;
3086 return_error_param = -EPROTO;
3087 return_error_line = __LINE__;
3088 in_reply_to = NULL;
3089 target_thread = NULL;
3090 goto err_dead_binder;
3091 }
3092 target_proc = target_thread->proc;
3093 target_proc->tmp_ref++;
3094 binder_inner_proc_unlock(target_thread->proc);
3095 } else {
3096 if (tr->target.handle) {
3097 struct binder_ref *ref;
3098
3099 /*
3100 * There must already be a strong ref
3101 * on this node. If so, do a strong
3102 * increment on the node to ensure it
3103 * stays alive until the transaction is
3104 * done.
3105 */
3106 binder_proc_lock(proc);
3107 ref = binder_get_ref_olocked(proc, tr->target.handle,
3108 true);
3109 if (ref) {
3110 target_node = binder_get_node_refs_for_txn(
3111 ref->node, &target_proc,
3112 &return_error);
3113 } else {
3114 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3115 proc->pid, thread->pid, tr->target.handle);
3116 return_error = BR_FAILED_REPLY;
3117 }
3118 binder_proc_unlock(proc);
3119 } else {
3120 mutex_lock(&context->context_mgr_node_lock);
3121 target_node = context->binder_context_mgr_node;
3122 if (target_node)
3123 target_node = binder_get_node_refs_for_txn(
3124 target_node, &target_proc,
3125 &return_error);
3126 else
3127 return_error = BR_DEAD_REPLY;
3128 mutex_unlock(&context->context_mgr_node_lock);
3129 if (target_node && target_proc->pid == proc->pid) {
3130 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3131 proc->pid, thread->pid);
3132 return_error = BR_FAILED_REPLY;
3133 return_error_param = -EINVAL;
3134 return_error_line = __LINE__;
3135 goto err_invalid_target_handle;
3136 }
3137 }
3138 if (!target_node) {
3139 binder_txn_error("%d:%d cannot find target node\n",
3140 thread->pid, proc->pid);
3141 /*
3142 * return_error is set above
3143 */
3144 return_error_param = -EINVAL;
3145 return_error_line = __LINE__;
3146 goto err_dead_binder;
3147 }
3148 e->to_node = target_node->debug_id;
3149 if (WARN_ON(proc == target_proc)) {
3150 binder_txn_error("%d:%d self transactions not allowed\n",
3151 thread->pid, proc->pid);
3152 return_error = BR_FAILED_REPLY;
3153 return_error_param = -EINVAL;
3154 return_error_line = __LINE__;
3155 goto err_invalid_target_handle;
3156 }
3157 if (security_binder_transaction(proc->cred,
3158 target_proc->cred) < 0) {
3159 binder_txn_error("%d:%d transaction credentials failed\n",
3160 thread->pid, proc->pid);
3161 return_error = BR_FAILED_REPLY;
3162 return_error_param = -EPERM;
3163 return_error_line = __LINE__;
3164 goto err_invalid_target_handle;
3165 }
3166 binder_inner_proc_lock(proc);
3167
3168 w = list_first_entry_or_null(&thread->todo,
3169 struct binder_work, entry);
3170 if (!(tr->flags & TF_ONE_WAY) && w &&
3171 w->type == BINDER_WORK_TRANSACTION) {
3172 /*
3173 * Do not allow new outgoing transaction from a
3174 * thread that has a transaction at the head of
3175 * its todo list. Only need to check the head
3176 * because binder_select_thread_ilocked picks a
3177 * thread from proc->waiting_threads to enqueue
3178 * the transaction, and nothing is queued to the
3179 * todo list while the thread is on waiting_threads.
3180 */
3181 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3182 proc->pid, thread->pid);
3183 binder_inner_proc_unlock(proc);
3184 return_error = BR_FAILED_REPLY;
3185 return_error_param = -EPROTO;
3186 return_error_line = __LINE__;
3187 goto err_bad_todo_list;
3188 }
3189
3190 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3191 struct binder_transaction *tmp;
3192
3193 tmp = thread->transaction_stack;
3194 if (tmp->to_thread != thread) {
3195 spin_lock(&tmp->lock);
3196 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3197 proc->pid, thread->pid, tmp->debug_id,
3198 tmp->to_proc ? tmp->to_proc->pid : 0,
3199 tmp->to_thread ?
3200 tmp->to_thread->pid : 0);
3201 spin_unlock(&tmp->lock);
3202 binder_inner_proc_unlock(proc);
3203 return_error = BR_FAILED_REPLY;
3204 return_error_param = -EPROTO;
3205 return_error_line = __LINE__;
3206 goto err_bad_call_stack;
3207 }
3208 while (tmp) {
3209 struct binder_thread *from;
3210
3211 spin_lock(&tmp->lock);
3212 from = tmp->from;
3213 if (from && from->proc == target_proc) {
3214 atomic_inc(&from->tmp_ref);
3215 target_thread = from;
3216 spin_unlock(&tmp->lock);
3217 break;
3218 }
3219 spin_unlock(&tmp->lock);
3220 tmp = tmp->from_parent;
3221 }
3222 }
3223 binder_inner_proc_unlock(proc);
3224 }
3225 if (target_thread)
3226 e->to_thread = target_thread->pid;
3227 e->to_proc = target_proc->pid;
3228
3229 /* TODO: reuse incoming transaction for reply */
3230 t = kzalloc(sizeof(*t), GFP_KERNEL);
3231 if (t == NULL) {
3232 binder_txn_error("%d:%d cannot allocate transaction\n",
3233 thread->pid, proc->pid);
3234 return_error = BR_FAILED_REPLY;
3235 return_error_param = -ENOMEM;
3236 return_error_line = __LINE__;
3237 goto err_alloc_t_failed;
3238 }
3239 INIT_LIST_HEAD(&t->fd_fixups);
3240 binder_stats_created(BINDER_STAT_TRANSACTION);
3241 spin_lock_init(&t->lock);
3242
3243 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3244 if (tcomplete == NULL) {
3245 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3246 thread->pid, proc->pid);
3247 return_error = BR_FAILED_REPLY;
3248 return_error_param = -ENOMEM;
3249 return_error_line = __LINE__;
3250 goto err_alloc_tcomplete_failed;
3251 }
3252 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3253
3254 t->debug_id = t_debug_id;
3255 t->start_time = t_start_time;
3256
3257 if (reply)
3258 binder_debug(BINDER_DEBUG_TRANSACTION,
3259 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3260 proc->pid, thread->pid, t->debug_id,
3261 target_proc->pid, target_thread->pid,
3262 (u64)tr->data.ptr.buffer,
3263 (u64)tr->data.ptr.offsets,
3264 (u64)tr->data_size, (u64)tr->offsets_size,
3265 (u64)extra_buffers_size);
3266 else
3267 binder_debug(BINDER_DEBUG_TRANSACTION,
3268 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3269 proc->pid, thread->pid, t->debug_id,
3270 target_proc->pid, target_node->debug_id,
3271 (u64)tr->data.ptr.buffer,
3272 (u64)tr->data.ptr.offsets,
3273 (u64)tr->data_size, (u64)tr->offsets_size,
3274 (u64)extra_buffers_size);
3275
3276 if (!reply && !(tr->flags & TF_ONE_WAY))
3277 t->from = thread;
3278 else
3279 t->from = NULL;
3280 t->from_pid = proc->pid;
3281 t->from_tid = thread->pid;
3282 t->sender_euid = task_euid(proc->tsk);
3283 t->to_proc = target_proc;
3284 t->to_thread = target_thread;
3285 t->code = tr->code;
3286 t->flags = tr->flags;
3287 t->priority = task_nice(current);
3288
3289 if (target_node && target_node->txn_security_ctx) {
3290 u32 secid;
3291 size_t added_size;
3292
3293 security_cred_getsecid(proc->cred, &secid);
3294 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3295 if (ret) {
3296 binder_txn_error("%d:%d failed to get security context\n",
3297 thread->pid, proc->pid);
3298 return_error = BR_FAILED_REPLY;
3299 return_error_param = ret;
3300 return_error_line = __LINE__;
3301 goto err_get_secctx_failed;
3302 }
3303 added_size = ALIGN(secctx_sz, sizeof(u64));
3304 extra_buffers_size += added_size;
3305 if (extra_buffers_size < added_size) {
3306 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3307 thread->pid, proc->pid);
3308 return_error = BR_FAILED_REPLY;
3309 return_error_param = -EINVAL;
3310 return_error_line = __LINE__;
3311 goto err_bad_extra_size;
3312 }
3313 }
3314
3315 trace_binder_transaction(reply, t, target_node);
3316
3317 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3318 tr->offsets_size, extra_buffers_size,
3319 !reply && (t->flags & TF_ONE_WAY));
3320 if (IS_ERR(t->buffer)) {
3321 char *s;
3322
3323 ret = PTR_ERR(t->buffer);
3324 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3325 : (ret == -ENOSPC) ? ": no space left"
3326 : (ret == -ENOMEM) ? ": memory allocation failed"
3327 : "";
3328 binder_txn_error("cannot allocate buffer%s", s);
3329
3330 return_error_param = PTR_ERR(t->buffer);
3331 return_error = return_error_param == -ESRCH ?
3332 BR_DEAD_REPLY : BR_FAILED_REPLY;
3333 return_error_line = __LINE__;
3334 t->buffer = NULL;
3335 goto err_binder_alloc_buf_failed;
3336 }
3337 if (secctx) {
3338 int err;
3339 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3340 ALIGN(tr->offsets_size, sizeof(void *)) +
3341 ALIGN(extra_buffers_size, sizeof(void *)) -
3342 ALIGN(secctx_sz, sizeof(u64));
3343
3344 t->security_ctx = t->buffer->user_data + buf_offset;
3345 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3346 t->buffer, buf_offset,
3347 secctx, secctx_sz);
3348 if (err) {
3349 t->security_ctx = 0;
3350 WARN_ON(1);
3351 }
3352 security_release_secctx(secctx, secctx_sz);
3353 secctx = NULL;
3354 }
3355 t->buffer->debug_id = t->debug_id;
3356 t->buffer->transaction = t;
3357 t->buffer->target_node = target_node;
3358 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3359 trace_binder_transaction_alloc_buf(t->buffer);
3360
3361 if (binder_alloc_copy_user_to_buffer(
3362 &target_proc->alloc,
3363 t->buffer,
3364 ALIGN(tr->data_size, sizeof(void *)),
3365 (const void __user *)
3366 (uintptr_t)tr->data.ptr.offsets,
3367 tr->offsets_size)) {
3368 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3369 proc->pid, thread->pid);
3370 return_error = BR_FAILED_REPLY;
3371 return_error_param = -EFAULT;
3372 return_error_line = __LINE__;
3373 goto err_copy_data_failed;
3374 }
3375 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3376 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3377 proc->pid, thread->pid, (u64)tr->offsets_size);
3378 return_error = BR_FAILED_REPLY;
3379 return_error_param = -EINVAL;
3380 return_error_line = __LINE__;
3381 goto err_bad_offset;
3382 }
3383 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3384 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3385 proc->pid, thread->pid,
3386 (u64)extra_buffers_size);
3387 return_error = BR_FAILED_REPLY;
3388 return_error_param = -EINVAL;
3389 return_error_line = __LINE__;
3390 goto err_bad_offset;
3391 }
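/*
 * Sketch of the target buffer layout implied by the offset math below
 * (the secctx tail is present only when the node requested one):
 *
 *	| data | pad | offsets array | pad | sg buffers | secctx |
 *	^0           ^off_start_offset     ^sg_buf_offset
 */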
3392 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3393 buffer_offset = off_start_offset;
3394 off_end_offset = off_start_offset + tr->offsets_size;
3395 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3396 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3397 ALIGN(secctx_sz, sizeof(u64));
3398 off_min = 0;
3399 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3400 buffer_offset += sizeof(binder_size_t)) {
3401 struct binder_object_header *hdr;
3402 size_t object_size;
3403 struct binder_object object;
3404 binder_size_t object_offset;
3405 binder_size_t copy_size;
3406
3407 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3408 &object_offset,
3409 t->buffer,
3410 buffer_offset,
3411 sizeof(object_offset))) {
3412 binder_txn_error("%d:%d copy offset from buffer failed\n",
3413 thread->pid, proc->pid);
3414 return_error = BR_FAILED_REPLY;
3415 return_error_param = -EINVAL;
3416 return_error_line = __LINE__;
3417 goto err_bad_offset;
3418 }
3419
3420 /*
3421 * Copy the source user buffer up to the next object
3422 * that will be processed.
3423 */
3424 copy_size = object_offset - user_offset;
3425 if (copy_size && (user_offset > object_offset ||
3426 object_offset > tr->data_size ||
3427 binder_alloc_copy_user_to_buffer(
3428 &target_proc->alloc,
3429 t->buffer, user_offset,
3430 user_buffer + user_offset,
3431 copy_size))) {
3432 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3433 proc->pid, thread->pid);
3434 return_error = BR_FAILED_REPLY;
3435 return_error_param = -EFAULT;
3436 return_error_line = __LINE__;
3437 goto err_copy_data_failed;
3438 }
3439 object_size = binder_get_object(target_proc, user_buffer,
3440 t->buffer, object_offset, &object);
3441 if (object_size == 0 || object_offset < off_min) {
3442 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3443 proc->pid, thread->pid,
3444 (u64)object_offset,
3445 (u64)off_min,
3446 (u64)t->buffer->data_size);
3447 return_error = BR_FAILED_REPLY;
3448 return_error_param = -EINVAL;
3449 return_error_line = __LINE__;
3450 goto err_bad_offset;
3451 }
3452 /*
3453 * Set offset to the next buffer fragment to be
3454 * copied
3455 */
3456 user_offset = object_offset + object_size;
3457
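/*
 * off_min advances past each validated object, so offsets must arrive
 * in ascending order and objects must not overlap; anything else is
 * rejected above as an invalid offset.
 */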
3458 hdr = &object.hdr;
3459 off_min = object_offset + object_size;
3460 switch (hdr->type) {
3461 case BINDER_TYPE_BINDER:
3462 case BINDER_TYPE_WEAK_BINDER: {
3463 struct flat_binder_object *fp;
3464
3465 fp = to_flat_binder_object(hdr);
3466 ret = binder_translate_binder(fp, t, thread);
3467
3468 if (ret < 0 ||
3469 binder_alloc_copy_to_buffer(&target_proc->alloc,
3470 t->buffer,
3471 object_offset,
3472 fp, sizeof(*fp))) {
3473 binder_txn_error("%d:%d translate binder failed\n",
3474 thread->pid, proc->pid);
3475 return_error = BR_FAILED_REPLY;
3476 return_error_param = ret;
3477 return_error_line = __LINE__;
3478 goto err_translate_failed;
3479 }
3480 } break;
3481 case BINDER_TYPE_HANDLE:
3482 case BINDER_TYPE_WEAK_HANDLE: {
3483 struct flat_binder_object *fp;
3484
3485 fp = to_flat_binder_object(hdr);
3486 ret = binder_translate_handle(fp, t, thread);
3487 if (ret < 0 ||
3488 binder_alloc_copy_to_buffer(&target_proc->alloc,
3489 t->buffer,
3490 object_offset,
3491 fp, sizeof(*fp))) {
3492 binder_txn_error("%d:%d translate handle failed\n",
3493 thread->pid, proc->pid);
3494 return_error = BR_FAILED_REPLY;
3495 return_error_param = ret;
3496 return_error_line = __LINE__;
3497 goto err_translate_failed;
3498 }
3499 } break;
3500
3501 case BINDER_TYPE_FD: {
3502 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3503 binder_size_t fd_offset = object_offset +
3504 (uintptr_t)&fp->fd - (uintptr_t)fp;
3505 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3506 thread, in_reply_to);
3507
3508 fp->pad_binder = 0;
3509 if (ret < 0 ||
3510 binder_alloc_copy_to_buffer(&target_proc->alloc,
3511 t->buffer,
3512 object_offset,
3513 fp, sizeof(*fp))) {
3514 binder_txn_error("%d:%d translate fd failed\n",
3515 thread->pid, proc->pid);
3516 return_error = BR_FAILED_REPLY;
3517 return_error_param = ret;
3518 return_error_line = __LINE__;
3519 goto err_translate_failed;
3520 }
3521 } break;
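/*
 * BINDER_TYPE_FDA: an array of fds embedded in a parent
 * BINDER_TYPE_PTR buffer that must already have been processed.
 * binder_validate_ptr() and binder_validate_fixup() enforce that the
 * parent exists and that fixups land in strictly increasing order
 * within it.
 */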
3522 case BINDER_TYPE_FDA: {
3523 struct binder_object ptr_object;
3524 binder_size_t parent_offset;
3525 struct binder_object user_object;
3526 size_t user_parent_size;
3527 struct binder_fd_array_object *fda =
3528 to_binder_fd_array_object(hdr);
3529 size_t num_valid = (buffer_offset - off_start_offset) /
3530 sizeof(binder_size_t);
3531 struct binder_buffer_object *parent =
3532 binder_validate_ptr(target_proc, t->buffer,
3533 &ptr_object, fda->parent,
3534 off_start_offset,
3535 &parent_offset,
3536 num_valid);
3537 if (!parent) {
3538 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3539 proc->pid, thread->pid);
3540 return_error = BR_FAILED_REPLY;
3541 return_error_param = -EINVAL;
3542 return_error_line = __LINE__;
3543 goto err_bad_parent;
3544 }
3545 if (!binder_validate_fixup(target_proc, t->buffer,
3546 off_start_offset,
3547 parent_offset,
3548 fda->parent_offset,
3549 last_fixup_obj_off,
3550 last_fixup_min_off)) {
3551 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3552 proc->pid, thread->pid);
3553 return_error = BR_FAILED_REPLY;
3554 return_error_param = -EINVAL;
3555 return_error_line = __LINE__;
3556 goto err_bad_parent;
3557 }
3558 /*
3559 * We need to read the user version of the parent
3560 * object to get the original user offset
3561 */
3562 user_parent_size =
3563 binder_get_object(proc, user_buffer, t->buffer,
3564 parent_offset, &user_object);
3565 if (user_parent_size != sizeof(user_object.bbo)) {
3566 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3567 proc->pid, thread->pid,
3568 user_parent_size,
3569 sizeof(user_object.bbo));
3570 return_error = BR_FAILED_REPLY;
3571 return_error_param = -EINVAL;
3572 return_error_line = __LINE__;
3573 goto err_bad_parent;
3574 }
3575 ret = binder_translate_fd_array(&pf_head, fda,
3576 user_buffer, parent,
3577 &user_object.bbo, t,
3578 thread, in_reply_to);
3579 if (!ret)
3580 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3581 t->buffer,
3582 object_offset,
3583 fda, sizeof(*fda));
3584 if (ret) {
3585 binder_txn_error("%d:%d translate fd array failed\n",
3586 thread->pid, proc->pid);
3587 return_error = BR_FAILED_REPLY;
3588 return_error_param = ret > 0 ? -EINVAL : ret;
3589 return_error_line = __LINE__;
3590 goto err_translate_failed;
3591 }
3592 last_fixup_obj_off = parent_offset;
3593 last_fixup_min_off =
3594 fda->parent_offset + sizeof(u32) * fda->num_fds;
3595 } break;
3596 case BINDER_TYPE_PTR: {
3597 struct binder_buffer_object *bp =
3598 to_binder_buffer_object(hdr);
3599 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3600 size_t num_valid;
3601
3602 if (bp->length > buf_left) {
3603 binder_user_error("%d:%d got transaction with too large buffer\n",
3604 proc->pid, thread->pid);
3605 return_error = BR_FAILED_REPLY;
3606 return_error_param = -EINVAL;
3607 return_error_line = __LINE__;
3608 goto err_bad_offset;
3609 }
3610 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3611 (const void __user *)(uintptr_t)bp->buffer,
3612 bp->length);
3613 if (ret) {
3614 binder_txn_error("%d:%d deferred copy failed\n",
3615 thread->pid, proc->pid);
3616 return_error = BR_FAILED_REPLY;
3617 return_error_param = ret;
3618 return_error_line = __LINE__;
3619 goto err_translate_failed;
3620 }
3621 /* Fixup buffer pointer to target proc address space */
3622 bp->buffer = t->buffer->user_data + sg_buf_offset;
3623 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3624
3625 num_valid = (buffer_offset - off_start_offset) /
3626 sizeof(binder_size_t);
3627 ret = binder_fixup_parent(&pf_head, t,
3628 thread, bp,
3629 off_start_offset,
3630 num_valid,
3631 last_fixup_obj_off,
3632 last_fixup_min_off);
3633 if (ret < 0 ||
3634 binder_alloc_copy_to_buffer(&target_proc->alloc,
3635 t->buffer,
3636 object_offset,
3637 bp, sizeof(*bp))) {
3638 binder_txn_error("%d:%d failed to fixup parent\n",
3639 thread->pid, proc->pid);
3640 return_error = BR_FAILED_REPLY;
3641 return_error_param = ret;
3642 return_error_line = __LINE__;
3643 goto err_translate_failed;
3644 }
3645 last_fixup_obj_off = object_offset;
3646 last_fixup_min_off = 0;
3647 } break;
3648 default:
3649 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3650 proc->pid, thread->pid, hdr->type);
3651 return_error = BR_FAILED_REPLY;
3652 return_error_param = -EINVAL;
3653 return_error_line = __LINE__;
3654 goto err_bad_object_type;
3655 }
3656 }
3657 /* Done processing objects, copy the rest of the buffer */
3658 if (binder_alloc_copy_user_to_buffer(
3659 &target_proc->alloc,
3660 t->buffer, user_offset,
3661 user_buffer + user_offset,
3662 tr->data_size - user_offset)) {
3663 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3664 proc->pid, thread->pid);
3665 return_error = BR_FAILED_REPLY;
3666 return_error_param = -EFAULT;
3667 return_error_line = __LINE__;
3668 goto err_copy_data_failed;
3669 }
3670
3671 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3672 &sgc_head, &pf_head);
3673 if (ret) {
3674 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3675 proc->pid, thread->pid);
3676 return_error = BR_FAILED_REPLY;
3677 return_error_param = ret;
3678 return_error_line = __LINE__;
3679 goto err_copy_data_failed;
3680 }
3681 if (t->buffer->oneway_spam_suspect)
3682 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3683 else
3684 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3685 t->work.type = BINDER_WORK_TRANSACTION;
3686
3687 if (reply) {
3688 binder_enqueue_thread_work(thread, tcomplete);
3689 binder_inner_proc_lock(target_proc);
3690 if (target_thread->is_dead) {
3691 return_error = BR_DEAD_REPLY;
3692 binder_inner_proc_unlock(target_proc);
3693 goto err_dead_proc_or_thread;
3694 }
3695 BUG_ON(t->buffer->async_transaction != 0);
3696 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3697 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3698 target_proc->outstanding_txns++;
3699 binder_inner_proc_unlock(target_proc);
3700 wake_up_interruptible_sync(&target_thread->wait);
3701 binder_free_transaction(in_reply_to);
3702 } else if (!(t->flags & TF_ONE_WAY)) {
3703 BUG_ON(t->buffer->async_transaction != 0);
3704 binder_inner_proc_lock(proc);
3705 /*
3706 * Defer the TRANSACTION_COMPLETE, so we don't return to
3707 * userspace immediately; this allows the target process to
3708 * immediately start processing this transaction, reducing
3709 * latency. We will then return the TRANSACTION_COMPLETE when
3710 * the target replies (or there is an error).
3711 */
3712 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3713 t->need_reply = 1;
3714 t->from_parent = thread->transaction_stack;
3715 thread->transaction_stack = t;
3716 binder_inner_proc_unlock(proc);
3717 return_error = binder_proc_transaction(t,
3718 target_proc, target_thread);
3719 if (return_error) {
3720 binder_inner_proc_lock(proc);
3721 binder_pop_transaction_ilocked(thread, t);
3722 binder_inner_proc_unlock(proc);
3723 goto err_dead_proc_or_thread;
3724 }
3725 } else {
3726 BUG_ON(target_node == NULL);
3727 BUG_ON(t->buffer->async_transaction != 1);
3728 return_error = binder_proc_transaction(t, target_proc, NULL);
3729 /*
3730 * Let the caller know when an async transaction reaches a frozen
3731 * process and is put in a pending queue, waiting for the target
3732 * process to be unfrozen.
3733 */
3734 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3735 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3736 binder_enqueue_thread_work(thread, tcomplete);
3737 if (return_error &&
3738 return_error != BR_TRANSACTION_PENDING_FROZEN)
3739 goto err_dead_proc_or_thread;
3740 }
3741 if (target_thread)
3742 binder_thread_dec_tmpref(target_thread);
3743 binder_proc_dec_tmpref(target_proc);
3744 if (target_node)
3745 binder_dec_node_tmpref(target_node);
3746 /*
3747 * write barrier to synchronize with initialization
3748 * of log entry
3749 */
3750 smp_wmb();
3751 WRITE_ONCE(e->debug_id_done, t_debug_id);
3752 return;
3753
3754 err_dead_proc_or_thread:
3755 binder_txn_error("%d:%d dead process or thread\n",
3756 thread->pid, proc->pid);
3757 return_error_line = __LINE__;
3758 binder_dequeue_work(proc, tcomplete);
3759 err_translate_failed:
3760 err_bad_object_type:
3761 err_bad_offset:
3762 err_bad_parent:
3763 err_copy_data_failed:
3764 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3765 binder_free_txn_fixups(t);
3766 trace_binder_transaction_failed_buffer_release(t->buffer);
3767 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3768 buffer_offset, true);
3769 if (target_node)
3770 binder_dec_node_tmpref(target_node);
3771 target_node = NULL;
3772 t->buffer->transaction = NULL;
3773 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3774 err_binder_alloc_buf_failed:
3775 err_bad_extra_size:
3776 if (secctx)
3777 security_release_secctx(secctx, secctx_sz);
3778 err_get_secctx_failed:
3779 kfree(tcomplete);
3780 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3781 err_alloc_tcomplete_failed:
3782 if (trace_binder_txn_latency_free_enabled())
3783 binder_txn_latency_free(t);
3784 kfree(t);
3785 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3786 err_alloc_t_failed:
3787 err_bad_todo_list:
3788 err_bad_call_stack:
3789 err_empty_call_stack:
3790 err_dead_binder:
3791 err_invalid_target_handle:
3792 if (target_node) {
3793 binder_dec_node(target_node, 1, 0);
3794 binder_dec_node_tmpref(target_node);
3795 }
3796
3797 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3798 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3799 proc->pid, thread->pid, reply ? "reply" :
3800 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3801 target_proc ? target_proc->pid : 0,
3802 target_thread ? target_thread->pid : 0,
3803 t_debug_id, return_error, return_error_param,
3804 (u64)tr->data_size, (u64)tr->offsets_size,
3805 return_error_line);
3806
3807 if (target_thread)
3808 binder_thread_dec_tmpref(target_thread);
3809 if (target_proc)
3810 binder_proc_dec_tmpref(target_proc);
3811
3812 {
3813 struct binder_transaction_log_entry *fe;
3814
3815 e->return_error = return_error;
3816 e->return_error_param = return_error_param;
3817 e->return_error_line = return_error_line;
3818 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3819 *fe = *e;
3820 /*
3821 * write barrier to synchronize with initialization
3822 * of log entry
3823 */
3824 smp_wmb();
3825 WRITE_ONCE(e->debug_id_done, t_debug_id);
3826 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3827 }
3828
3829 BUG_ON(thread->return_error.cmd != BR_OK);
3830 if (in_reply_to) {
3831 binder_set_txn_from_error(in_reply_to, t_debug_id,
3832 return_error, return_error_param);
3833 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3834 binder_enqueue_thread_work(thread, &thread->return_error.work);
3835 binder_send_failed_reply(in_reply_to, return_error);
3836 } else {
3837 binder_inner_proc_lock(proc);
3838 binder_set_extended_error(&thread->ee, t_debug_id,
3839 return_error, return_error_param);
3840 binder_inner_proc_unlock(proc);
3841 thread->return_error.cmd = return_error;
3842 binder_enqueue_thread_work(thread, &thread->return_error.work);
3843 }
3844 }
3845
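/**
 * binder_request_freeze_notification() - handle BC_REQUEST_FREEZE_NOTIFICATION
 * @proc:          calling process
 * @thread:        calling thread
 * @handle_cookie: handle of the remote node plus a userspace cookie
 *
 * Attaches a struct binder_ref_freeze to the ref so the caller is told,
 * via BR_FROZEN_BINDER, whenever the freeze state of the node's owning
 * process changes. The current state is queued immediately as the first
 * notification.
 *
 * A minimal userspace sketch (illustrative only; the fd and
 * write-buffer plumbing, and the name my_state, are assumptions, not
 * part of this file):
 *
 *	struct binder_handle_cookie hc = {
 *		.handle = handle,	// ref obtained earlier
 *		.cookie = (binder_uintptr_t)my_state,
 *	};
 *	// write BC_REQUEST_FREEZE_NOTIFICATION followed by hc through
 *	// the BINDER_WRITE_READ ioctl, then watch for BR_FROZEN_BINDER.
 */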
3846 static int
3847 binder_request_freeze_notification(struct binder_proc *proc,
3848 struct binder_thread *thread,
3849 struct binder_handle_cookie *handle_cookie)
3850 {
3851 struct binder_ref_freeze *freeze;
3852 struct binder_ref *ref;
3853 bool is_frozen;
3854
3855 freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3856 if (!freeze)
3857 return -ENOMEM;
3858 binder_proc_lock(proc);
3859 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3860 if (!ref) {
3861 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3862 proc->pid, thread->pid, handle_cookie->handle);
3863 binder_proc_unlock(proc);
3864 kfree(freeze);
3865 return -EINVAL;
3866 }
3867
3868 binder_node_lock(ref->node);
3869
3870 if (ref->freeze || !ref->node->proc) {
3871 binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
3872 proc->pid, thread->pid,
3873 ref->freeze ? "already set" : "dead node");
3874 binder_node_unlock(ref->node);
3875 binder_proc_unlock(proc);
3876 kfree(freeze);
3877 return -EINVAL;
3878 }
3879 binder_inner_proc_lock(ref->node->proc);
3880 is_frozen = ref->node->proc->is_frozen;
3881 binder_inner_proc_unlock(ref->node->proc);
3882
3883 binder_stats_created(BINDER_STAT_FREEZE);
3884 INIT_LIST_HEAD(&freeze->work.entry);
3885 freeze->cookie = handle_cookie->cookie;
3886 freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3887 freeze->is_frozen = is_frozen;
3888
3889 ref->freeze = freeze;
3890
3891 binder_inner_proc_lock(proc);
3892 binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
3893 binder_wakeup_proc_ilocked(proc);
3894 binder_inner_proc_unlock(proc);
3895
3896 binder_node_unlock(ref->node);
3897 binder_proc_unlock(proc);
3898 return 0;
3899 }
3900
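/**
 * binder_clear_freeze_notification() - handle BC_CLEAR_FREEZE_NOTIFICATION
 * @proc:          calling process
 * @thread:        calling thread
 * @handle_cookie: handle and cookie that were used to arm the notification
 *
 * Detaches the freeze object from the ref and queues (or, if a
 * notification is still in flight, schedules) a
 * BR_CLEAR_FREEZE_NOTIFICATION_DONE acknowledgment; the three possible
 * states are spelled out in the comment inside the function.
 */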
3901 static int
3902 binder_clear_freeze_notification(struct binder_proc *proc,
3903 struct binder_thread *thread,
3904 struct binder_handle_cookie *handle_cookie)
3905 {
3906 struct binder_ref_freeze *freeze;
3907 struct binder_ref *ref;
3908
3909 binder_proc_lock(proc);
3910 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3911 if (!ref) {
3912 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3913 proc->pid, thread->pid, handle_cookie->handle);
3914 binder_proc_unlock(proc);
3915 return -EINVAL;
3916 }
3917
3918 binder_node_lock(ref->node);
3919
3920 if (!ref->freeze) {
3921 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3922 proc->pid, thread->pid);
3923 binder_node_unlock(ref->node);
3924 binder_proc_unlock(proc);
3925 return -EINVAL;
3926 }
3927 freeze = ref->freeze;
3928 binder_inner_proc_lock(proc);
3929 if (freeze->cookie != handle_cookie->cookie) {
3930 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3931 proc->pid, thread->pid, (u64)freeze->cookie,
3932 (u64)handle_cookie->cookie);
3933 binder_inner_proc_unlock(proc);
3934 binder_node_unlock(ref->node);
3935 binder_proc_unlock(proc);
3936 return -EINVAL;
3937 }
3938 ref->freeze = NULL;
3939 /*
3940 * Take the existing freeze object and overwrite its work type. There are three cases here:
3941 * 1. No pending notification. In this case just add the work to the queue.
3942 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
3943 * should resend with the new work type.
3944 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
3945 * needs to be done here.
3946 */
3947 freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
3948 if (list_empty(&freeze->work.entry)) {
3949 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3950 binder_wakeup_proc_ilocked(proc);
3951 } else if (freeze->sent) {
3952 freeze->resend = true;
3953 }
3954 binder_inner_proc_unlock(proc);
3955 binder_node_unlock(ref->node);
3956 binder_proc_unlock(proc);
3957 return 0;
3958 }
3959
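/**
 * binder_freeze_notification_done() - handle BC_FREEZE_NOTIFICATION_DONE
 * @proc:   calling process
 * @thread: calling thread
 * @cookie: cookie of the notification being acknowledged
 *
 * Userspace acks each BR_FROZEN_BINDER it consumes. The matching work
 * item is looked up on proc->delivered_freeze; if the freeze state
 * changed while the notification was in flight (freeze->resend), the
 * work is immediately requeued.
 */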
3960 static int
3961 binder_freeze_notification_done(struct binder_proc *proc,
3962 struct binder_thread *thread,
3963 binder_uintptr_t cookie)
3964 {
3965 struct binder_ref_freeze *freeze = NULL;
3966 struct binder_work *w;
3967
3968 binder_inner_proc_lock(proc);
3969 list_for_each_entry(w, &proc->delivered_freeze, entry) {
3970 struct binder_ref_freeze *tmp_freeze =
3971 container_of(w, struct binder_ref_freeze, work);
3972
3973 if (tmp_freeze->cookie == cookie) {
3974 freeze = tmp_freeze;
3975 break;
3976 }
3977 }
3978 if (!freeze) {
3979 binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
3980 proc->pid, thread->pid, (u64)cookie);
3981 binder_inner_proc_unlock(proc);
3982 return -EINVAL;
3983 }
3984 binder_dequeue_work_ilocked(&freeze->work);
3985 freeze->sent = false;
3986 if (freeze->resend) {
3987 freeze->resend = false;
3988 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3989 binder_wakeup_proc_ilocked(proc);
3990 }
3991 binder_inner_proc_unlock(proc);
3992 return 0;
3993 }
3994
3995 /**
3996 * binder_free_buf() - free the specified buffer
3997 * @proc: binder proc that owns buffer
3998 * @buffer: buffer to be freed
3999 * @is_failure: true if the buffer is being freed because the transaction failed
4000 *
4001 * If the buffer is for an async transaction, enqueue the next async
4002 * transaction from the node.
4003 *
4004 * Cleanup buffer and free it.
4005 */
4006 static void
4007 binder_free_buf(struct binder_proc *proc,
4008 struct binder_thread *thread,
4009 struct binder_buffer *buffer, bool is_failure)
4010 {
4011 binder_inner_proc_lock(proc);
4012 if (buffer->transaction) {
4013 buffer->transaction->buffer = NULL;
4014 buffer->transaction = NULL;
4015 }
4016 binder_inner_proc_unlock(proc);
4017 if (buffer->async_transaction && buffer->target_node) {
4018 struct binder_node *buf_node;
4019 struct binder_work *w;
4020
4021 buf_node = buffer->target_node;
4022 binder_node_inner_lock(buf_node);
4023 BUG_ON(!buf_node->has_async_transaction);
4024 BUG_ON(buf_node->proc != proc);
4025 w = binder_dequeue_work_head_ilocked(
4026 &buf_node->async_todo);
4027 if (!w) {
4028 buf_node->has_async_transaction = false;
4029 } else {
4030 binder_enqueue_work_ilocked(
4031 w, &proc->todo);
4032 binder_wakeup_proc_ilocked(proc);
4033 }
4034 binder_node_inner_unlock(buf_node);
4035 }
4036 trace_binder_transaction_buffer_release(buffer);
4037 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4038 binder_alloc_free_buf(&proc->alloc, buffer);
4039 }
4040
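/*
 * Drain the BC_* command stream supplied by userspace. The write buffer
 * is a packed sequence of a u32 command word followed by that command's
 * payload, e.g. (layout sketch):
 *
 *	| BC_TRANSACTION | struct binder_transaction_data |
 *	| BC_FREE_BUFFER | binder_uintptr_t               |
 *
 * *consumed is advanced only past fully parsed commands, so a partially
 * processed buffer can be resubmitted after an error return.
 */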
4041 static int binder_thread_write(struct binder_proc *proc,
4042 struct binder_thread *thread,
4043 binder_uintptr_t binder_buffer, size_t size,
4044 binder_size_t *consumed)
4045 {
4046 uint32_t cmd;
4047 struct binder_context *context = proc->context;
4048 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4049 void __user *ptr = buffer + *consumed;
4050 void __user *end = buffer + size;
4051
4052 while (ptr < end && thread->return_error.cmd == BR_OK) {
4053 int ret;
4054
4055 if (get_user(cmd, (uint32_t __user *)ptr))
4056 return -EFAULT;
4057 ptr += sizeof(uint32_t);
4058 trace_binder_command(cmd);
4059 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4060 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4061 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4062 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4063 }
4064 switch (cmd) {
4065 case BC_INCREFS:
4066 case BC_ACQUIRE:
4067 case BC_RELEASE:
4068 case BC_DECREFS: {
4069 uint32_t target;
4070 const char *debug_string;
4071 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4072 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4073 struct binder_ref_data rdata;
4074
4075 if (get_user(target, (uint32_t __user *)ptr))
4076 return -EFAULT;
4077
4078 ptr += sizeof(uint32_t);
4079 ret = -1;
4080 if (increment && !target) {
4081 struct binder_node *ctx_mgr_node;
4082
4083 mutex_lock(&context->context_mgr_node_lock);
4084 ctx_mgr_node = context->binder_context_mgr_node;
4085 if (ctx_mgr_node) {
4086 if (ctx_mgr_node->proc == proc) {
4087 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4088 proc->pid, thread->pid);
4089 mutex_unlock(&context->context_mgr_node_lock);
4090 return -EINVAL;
4091 }
4092 ret = binder_inc_ref_for_node(
4093 proc, ctx_mgr_node,
4094 strong, NULL, &rdata);
4095 }
4096 mutex_unlock(&context->context_mgr_node_lock);
4097 }
4098 if (ret)
4099 ret = binder_update_ref_for_handle(
4100 proc, target, increment, strong,
4101 &rdata);
4102 if (!ret && rdata.desc != target) {
4103 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4104 proc->pid, thread->pid,
4105 target, rdata.desc);
4106 }
4107 switch (cmd) {
4108 case BC_INCREFS:
4109 debug_string = "IncRefs";
4110 break;
4111 case BC_ACQUIRE:
4112 debug_string = "Acquire";
4113 break;
4114 case BC_RELEASE:
4115 debug_string = "Release";
4116 break;
4117 case BC_DECREFS:
4118 default:
4119 debug_string = "DecRefs";
4120 break;
4121 }
4122 if (ret) {
4123 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4124 proc->pid, thread->pid, debug_string,
4125 strong, target, ret);
4126 break;
4127 }
4128 binder_debug(BINDER_DEBUG_USER_REFS,
4129 "%d:%d %s ref %d desc %d s %d w %d\n",
4130 proc->pid, thread->pid, debug_string,
4131 rdata.debug_id, rdata.desc, rdata.strong,
4132 rdata.weak);
4133 break;
4134 }
4135 case BC_INCREFS_DONE:
4136 case BC_ACQUIRE_DONE: {
4137 binder_uintptr_t node_ptr;
4138 binder_uintptr_t cookie;
4139 struct binder_node *node;
4140 bool free_node;
4141
4142 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4143 return -EFAULT;
4144 ptr += sizeof(binder_uintptr_t);
4145 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4146 return -EFAULT;
4147 ptr += sizeof(binder_uintptr_t);
4148 node = binder_get_node(proc, node_ptr);
4149 if (node == NULL) {
4150 binder_user_error("%d:%d %s u%016llx no match\n",
4151 proc->pid, thread->pid,
4152 cmd == BC_INCREFS_DONE ?
4153 "BC_INCREFS_DONE" :
4154 "BC_ACQUIRE_DONE",
4155 (u64)node_ptr);
4156 break;
4157 }
4158 if (cookie != node->cookie) {
4159 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4160 proc->pid, thread->pid,
4161 cmd == BC_INCREFS_DONE ?
4162 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4163 (u64)node_ptr, node->debug_id,
4164 (u64)cookie, (u64)node->cookie);
4165 binder_put_node(node);
4166 break;
4167 }
4168 binder_node_inner_lock(node);
4169 if (cmd == BC_ACQUIRE_DONE) {
4170 if (node->pending_strong_ref == 0) {
4171 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4172 proc->pid, thread->pid,
4173 node->debug_id);
4174 binder_node_inner_unlock(node);
4175 binder_put_node(node);
4176 break;
4177 }
4178 node->pending_strong_ref = 0;
4179 } else {
4180 if (node->pending_weak_ref == 0) {
4181 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4182 proc->pid, thread->pid,
4183 node->debug_id);
4184 binder_node_inner_unlock(node);
4185 binder_put_node(node);
4186 break;
4187 }
4188 node->pending_weak_ref = 0;
4189 }
4190 free_node = binder_dec_node_nilocked(node,
4191 cmd == BC_ACQUIRE_DONE, 0);
4192 WARN_ON(free_node);
4193 binder_debug(BINDER_DEBUG_USER_REFS,
4194 "%d:%d %s node %d ls %d lw %d tr %d\n",
4195 proc->pid, thread->pid,
4196 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4197 node->debug_id, node->local_strong_refs,
4198 node->local_weak_refs, node->tmp_refs);
4199 binder_node_inner_unlock(node);
4200 binder_put_node(node);
4201 break;
4202 }
4203 case BC_ATTEMPT_ACQUIRE:
4204 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4205 return -EINVAL;
4206 case BC_ACQUIRE_RESULT:
4207 pr_err("BC_ACQUIRE_RESULT not supported\n");
4208 return -EINVAL;
4209
4210 case BC_FREE_BUFFER: {
4211 binder_uintptr_t data_ptr;
4212 struct binder_buffer *buffer;
4213
4214 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4215 return -EFAULT;
4216 ptr += sizeof(binder_uintptr_t);
4217
4218 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4219 data_ptr);
4220 if (IS_ERR_OR_NULL(buffer)) {
4221 if (PTR_ERR(buffer) == -EPERM) {
4222 binder_user_error(
4223 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4224 proc->pid, thread->pid,
4225 (u64)data_ptr);
4226 } else {
4227 binder_user_error(
4228 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4229 proc->pid, thread->pid,
4230 (u64)data_ptr);
4231 }
4232 break;
4233 }
4234 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4235 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4236 proc->pid, thread->pid, (u64)data_ptr,
4237 buffer->debug_id,
4238 buffer->transaction ? "active" : "finished");
4239 binder_free_buf(proc, thread, buffer, false);
4240 break;
4241 }
4242
4243 case BC_TRANSACTION_SG:
4244 case BC_REPLY_SG: {
4245 struct binder_transaction_data_sg tr;
4246
4247 if (copy_from_user(&tr, ptr, sizeof(tr)))
4248 return -EFAULT;
4249 ptr += sizeof(tr);
4250 binder_transaction(proc, thread, &tr.transaction_data,
4251 cmd == BC_REPLY_SG, tr.buffers_size);
4252 break;
4253 }
4254 case BC_TRANSACTION:
4255 case BC_REPLY: {
4256 struct binder_transaction_data tr;
4257
4258 if (copy_from_user(&tr, ptr, sizeof(tr)))
4259 return -EFAULT;
4260 ptr += sizeof(tr);
4261 binder_transaction(proc, thread, &tr,
4262 cmd == BC_REPLY, 0);
4263 break;
4264 }
4265
4266 case BC_REGISTER_LOOPER:
4267 binder_debug(BINDER_DEBUG_THREADS,
4268 "%d:%d BC_REGISTER_LOOPER\n",
4269 proc->pid, thread->pid);
4270 binder_inner_proc_lock(proc);
4271 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4272 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4273 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4274 proc->pid, thread->pid);
4275 } else if (proc->requested_threads == 0) {
4276 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4277 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4278 proc->pid, thread->pid);
4279 } else {
4280 proc->requested_threads--;
4281 proc->requested_threads_started++;
4282 }
4283 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4284 binder_inner_proc_unlock(proc);
4285 break;
4286 case BC_ENTER_LOOPER:
4287 binder_debug(BINDER_DEBUG_THREADS,
4288 "%d:%d BC_ENTER_LOOPER\n",
4289 proc->pid, thread->pid);
4290 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4291 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4292 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4293 proc->pid, thread->pid);
4294 }
4295 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4296 break;
4297 case BC_EXIT_LOOPER:
4298 binder_debug(BINDER_DEBUG_THREADS,
4299 "%d:%d BC_EXIT_LOOPER\n",
4300 proc->pid, thread->pid);
4301 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4302 break;
4303
4304 case BC_REQUEST_DEATH_NOTIFICATION:
4305 case BC_CLEAR_DEATH_NOTIFICATION: {
4306 uint32_t target;
4307 binder_uintptr_t cookie;
4308 struct binder_ref *ref;
4309 struct binder_ref_death *death = NULL;
4310
4311 if (get_user(target, (uint32_t __user *)ptr))
4312 return -EFAULT;
4313 ptr += sizeof(uint32_t);
4314 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4315 return -EFAULT;
4316 ptr += sizeof(binder_uintptr_t);
4317 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4318 /*
4319 * Allocate memory for death notification
4320 * before taking lock
4321 */
4322 death = kzalloc(sizeof(*death), GFP_KERNEL);
4323 if (death == NULL) {
4324 WARN_ON(thread->return_error.cmd !=
4325 BR_OK);
4326 thread->return_error.cmd = BR_ERROR;
4327 binder_enqueue_thread_work(
4328 thread,
4329 &thread->return_error.work);
4330 binder_debug(
4331 BINDER_DEBUG_FAILED_TRANSACTION,
4332 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4333 proc->pid, thread->pid);
4334 break;
4335 }
4336 }
4337 binder_proc_lock(proc);
4338 ref = binder_get_ref_olocked(proc, target, false);
4339 if (ref == NULL) {
4340 binder_user_error("%d:%d %s invalid ref %d\n",
4341 proc->pid, thread->pid,
4342 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4343 "BC_REQUEST_DEATH_NOTIFICATION" :
4344 "BC_CLEAR_DEATH_NOTIFICATION",
4345 target);
4346 binder_proc_unlock(proc);
4347 kfree(death);
4348 break;
4349 }
4350
4351 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4352 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4353 proc->pid, thread->pid,
4354 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4355 "BC_REQUEST_DEATH_NOTIFICATION" :
4356 "BC_CLEAR_DEATH_NOTIFICATION",
4357 (u64)cookie, ref->data.debug_id,
4358 ref->data.desc, ref->data.strong,
4359 ref->data.weak, ref->node->debug_id);
4360
4361 binder_node_lock(ref->node);
4362 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4363 if (ref->death) {
4364 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4365 proc->pid, thread->pid);
4366 binder_node_unlock(ref->node);
4367 binder_proc_unlock(proc);
4368 kfree(death);
4369 break;
4370 }
4371 binder_stats_created(BINDER_STAT_DEATH);
4372 INIT_LIST_HEAD(&death->work.entry);
4373 death->cookie = cookie;
4374 ref->death = death;
4375 if (ref->node->proc == NULL) {
4376 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4377
4378 binder_inner_proc_lock(proc);
4379 binder_enqueue_work_ilocked(
4380 &ref->death->work, &proc->todo);
4381 binder_wakeup_proc_ilocked(proc);
4382 binder_inner_proc_unlock(proc);
4383 }
4384 } else {
4385 if (ref->death == NULL) {
4386 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4387 proc->pid, thread->pid);
4388 binder_node_unlock(ref->node);
4389 binder_proc_unlock(proc);
4390 break;
4391 }
4392 death = ref->death;
4393 if (death->cookie != cookie) {
4394 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4395 proc->pid, thread->pid,
4396 (u64)death->cookie,
4397 (u64)cookie);
4398 binder_node_unlock(ref->node);
4399 binder_proc_unlock(proc);
4400 break;
4401 }
4402 ref->death = NULL;
4403 binder_inner_proc_lock(proc);
4404 if (list_empty(&death->work.entry)) {
4405 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4406 if (thread->looper &
4407 (BINDER_LOOPER_STATE_REGISTERED |
4408 BINDER_LOOPER_STATE_ENTERED))
4409 binder_enqueue_thread_work_ilocked(
4410 thread,
4411 &death->work);
4412 else {
4413 binder_enqueue_work_ilocked(
4414 &death->work,
4415 &proc->todo);
4416 binder_wakeup_proc_ilocked(
4417 proc);
4418 }
4419 } else {
4420 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4421 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4422 }
4423 binder_inner_proc_unlock(proc);
4424 }
4425 binder_node_unlock(ref->node);
4426 binder_proc_unlock(proc);
4427 } break;
4428 case BC_DEAD_BINDER_DONE: {
4429 struct binder_work *w;
4430 binder_uintptr_t cookie;
4431 struct binder_ref_death *death = NULL;
4432
4433 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4434 return -EFAULT;
4435
4436 ptr += sizeof(cookie);
4437 binder_inner_proc_lock(proc);
4438 list_for_each_entry(w, &proc->delivered_death,
4439 entry) {
4440 struct binder_ref_death *tmp_death =
4441 container_of(w,
4442 struct binder_ref_death,
4443 work);
4444
4445 if (tmp_death->cookie == cookie) {
4446 death = tmp_death;
4447 break;
4448 }
4449 }
4450 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4451 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4452 proc->pid, thread->pid, (u64)cookie,
4453 death);
4454 if (death == NULL) {
4455 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4456 proc->pid, thread->pid, (u64)cookie);
4457 binder_inner_proc_unlock(proc);
4458 break;
4459 }
4460 binder_dequeue_work_ilocked(&death->work);
4461 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4462 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4463 if (thread->looper &
4464 (BINDER_LOOPER_STATE_REGISTERED |
4465 BINDER_LOOPER_STATE_ENTERED))
4466 binder_enqueue_thread_work_ilocked(
4467 thread, &death->work);
4468 else {
4469 binder_enqueue_work_ilocked(
4470 &death->work,
4471 &proc->todo);
4472 binder_wakeup_proc_ilocked(proc);
4473 }
4474 }
4475 binder_inner_proc_unlock(proc);
4476 } break;
4477
4478 case BC_REQUEST_FREEZE_NOTIFICATION: {
4479 struct binder_handle_cookie handle_cookie;
4480 int error;
4481
4482 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4483 return -EFAULT;
4484 ptr += sizeof(handle_cookie);
4485 error = binder_request_freeze_notification(proc, thread,
4486 &handle_cookie);
4487 if (error)
4488 return error;
4489 } break;
4490
4491 case BC_CLEAR_FREEZE_NOTIFICATION: {
4492 struct binder_handle_cookie handle_cookie;
4493 int error;
4494
4495 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4496 return -EFAULT;
4497 ptr += sizeof(handle_cookie);
4498 error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4499 if (error)
4500 return error;
4501 } break;
4502
4503 case BC_FREEZE_NOTIFICATION_DONE: {
4504 binder_uintptr_t cookie;
4505 int error;
4506
4507 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4508 return -EFAULT;
4509
4510 ptr += sizeof(cookie);
4511 error = binder_freeze_notification_done(proc, thread, cookie);
4512 if (error)
4513 return error;
4514 } break;
4515
4516 default:
4517 pr_err("%d:%d unknown command %u\n",
4518 proc->pid, thread->pid, cmd);
4519 return -EINVAL;
4520 }
4521 *consumed = ptr - buffer;
4522 }
4523 return 0;
4524 }
4525
4526 static void binder_stat_br(struct binder_proc *proc,
4527 struct binder_thread *thread, uint32_t cmd)
4528 {
4529 trace_binder_return(cmd);
4530 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4531 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4532 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4533 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4534 }
4535 }
4536
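/**
 * binder_put_node_cmd() - write a node ref-count command to the read buffer
 * @proc:          process the node belongs to
 * @thread:        thread doing the read
 * @ptrp:          in/out cursor into the userspace read buffer
 * @node_ptr:      userspace address of the node object
 * @node_cookie:   userspace cookie of the node
 * @node_debug_id: node debug id, for logging only
 * @cmd:           BR_INCREFS, BR_ACQUIRE, BR_RELEASE or BR_DECREFS
 * @cmd_name:      printable name of @cmd
 *
 * Emits the (cmd, ptr, cookie) triple and advances *ptrp past it.
 * Return: 0 on success, -EFAULT if the buffer cannot be written.
 */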
4537 static int binder_put_node_cmd(struct binder_proc *proc,
4538 struct binder_thread *thread,
4539 void __user **ptrp,
4540 binder_uintptr_t node_ptr,
4541 binder_uintptr_t node_cookie,
4542 int node_debug_id,
4543 uint32_t cmd, const char *cmd_name)
4544 {
4545 void __user *ptr = *ptrp;
4546
4547 if (put_user(cmd, (uint32_t __user *)ptr))
4548 return -EFAULT;
4549 ptr += sizeof(uint32_t);
4550
4551 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4552 return -EFAULT;
4553 ptr += sizeof(binder_uintptr_t);
4554
4555 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4556 return -EFAULT;
4557 ptr += sizeof(binder_uintptr_t);
4558
4559 binder_stat_br(proc, thread, cmd);
4560 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4561 proc->pid, thread->pid, cmd_name, node_debug_id,
4562 (u64)node_ptr, (u64)node_cookie);
4563
4564 *ptrp = ptr;
4565 return 0;
4566 }
4567
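/*
 * Sleep until this thread has work (or, when do_proc_work is true,
 * until the process has work). While willing to handle process work the
 * thread parks itself on proc->waiting_threads so
 * binder_wakeup_proc_ilocked() can find it. Returns -EINTR if
 * interrupted by a signal.
 */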
4568 static int binder_wait_for_work(struct binder_thread *thread,
4569 bool do_proc_work)
4570 {
4571 DEFINE_WAIT(wait);
4572 struct binder_proc *proc = thread->proc;
4573 int ret = 0;
4574
4575 binder_inner_proc_lock(proc);
4576 for (;;) {
4577 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4578 if (binder_has_work_ilocked(thread, do_proc_work))
4579 break;
4580 if (do_proc_work)
4581 list_add(&thread->waiting_thread_node,
4582 &proc->waiting_threads);
4583 binder_inner_proc_unlock(proc);
4584 schedule();
4585 binder_inner_proc_lock(proc);
4586 list_del_init(&thread->waiting_thread_node);
4587 if (signal_pending(current)) {
4588 ret = -EINTR;
4589 break;
4590 }
4591 }
4592 finish_wait(&thread->wait, &wait);
4593 binder_inner_proc_unlock(proc);
4594
4595 return ret;
4596 }
4597
4598 /**
4599 * binder_apply_fd_fixups() - finish fd translation
4600 * @proc: binder_proc associated with @t->buffer
4601 * @t: binder transaction with list of fd fixups
4602 *
4603 * Now that we are in the context of the transaction target
4604 * process, we can allocate and install fds. Process the
4605 * list of fds to translate and fixup the buffer with the
4606 * new fds first and only then install the files.
4607 *
4608 * If we fail to allocate an fd, skip the install and release
4609 * any fds that have already been allocated.
4610 */
4611 static int binder_apply_fd_fixups(struct binder_proc *proc,
4612 struct binder_transaction *t)
4613 {
4614 struct binder_txn_fd_fixup *fixup, *tmp;
4615 int ret = 0;
4616
4617 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4618 int fd = get_unused_fd_flags(O_CLOEXEC);
4619
4620 if (fd < 0) {
4621 binder_debug(BINDER_DEBUG_TRANSACTION,
4622 "failed fd fixup txn %d fd %d\n",
4623 t->debug_id, fd);
4624 ret = -ENOMEM;
4625 goto err;
4626 }
4627 binder_debug(BINDER_DEBUG_TRANSACTION,
4628 "fd fixup txn %d fd %d\n",
4629 t->debug_id, fd);
4630 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4631 fixup->target_fd = fd;
4632 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4633 fixup->offset, &fd,
4634 sizeof(u32))) {
4635 ret = -EINVAL;
4636 goto err;
4637 }
4638 }
4639 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4640 fd_install(fixup->target_fd, fixup->file);
4641 list_del(&fixup->fixup_entry);
4642 kfree(fixup);
4643 }
4644
4645 return ret;
4646
4647 err:
4648 binder_free_txn_fixups(t);
4649 return ret;
4650 }
4651
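/*
 * Fill the userspace read buffer with a stream of BR_* return codes and
 * payloads, mirroring the BC_* format used on the write side. A leading
 * BR_NOOP is emitted on a fresh buffer so the stream always starts with
 * a valid code (the "ptr - buffer == 4" test below retries when nothing
 * beyond that word was produced), and the done: path may append
 * BR_SPAWN_LOOPER to ask userspace for another looper thread.
 */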
4652 static int binder_thread_read(struct binder_proc *proc,
4653 struct binder_thread *thread,
4654 binder_uintptr_t binder_buffer, size_t size,
4655 binder_size_t *consumed, int non_block)
4656 {
4657 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4658 void __user *ptr = buffer + *consumed;
4659 void __user *end = buffer + size;
4660
4661 int ret = 0;
4662 int wait_for_proc_work;
4663
4664 if (*consumed == 0) {
4665 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4666 return -EFAULT;
4667 ptr += sizeof(uint32_t);
4668 }
4669
4670 retry:
4671 binder_inner_proc_lock(proc);
4672 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4673 binder_inner_proc_unlock(proc);
4674
4675 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4676
4677 trace_binder_wait_for_work(wait_for_proc_work,
4678 !!thread->transaction_stack,
4679 !binder_worklist_empty(proc, &thread->todo));
4680 if (wait_for_proc_work) {
4681 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4682 BINDER_LOOPER_STATE_ENTERED))) {
4683 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4684 proc->pid, thread->pid, thread->looper);
4685 wait_event_interruptible(binder_user_error_wait,
4686 binder_stop_on_user_error < 2);
4687 }
4688 binder_set_nice(proc->default_priority);
4689 }
4690
4691 if (non_block) {
4692 if (!binder_has_work(thread, wait_for_proc_work))
4693 ret = -EAGAIN;
4694 } else {
4695 ret = binder_wait_for_work(thread, wait_for_proc_work);
4696 }
4697
4698 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4699
4700 if (ret)
4701 return ret;
4702
4703 while (1) {
4704 uint32_t cmd;
4705 struct binder_transaction_data_secctx tr;
4706 struct binder_transaction_data *trd = &tr.transaction_data;
4707 struct binder_work *w = NULL;
4708 struct list_head *list = NULL;
4709 struct binder_transaction *t = NULL;
4710 struct binder_thread *t_from;
4711 size_t trsize = sizeof(*trd);
4712
4713 binder_inner_proc_lock(proc);
4714 if (!binder_worklist_empty_ilocked(&thread->todo))
4715 list = &thread->todo;
4716 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4717 wait_for_proc_work)
4718 list = &proc->todo;
4719 else {
4720 binder_inner_proc_unlock(proc);
4721
4722 /* no data added */
4723 if (ptr - buffer == 4 && !thread->looper_need_return)
4724 goto retry;
4725 break;
4726 }
4727
4728 if (end - ptr < sizeof(tr) + 4) {
4729 binder_inner_proc_unlock(proc);
4730 break;
4731 }
4732 w = binder_dequeue_work_head_ilocked(list);
4733 if (binder_worklist_empty_ilocked(&thread->todo))
4734 thread->process_todo = false;
4735
4736 switch (w->type) {
4737 case BINDER_WORK_TRANSACTION: {
4738 binder_inner_proc_unlock(proc);
4739 t = container_of(w, struct binder_transaction, work);
4740 } break;
4741 case BINDER_WORK_RETURN_ERROR: {
4742 struct binder_error *e = container_of(
4743 w, struct binder_error, work);
4744
4745 WARN_ON(e->cmd == BR_OK);
4746 binder_inner_proc_unlock(proc);
4747 if (put_user(e->cmd, (uint32_t __user *)ptr))
4748 return -EFAULT;
4749 cmd = e->cmd;
4750 e->cmd = BR_OK;
4751 ptr += sizeof(uint32_t);
4752
4753 binder_stat_br(proc, thread, cmd);
4754 } break;
4755 case BINDER_WORK_TRANSACTION_COMPLETE:
4756 case BINDER_WORK_TRANSACTION_PENDING:
4757 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4758 if (proc->oneway_spam_detection_enabled &&
4759 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4760 cmd = BR_ONEWAY_SPAM_SUSPECT;
4761 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4762 cmd = BR_TRANSACTION_PENDING_FROZEN;
4763 else
4764 cmd = BR_TRANSACTION_COMPLETE;
4765 binder_inner_proc_unlock(proc);
4766 kfree(w);
4767 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4768 if (put_user(cmd, (uint32_t __user *)ptr))
4769 return -EFAULT;
4770 ptr += sizeof(uint32_t);
4771
4772 binder_stat_br(proc, thread, cmd);
4773 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4774 "%d:%d BR_TRANSACTION_COMPLETE\n",
4775 proc->pid, thread->pid);
4776 } break;
4777 case BINDER_WORK_NODE: {
4778 struct binder_node *node = container_of(w, struct binder_node, work);
4779 int strong, weak;
4780 binder_uintptr_t node_ptr = node->ptr;
4781 binder_uintptr_t node_cookie = node->cookie;
4782 int node_debug_id = node->debug_id;
4783 int has_weak_ref;
4784 int has_strong_ref;
4785 void __user *orig_ptr = ptr;
4786
4787 BUG_ON(proc != node->proc);
4788 strong = node->internal_strong_refs ||
4789 node->local_strong_refs;
4790 weak = !hlist_empty(&node->refs) ||
4791 node->local_weak_refs ||
4792 node->tmp_refs || strong;
4793 has_strong_ref = node->has_strong_ref;
4794 has_weak_ref = node->has_weak_ref;
4795
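/*
 * Reconcile the refcount state userspace last saw
 * (has_strong_ref/has_weak_ref) with the state the kernel now wants
 * (strong/weak): each mismatch below turns into one
 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS command.
 */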
4796 if (weak && !has_weak_ref) {
4797 node->has_weak_ref = 1;
4798 node->pending_weak_ref = 1;
4799 node->local_weak_refs++;
4800 }
4801 if (strong && !has_strong_ref) {
4802 node->has_strong_ref = 1;
4803 node->pending_strong_ref = 1;
4804 node->local_strong_refs++;
4805 }
4806 if (!strong && has_strong_ref)
4807 node->has_strong_ref = 0;
4808 if (!weak && has_weak_ref)
4809 node->has_weak_ref = 0;
4810 if (!weak && !strong) {
4811 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4812 "%d:%d node %d u%016llx c%016llx deleted\n",
4813 proc->pid, thread->pid,
4814 node_debug_id,
4815 (u64)node_ptr,
4816 (u64)node_cookie);
4817 rb_erase(&node->rb_node, &proc->nodes);
4818 binder_inner_proc_unlock(proc);
4819 binder_node_lock(node);
4820 /*
4821 * Acquire the node lock before freeing the
4822 * node to serialize with other threads that
4823 * may have been holding the node lock while
4824 * decrementing this node (avoids race where
4825 * this thread frees while the other thread
4826 * is unlocking the node after the final
4827 * decrement)
4828 */
4829 binder_node_unlock(node);
4830 binder_free_node(node);
4831 } else
4832 binder_inner_proc_unlock(proc);
4833
4834 if (weak && !has_weak_ref)
4835 ret = binder_put_node_cmd(
4836 proc, thread, &ptr, node_ptr,
4837 node_cookie, node_debug_id,
4838 BR_INCREFS, "BR_INCREFS");
4839 if (!ret && strong && !has_strong_ref)
4840 ret = binder_put_node_cmd(
4841 proc, thread, &ptr, node_ptr,
4842 node_cookie, node_debug_id,
4843 BR_ACQUIRE, "BR_ACQUIRE");
4844 if (!ret && !strong && has_strong_ref)
4845 ret = binder_put_node_cmd(
4846 proc, thread, &ptr, node_ptr,
4847 node_cookie, node_debug_id,
4848 BR_RELEASE, "BR_RELEASE");
4849 if (!ret && !weak && has_weak_ref)
4850 ret = binder_put_node_cmd(
4851 proc, thread, &ptr, node_ptr,
4852 node_cookie, node_debug_id,
4853 BR_DECREFS, "BR_DECREFS");
4854 if (orig_ptr == ptr)
4855 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4856 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4857 proc->pid, thread->pid,
4858 node_debug_id,
4859 (u64)node_ptr,
4860 (u64)node_cookie);
4861 if (ret)
4862 return ret;
4863 } break;
4864 case BINDER_WORK_DEAD_BINDER:
4865 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4866 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4867 struct binder_ref_death *death;
4868 uint32_t cmd;
4869 binder_uintptr_t cookie;
4870
4871 death = container_of(w, struct binder_ref_death, work);
4872 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4873 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4874 else
4875 cmd = BR_DEAD_BINDER;
4876 cookie = death->cookie;
4877
4878 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4879 "%d:%d %s %016llx\n",
4880 proc->pid, thread->pid,
4881 cmd == BR_DEAD_BINDER ?
4882 "BR_DEAD_BINDER" :
4883 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4884 (u64)cookie);
4885 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4886 binder_inner_proc_unlock(proc);
4887 kfree(death);
4888 binder_stats_deleted(BINDER_STAT_DEATH);
4889 } else {
4890 binder_enqueue_work_ilocked(
4891 w, &proc->delivered_death);
4892 binder_inner_proc_unlock(proc);
4893 }
4894 if (put_user(cmd, (uint32_t __user *)ptr))
4895 return -EFAULT;
4896 ptr += sizeof(uint32_t);
4897 if (put_user(cookie,
4898 (binder_uintptr_t __user *)ptr))
4899 return -EFAULT;
4900 ptr += sizeof(binder_uintptr_t);
4901 binder_stat_br(proc, thread, cmd);
4902 if (cmd == BR_DEAD_BINDER)
4903 goto done; /* DEAD_BINDER notifications can cause transactions */
4904 } break;
4905
4906 case BINDER_WORK_FROZEN_BINDER: {
4907 struct binder_ref_freeze *freeze;
4908 struct binder_frozen_state_info info;
4909
4910 memset(&info, 0, sizeof(info));
4911 freeze = container_of(w, struct binder_ref_freeze, work);
4912 info.is_frozen = freeze->is_frozen;
4913 info.cookie = freeze->cookie;
4914 freeze->sent = true;
4915 binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4916 binder_inner_proc_unlock(proc);
4917
4918 if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4919 return -EFAULT;
4920 ptr += sizeof(uint32_t);
4921 if (copy_to_user(ptr, &info, sizeof(info)))
4922 return -EFAULT;
4923 ptr += sizeof(info);
4924 binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4925 goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4926 } break;
4927
4928 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4929 struct binder_ref_freeze *freeze =
4930 container_of(w, struct binder_ref_freeze, work);
4931 binder_uintptr_t cookie = freeze->cookie;
4932
4933 binder_inner_proc_unlock(proc);
4934 kfree(freeze);
4935 binder_stats_deleted(BINDER_STAT_FREEZE);
4936 if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
4937 return -EFAULT;
4938 ptr += sizeof(uint32_t);
4939 if (put_user(cookie, (binder_uintptr_t __user *)ptr))
4940 return -EFAULT;
4941 ptr += sizeof(binder_uintptr_t);
4942 binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
4943 } break;
4944
4945 default:
4946 binder_inner_proc_unlock(proc);
4947 pr_err("%d:%d: bad work type %d\n",
4948 proc->pid, thread->pid, w->type);
4949 break;
4950 }
4951
4952 if (!t)
4953 continue;
4954
4955 BUG_ON(t->buffer == NULL);
4956 if (t->buffer->target_node) {
4957 struct binder_node *target_node = t->buffer->target_node;
4958
4959 trd->target.ptr = target_node->ptr;
4960 trd->cookie = target_node->cookie;
4961 t->saved_priority = task_nice(current);
4962 if (t->priority < target_node->min_priority &&
4963 !(t->flags & TF_ONE_WAY))
4964 binder_set_nice(t->priority);
4965 else if (!(t->flags & TF_ONE_WAY) ||
4966 t->saved_priority > target_node->min_priority)
4967 binder_set_nice(target_node->min_priority);
4968 cmd = BR_TRANSACTION;
4969 } else {
4970 trd->target.ptr = 0;
4971 trd->cookie = 0;
4972 cmd = BR_REPLY;
4973 }
4974 trd->code = t->code;
4975 trd->flags = t->flags;
4976 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4977
4978 t_from = binder_get_txn_from(t);
4979 if (t_from) {
4980 struct task_struct *sender = t_from->proc->tsk;
4981
4982 trd->sender_pid =
4983 task_tgid_nr_ns(sender,
4984 task_active_pid_ns(current));
4985 } else {
4986 trd->sender_pid = 0;
4987 }
4988
4989 ret = binder_apply_fd_fixups(proc, t);
4990 if (ret) {
4991 struct binder_buffer *buffer = t->buffer;
4992 bool oneway = !!(t->flags & TF_ONE_WAY);
4993 int tid = t->debug_id;
4994
4995 if (t_from)
4996 binder_thread_dec_tmpref(t_from);
4997 buffer->transaction = NULL;
4998 binder_cleanup_transaction(t, "fd fixups failed",
4999 BR_FAILED_REPLY);
5000 binder_free_buf(proc, thread, buffer, true);
5001 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5002 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5003 proc->pid, thread->pid,
5004 oneway ? "async " :
5005 (cmd == BR_REPLY ? "reply " : ""),
5006 tid, BR_FAILED_REPLY, ret, __LINE__);
5007 if (cmd == BR_REPLY) {
5008 cmd = BR_FAILED_REPLY;
5009 if (put_user(cmd, (uint32_t __user *)ptr))
5010 return -EFAULT;
5011 ptr += sizeof(uint32_t);
5012 binder_stat_br(proc, thread, cmd);
5013 break;
5014 }
5015 continue;
5016 }
5017 trd->data_size = t->buffer->data_size;
5018 trd->offsets_size = t->buffer->offsets_size;
5019 trd->data.ptr.buffer = t->buffer->user_data;
5020 trd->data.ptr.offsets = trd->data.ptr.buffer +
5021 ALIGN(t->buffer->data_size,
5022 sizeof(void *));
5023
5024 tr.secctx = t->security_ctx;
5025 if (t->security_ctx) {
5026 cmd = BR_TRANSACTION_SEC_CTX;
5027 trsize = sizeof(tr);
5028 }
5029 if (put_user(cmd, (uint32_t __user *)ptr)) {
5030 if (t_from)
5031 binder_thread_dec_tmpref(t_from);
5032
5033 binder_cleanup_transaction(t, "put_user failed",
5034 BR_FAILED_REPLY);
5035
5036 return -EFAULT;
5037 }
5038 ptr += sizeof(uint32_t);
5039 if (copy_to_user(ptr, &tr, trsize)) {
5040 if (t_from)
5041 binder_thread_dec_tmpref(t_from);
5042
5043 binder_cleanup_transaction(t, "copy_to_user failed",
5044 BR_FAILED_REPLY);
5045
5046 return -EFAULT;
5047 }
5048 ptr += trsize;
5049
5050 trace_binder_transaction_received(t);
5051 binder_stat_br(proc, thread, cmd);
5052 binder_debug(BINDER_DEBUG_TRANSACTION,
5053 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
5054 proc->pid, thread->pid,
5055 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5056 (cmd == BR_TRANSACTION_SEC_CTX) ?
5057 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5058 t->debug_id, t_from ? t_from->proc->pid : 0,
5059 t_from ? t_from->pid : 0, cmd,
5060 t->buffer->data_size, t->buffer->offsets_size,
5061 (u64)trd->data.ptr.buffer,
5062 (u64)trd->data.ptr.offsets);
5063
5064 if (t_from)
5065 binder_thread_dec_tmpref(t_from);
5066 t->buffer->allow_user_free = 1;
5067 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5068 binder_inner_proc_lock(thread->proc);
5069 t->to_parent = thread->transaction_stack;
5070 t->to_thread = thread;
5071 thread->transaction_stack = t;
5072 binder_inner_proc_unlock(thread->proc);
5073 } else {
5074 binder_free_transaction(t);
5075 }
5076 break;
5077 }
5078
5079 done:
5080
5081 *consumed = ptr - buffer;
5082 binder_inner_proc_lock(proc);
5083 if (proc->requested_threads == 0 &&
5084 list_empty(&thread->proc->waiting_threads) &&
5085 proc->requested_threads_started < proc->max_threads &&
5086 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5087 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
5088 /* spawn a new thread if we leave this out */) {
5089 proc->requested_threads++;
5090 binder_inner_proc_unlock(proc);
5091 binder_debug(BINDER_DEBUG_THREADS,
5092 "%d:%d BR_SPAWN_LOOPER\n",
5093 proc->pid, thread->pid);
5094 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5095 return -EFAULT;
5096 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5097 } else
5098 binder_inner_proc_unlock(proc);
5099 return 0;
5100 }
5101
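/*
 * binder_release_work() - flush a work list whose items can no longer
 * be delivered because the owning process or thread is going away.
 * Pending transactions are aborted with BR_DEAD_REPLY; everything
 * else is logged and freed.
 */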
5102 static void binder_release_work(struct binder_proc *proc,
5103 struct list_head *list)
5104 {
5105 struct binder_work *w;
5106 enum binder_work_type wtype;
5107
5108 while (1) {
5109 binder_inner_proc_lock(proc);
5110 w = binder_dequeue_work_head_ilocked(list);
5111 wtype = w ? w->type : 0;
5112 binder_inner_proc_unlock(proc);
5113 if (!w)
5114 return;
5115
5116 switch (wtype) {
5117 case BINDER_WORK_TRANSACTION: {
5118 struct binder_transaction *t;
5119
5120 t = container_of(w, struct binder_transaction, work);
5121
5122 binder_cleanup_transaction(t, "process died.",
5123 BR_DEAD_REPLY);
5124 } break;
5125 case BINDER_WORK_RETURN_ERROR: {
5126 struct binder_error *e = container_of(
5127 w, struct binder_error, work);
5128
5129 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5130 "undelivered TRANSACTION_ERROR: %u\n",
5131 e->cmd);
5132 } break;
5133 case BINDER_WORK_TRANSACTION_PENDING:
5134 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5135 case BINDER_WORK_TRANSACTION_COMPLETE: {
5136 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5137 "undelivered TRANSACTION_COMPLETE\n");
5138 kfree(w);
5139 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5140 } break;
5141 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5142 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5143 struct binder_ref_death *death;
5144
5145 death = container_of(w, struct binder_ref_death, work);
5146 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5147 "undelivered death notification, %016llx\n",
5148 (u64)death->cookie);
5149 kfree(death);
5150 binder_stats_deleted(BINDER_STAT_DEATH);
5151 } break;
5152 case BINDER_WORK_NODE:
5153 break;
5154 default:
5155 pr_err("unexpected work type, %d, not freed\n",
5156 wtype);
5157 break;
5158 }
5159 }
5160
5161 }
5162
5163 static struct binder_thread *binder_get_thread_ilocked(
5164 struct binder_proc *proc, struct binder_thread *new_thread)
5165 {
5166 struct binder_thread *thread = NULL;
5167 struct rb_node *parent = NULL;
5168 struct rb_node **p = &proc->threads.rb_node;
5169
5170 while (*p) {
5171 parent = *p;
5172 thread = rb_entry(parent, struct binder_thread, rb_node);
5173
5174 if (current->pid < thread->pid)
5175 p = &(*p)->rb_left;
5176 else if (current->pid > thread->pid)
5177 p = &(*p)->rb_right;
5178 else
5179 return thread;
5180 }
5181 if (!new_thread)
5182 return NULL;
5183 thread = new_thread;
5184 binder_stats_created(BINDER_STAT_THREAD);
5185 thread->proc = proc;
5186 thread->pid = current->pid;
5187 atomic_set(&thread->tmp_ref, 0);
5188 init_waitqueue_head(&thread->wait);
5189 INIT_LIST_HEAD(&thread->todo);
5190 rb_link_node(&thread->rb_node, parent, p);
5191 rb_insert_color(&thread->rb_node, &proc->threads);
5192 thread->looper_need_return = true;
5193 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5194 thread->return_error.cmd = BR_OK;
5195 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5196 thread->reply_error.cmd = BR_OK;
5197 thread->ee.command = BR_OK;
5198 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5199 return thread;
5200 }
5201
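/*
 * binder_get_thread() - find the binder_thread for the current task,
 * creating it on first use. The lookup runs twice because allocation
 * must not happen under the inner spinlock: an unlocked kzalloc() is
 * followed by a second locked lookup that either inserts the new
 * entry or frees it if another caller raced ahead.
 */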
5202 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5203 {
5204 struct binder_thread *thread;
5205 struct binder_thread *new_thread;
5206
5207 binder_inner_proc_lock(proc);
5208 thread = binder_get_thread_ilocked(proc, NULL);
5209 binder_inner_proc_unlock(proc);
5210 if (!thread) {
5211 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5212 if (new_thread == NULL)
5213 return NULL;
5214 binder_inner_proc_lock(proc);
5215 thread = binder_get_thread_ilocked(proc, new_thread);
5216 binder_inner_proc_unlock(proc);
5217 if (thread != new_thread)
5218 kfree(new_thread);
5219 }
5220 return thread;
5221 }
5222
5223 static void binder_free_proc(struct binder_proc *proc)
5224 {
5225 struct binder_device *device;
5226
5227 BUG_ON(!list_empty(&proc->todo));
5228 BUG_ON(!list_empty(&proc->delivered_death));
5229 if (proc->outstanding_txns)
5230 pr_warn("%s: Unexpected outstanding_txns %d\n",
5231 __func__, proc->outstanding_txns);
5232 device = container_of(proc->context, struct binder_device, context);
5233 if (refcount_dec_and_test(&device->ref)) {
5234 kfree(proc->context->name);
5235 kfree(device);
5236 }
5237 binder_alloc_deferred_release(&proc->alloc);
5238 put_task_struct(proc->tsk);
5239 put_cred(proc->cred);
5240 binder_stats_deleted(BINDER_STAT_PROC);
5241 dbitmap_free(&proc->dmap);
5242 kfree(proc);
5243 }
5244
5245 static void binder_free_thread(struct binder_thread *thread)
5246 {
5247 BUG_ON(!list_empty(&thread->todo));
5248 binder_stats_deleted(BINDER_STAT_THREAD);
5249 binder_proc_dec_tmpref(thread->proc);
5250 kfree(thread);
5251 }
5252
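/*
 * binder_thread_release() - tear down a thread on BC_THREAD_EXIT or
 * process death. The transaction stack is unwound, severing each
 * transaction from the dying thread; a peer still waiting on a reply
 * from this thread receives BR_DEAD_REPLY, and any queued work on
 * thread->todo is flushed. Returns the number of transactions that
 * were still active.
 */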
5253 static int binder_thread_release(struct binder_proc *proc,
5254 struct binder_thread *thread)
5255 {
5256 struct binder_transaction *t;
5257 struct binder_transaction *send_reply = NULL;
5258 int active_transactions = 0;
5259 struct binder_transaction *last_t = NULL;
5260
5261 binder_inner_proc_lock(thread->proc);
5262 /*
5263 * take a ref on the proc so it survives
5264 * after we remove this thread from proc->threads.
5265 * The corresponding dec is when we actually
5266 * free the thread in binder_free_thread()
5267 */
5268 proc->tmp_ref++;
5269 /*
5270 * take a ref on this thread to ensure it
5271 * survives while we are releasing it
5272 */
5273 atomic_inc(&thread->tmp_ref);
5274 rb_erase(&thread->rb_node, &proc->threads);
5275 t = thread->transaction_stack;
5276 if (t) {
5277 spin_lock(&t->lock);
5278 if (t->to_thread == thread)
5279 send_reply = t;
5280 } else {
5281 __acquire(&t->lock);
5282 }
5283 thread->is_dead = true;
5284
5285 while (t) {
5286 last_t = t;
5287 active_transactions++;
5288 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5289 "release %d:%d transaction %d %s, still active\n",
5290 proc->pid, thread->pid,
5291 t->debug_id,
5292 (t->to_thread == thread) ? "in" : "out");
5293
5294 if (t->to_thread == thread) {
5295 thread->proc->outstanding_txns--;
5296 t->to_proc = NULL;
5297 t->to_thread = NULL;
5298 if (t->buffer) {
5299 t->buffer->transaction = NULL;
5300 t->buffer = NULL;
5301 }
5302 t = t->to_parent;
5303 } else if (t->from == thread) {
5304 t->from = NULL;
5305 t = t->from_parent;
5306 } else
5307 BUG();
5308 spin_unlock(&last_t->lock);
5309 if (t)
5310 spin_lock(&t->lock);
5311 else
5312 __acquire(&t->lock);
5313 }
5314 /* annotation for sparse, lock not acquired in last iteration above */
5315 __release(&t->lock);
5316
5317 /*
5318 * If this thread used poll, make sure we remove the waitqueue from any
5319 * poll data structures holding it.
5320 */
5321 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5322 wake_up_pollfree(&thread->wait);
5323
5324 binder_inner_proc_unlock(thread->proc);
5325
5326 /*
5327 * This is needed to avoid races between wake_up_pollfree() above and
5328 * someone else removing the last entry from the queue for other reasons
5329 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5330 * descriptor being closed). Such other users hold an RCU read lock, so
5331 * we can be sure they're done after we call synchronize_rcu().
5332 */
5333 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5334 synchronize_rcu();
5335
5336 if (send_reply)
5337 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5338 binder_release_work(proc, &thread->todo);
5339 binder_thread_dec_tmpref(thread);
5340 return active_transactions;
5341 }
5342
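/*
 * binder_poll() - report EPOLLIN when the calling thread (or, if the
 * thread is available for process work, its proc) has work pending.
 * The thread is flagged BINDER_LOOPER_STATE_POLL so that teardown
 * knows it must pollfree the waitqueue.
 */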
5343 static __poll_t binder_poll(struct file *filp,
5344 struct poll_table_struct *wait)
5345 {
5346 struct binder_proc *proc = filp->private_data;
5347 struct binder_thread *thread = NULL;
5348 bool wait_for_proc_work;
5349
5350 thread = binder_get_thread(proc);
5351 if (!thread)
5352 return EPOLLERR;
5353
5354 binder_inner_proc_lock(thread->proc);
5355 thread->looper |= BINDER_LOOPER_STATE_POLL;
5356 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5357
5358 binder_inner_proc_unlock(thread->proc);
5359
5360 poll_wait(filp, &thread->wait, wait);
5361
5362 if (binder_has_work(thread, wait_for_proc_work))
5363 return EPOLLIN;
5364
5365 return 0;
5366 }
5367
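/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ, the main data
 * path. The write buffer (userspace commands) is consumed first, then
 * the read buffer is filled with driver returns; the consumed counts
 * are copied back to userspace even on error so the caller can tell
 * how far processing got.
 */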
5368 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5369 struct binder_thread *thread)
5370 {
5371 int ret = 0;
5372 struct binder_proc *proc = filp->private_data;
5373 void __user *ubuf = (void __user *)arg;
5374 struct binder_write_read bwr;
5375
5376 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5377 ret = -EFAULT;
5378 goto out;
5379 }
5380 binder_debug(BINDER_DEBUG_READ_WRITE,
5381 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5382 proc->pid, thread->pid,
5383 (u64)bwr.write_size, (u64)bwr.write_buffer,
5384 (u64)bwr.read_size, (u64)bwr.read_buffer);
5385
5386 if (bwr.write_size > 0) {
5387 ret = binder_thread_write(proc, thread,
5388 bwr.write_buffer,
5389 bwr.write_size,
5390 &bwr.write_consumed);
5391 trace_binder_write_done(ret);
5392 if (ret < 0) {
5393 bwr.read_consumed = 0;
5394 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5395 ret = -EFAULT;
5396 goto out;
5397 }
5398 }
5399 if (bwr.read_size > 0) {
5400 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5401 bwr.read_size,
5402 &bwr.read_consumed,
5403 filp->f_flags & O_NONBLOCK);
5404 trace_binder_read_done(ret);
5405 binder_inner_proc_lock(proc);
5406 if (!binder_worklist_empty_ilocked(&proc->todo))
5407 binder_wakeup_proc_ilocked(proc);
5408 binder_inner_proc_unlock(proc);
5409 if (ret < 0) {
5410 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5411 ret = -EFAULT;
5412 goto out;
5413 }
5414 }
5415 binder_debug(BINDER_DEBUG_READ_WRITE,
5416 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5417 proc->pid, thread->pid,
5418 (u64)bwr.write_consumed, (u64)bwr.write_size,
5419 (u64)bwr.read_consumed, (u64)bwr.read_size);
5420 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5421 ret = -EFAULT;
5422 goto out;
5423 }
5424 out:
5425 return ret;
5426 }
5427
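/*
 * binder_ioctl_set_ctx_mgr() - register the calling process as the
 * context manager (handle 0) for this binder context. Fails with
 * -EBUSY if a manager already exists and -EPERM on a euid mismatch or
 * security denial; the manager node is created with extra strong and
 * weak references so it persists for the life of the context.
 */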
5428 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5429 struct flat_binder_object *fbo)
5430 {
5431 int ret = 0;
5432 struct binder_proc *proc = filp->private_data;
5433 struct binder_context *context = proc->context;
5434 struct binder_node *new_node;
5435 kuid_t curr_euid = current_euid();
5436
5437 mutex_lock(&context->context_mgr_node_lock);
5438 if (context->binder_context_mgr_node) {
5439 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5440 ret = -EBUSY;
5441 goto out;
5442 }
5443 ret = security_binder_set_context_mgr(proc->cred);
5444 if (ret < 0)
5445 goto out;
5446 if (uid_valid(context->binder_context_mgr_uid)) {
5447 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5448 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5449 from_kuid(&init_user_ns, curr_euid),
5450 from_kuid(&init_user_ns,
5451 context->binder_context_mgr_uid));
5452 ret = -EPERM;
5453 goto out;
5454 }
5455 } else {
5456 context->binder_context_mgr_uid = curr_euid;
5457 }
5458 new_node = binder_new_node(proc, fbo);
5459 if (!new_node) {
5460 ret = -ENOMEM;
5461 goto out;
5462 }
5463 binder_node_lock(new_node);
5464 new_node->local_weak_refs++;
5465 new_node->local_strong_refs++;
5466 new_node->has_strong_ref = 1;
5467 new_node->has_weak_ref = 1;
5468 context->binder_context_mgr_node = new_node;
5469 binder_node_unlock(new_node);
5470 binder_put_node(new_node);
5471 out:
5472 mutex_unlock(&context->context_mgr_node_lock);
5473 return ret;
5474 }
5475
5476 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5477 struct binder_node_info_for_ref *info)
5478 {
5479 struct binder_node *node;
5480 struct binder_context *context = proc->context;
5481 __u32 handle = info->handle;
5482
5483 if (info->strong_count || info->weak_count || info->reserved1 ||
5484 info->reserved2 || info->reserved3) {
5485 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5486 proc->pid);
5487 return -EINVAL;
5488 }
5489
5490 /* This ioctl may only be used by the context manager */
5491 mutex_lock(&context->context_mgr_node_lock);
5492 if (!context->binder_context_mgr_node ||
5493 context->binder_context_mgr_node->proc != proc) {
5494 mutex_unlock(&context->context_mgr_node_lock);
5495 return -EPERM;
5496 }
5497 mutex_unlock(&context->context_mgr_node_lock);
5498
5499 node = binder_get_node_from_ref(proc, handle, true, NULL);
5500 if (!node)
5501 return -EINVAL;
5502
5503 info->strong_count = node->local_strong_refs +
5504 node->internal_strong_refs;
5505 info->weak_count = node->local_weak_refs;
5506
5507 binder_put_node(node);
5508
5509 return 0;
5510 }
5511
5512 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5513 struct binder_node_debug_info *info)
5514 {
5515 struct rb_node *n;
5516 binder_uintptr_t ptr = info->ptr;
5517
5518 memset(info, 0, sizeof(*info));
5519
5520 binder_inner_proc_lock(proc);
5521 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5522 struct binder_node *node = rb_entry(n, struct binder_node,
5523 rb_node);
5524 if (node->ptr > ptr) {
5525 info->ptr = node->ptr;
5526 info->cookie = node->cookie;
5527 info->has_strong_ref = node->has_strong_ref;
5528 info->has_weak_ref = node->has_weak_ref;
5529 break;
5530 }
5531 }
5532 binder_inner_proc_unlock(proc);
5533
5534 return 0;
5535 }
5536
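/*
 * binder_txns_pending_ilocked() - true while the proc has outstanding
 * transactions or any of its threads still holds a transaction stack;
 * used to decide whether a freeze attempt has fully drained.
 */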
5537 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5538 {
5539 struct rb_node *n;
5540 struct binder_thread *thread;
5541
5542 if (proc->outstanding_txns > 0)
5543 return true;
5544
5545 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5546 thread = rb_entry(n, struct binder_thread, rb_node);
5547 if (thread->transaction_stack)
5548 return true;
5549 }
5550 return false;
5551 }
5552
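/*
 * binder_add_freeze_work() - queue freeze-state notifications on every
 * process that registered one against a node of @proc. A notification
 * still sitting in the listener's queue is updated in place; one that
 * was already delivered with a now-stale state is marked for resend.
 */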
5553 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5554 {
5555 struct rb_node *n;
5556 struct binder_ref *ref;
5557
5558 binder_inner_proc_lock(proc);
5559 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5560 struct binder_node *node;
5561
5562 node = rb_entry(n, struct binder_node, rb_node);
5563 binder_inner_proc_unlock(proc);
5564 binder_node_lock(node);
5565 hlist_for_each_entry(ref, &node->refs, node_entry) {
5566 /*
5567 * Need the node lock to synchronize
5568 * with new notification requests and the
5569 * inner lock to synchronize with queued
5570 * freeze notifications.
5571 */
5572 binder_inner_proc_lock(ref->proc);
5573 if (!ref->freeze) {
5574 binder_inner_proc_unlock(ref->proc);
5575 continue;
5576 }
5577 ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5578 if (list_empty(&ref->freeze->work.entry)) {
5579 ref->freeze->is_frozen = is_frozen;
5580 binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5581 binder_wakeup_proc_ilocked(ref->proc);
5582 } else {
5583 if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5584 ref->freeze->resend = true;
5585 ref->freeze->is_frozen = is_frozen;
5586 }
5587 binder_inner_proc_unlock(ref->proc);
5588 }
5589 binder_node_unlock(node);
5590 binder_inner_proc_lock(proc);
5591 }
5592 binder_inner_proc_unlock(proc);
5593 }
5594
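/*
 * binder_ioctl_freeze() - apply a BINDER_FREEZE request to one target
 * proc. Unfreezing clears the frozen state and notifies listeners.
 * Freezing sets the frozen state first so new transactions are
 * rejected, optionally waits up to timeout_ms for outstanding
 * transactions to drain, and rolls the state back on failure
 * (-EAGAIN if transactions are still pending).
 */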
5595 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5596 struct binder_proc *target_proc)
5597 {
5598 int ret = 0;
5599
5600 if (!info->enable) {
5601 binder_inner_proc_lock(target_proc);
5602 target_proc->sync_recv = false;
5603 target_proc->async_recv = false;
5604 target_proc->is_frozen = false;
5605 binder_inner_proc_unlock(target_proc);
5606 binder_add_freeze_work(target_proc, false);
5607 return 0;
5608 }
5609
5610 /*
5611 * Freezing the target. Prevent new transactions by
5612 * setting frozen state. If timeout specified, wait
5613 * for transactions to drain.
5614 */
5615 binder_inner_proc_lock(target_proc);
5616 target_proc->sync_recv = false;
5617 target_proc->async_recv = false;
5618 target_proc->is_frozen = true;
5619 binder_inner_proc_unlock(target_proc);
5620
5621 if (info->timeout_ms > 0)
5622 ret = wait_event_interruptible_timeout(
5623 target_proc->freeze_wait,
5624 (!target_proc->outstanding_txns),
5625 msecs_to_jiffies(info->timeout_ms));
5626
5627 /* Check pending transactions that wait for reply */
5628 if (ret >= 0) {
5629 binder_inner_proc_lock(target_proc);
5630 if (binder_txns_pending_ilocked(target_proc))
5631 ret = -EAGAIN;
5632 binder_inner_proc_unlock(target_proc);
5633 }
5634
5635 if (ret < 0) {
5636 binder_inner_proc_lock(target_proc);
5637 target_proc->is_frozen = false;
5638 binder_inner_proc_unlock(target_proc);
5639 } else {
5640 binder_add_freeze_work(target_proc, true);
5641 }
5642
5643 return ret;
5644 }
5645
5646 static int binder_ioctl_get_freezer_info(
5647 struct binder_frozen_status_info *info)
5648 {
5649 struct binder_proc *target_proc;
5650 bool found = false;
5651 __u32 txns_pending;
5652
5653 info->sync_recv = 0;
5654 info->async_recv = 0;
5655
5656 mutex_lock(&binder_procs_lock);
5657 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5658 if (target_proc->pid == info->pid) {
5659 found = true;
5660 binder_inner_proc_lock(target_proc);
5661 txns_pending = binder_txns_pending_ilocked(target_proc);
5662 info->sync_recv |= target_proc->sync_recv |
5663 (txns_pending << 1);
5664 info->async_recv |= target_proc->async_recv;
5665 binder_inner_proc_unlock(target_proc);
5666 }
5667 }
5668 mutex_unlock(&binder_procs_lock);
5669
5670 if (!found)
5671 return -EINVAL;
5672
5673 return 0;
5674 }
5675
5676 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5677 void __user *ubuf)
5678 {
5679 struct binder_extended_error ee;
5680
5681 binder_inner_proc_lock(thread->proc);
5682 ee = thread->ee;
5683 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5684 binder_inner_proc_unlock(thread->proc);
5685
5686 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5687 return -EFAULT;
5688
5689 return 0;
5690 }
5691
5692 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5693 {
5694 int ret;
5695 struct binder_proc *proc = filp->private_data;
5696 struct binder_thread *thread;
5697 void __user *ubuf = (void __user *)arg;
5698
5699 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5700 proc->pid, current->pid, cmd, arg);*/
5701
5702 binder_selftest_alloc(&proc->alloc);
5703
5704 trace_binder_ioctl(cmd, arg);
5705
5706 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5707 if (ret)
5708 goto err_unlocked;
5709
5710 thread = binder_get_thread(proc);
5711 if (thread == NULL) {
5712 ret = -ENOMEM;
5713 goto err;
5714 }
5715
5716 switch (cmd) {
5717 case BINDER_WRITE_READ:
5718 ret = binder_ioctl_write_read(filp, arg, thread);
5719 if (ret)
5720 goto err;
5721 break;
5722 case BINDER_SET_MAX_THREADS: {
5723 u32 max_threads;
5724
5725 if (copy_from_user(&max_threads, ubuf,
5726 sizeof(max_threads))) {
5727 ret = -EINVAL;
5728 goto err;
5729 }
5730 binder_inner_proc_lock(proc);
5731 proc->max_threads = max_threads;
5732 binder_inner_proc_unlock(proc);
5733 break;
5734 }
5735 case BINDER_SET_CONTEXT_MGR_EXT: {
5736 struct flat_binder_object fbo;
5737
5738 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5739 ret = -EINVAL;
5740 goto err;
5741 }
5742 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5743 if (ret)
5744 goto err;
5745 break;
5746 }
5747 case BINDER_SET_CONTEXT_MGR:
5748 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5749 if (ret)
5750 goto err;
5751 break;
5752 case BINDER_THREAD_EXIT:
5753 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5754 proc->pid, thread->pid);
5755 binder_thread_release(proc, thread);
5756 thread = NULL;
5757 break;
5758 case BINDER_VERSION: {
5759 struct binder_version __user *ver = ubuf;
5760
5761 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5762 &ver->protocol_version)) {
5763 ret = -EINVAL;
5764 goto err;
5765 }
5766 break;
5767 }
5768 case BINDER_GET_NODE_INFO_FOR_REF: {
5769 struct binder_node_info_for_ref info;
5770
5771 if (copy_from_user(&info, ubuf, sizeof(info))) {
5772 ret = -EFAULT;
5773 goto err;
5774 }
5775
5776 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5777 if (ret < 0)
5778 goto err;
5779
5780 if (copy_to_user(ubuf, &info, sizeof(info))) {
5781 ret = -EFAULT;
5782 goto err;
5783 }
5784
5785 break;
5786 }
5787 case BINDER_GET_NODE_DEBUG_INFO: {
5788 struct binder_node_debug_info info;
5789
5790 if (copy_from_user(&info, ubuf, sizeof(info))) {
5791 ret = -EFAULT;
5792 goto err;
5793 }
5794
5795 ret = binder_ioctl_get_node_debug_info(proc, &info);
5796 if (ret < 0)
5797 goto err;
5798
5799 if (copy_to_user(ubuf, &info, sizeof(info))) {
5800 ret = -EFAULT;
5801 goto err;
5802 }
5803 break;
5804 }
5805 case BINDER_FREEZE: {
5806 struct binder_freeze_info info;
5807 struct binder_proc **target_procs = NULL, *target_proc;
5808 int target_procs_count = 0, i = 0;
5809
5810 ret = 0;
5811
5812 if (copy_from_user(&info, ubuf, sizeof(info))) {
5813 ret = -EFAULT;
5814 goto err;
5815 }
5816
5817 mutex_lock(&binder_procs_lock);
5818 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5819 if (target_proc->pid == info.pid)
5820 target_procs_count++;
5821 }
5822
5823 if (target_procs_count == 0) {
5824 mutex_unlock(&binder_procs_lock);
5825 ret = -EINVAL;
5826 goto err;
5827 }
5828
5829 target_procs = kcalloc(target_procs_count,
5830 sizeof(struct binder_proc *),
5831 GFP_KERNEL);
5832
5833 if (!target_procs) {
5834 mutex_unlock(&binder_procs_lock);
5835 ret = -ENOMEM;
5836 goto err;
5837 }
5838
5839 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5840 if (target_proc->pid != info.pid)
5841 continue;
5842
5843 binder_inner_proc_lock(target_proc);
5844 target_proc->tmp_ref++;
5845 binder_inner_proc_unlock(target_proc);
5846
5847 target_procs[i++] = target_proc;
5848 }
5849 mutex_unlock(&binder_procs_lock);
5850
5851 for (i = 0; i < target_procs_count; i++) {
5852 if (ret >= 0)
5853 ret = binder_ioctl_freeze(&info,
5854 target_procs[i]);
5855
5856 binder_proc_dec_tmpref(target_procs[i]);
5857 }
5858
5859 kfree(target_procs);
5860
5861 if (ret < 0)
5862 goto err;
5863 break;
5864 }
5865 case BINDER_GET_FROZEN_INFO: {
5866 struct binder_frozen_status_info info;
5867
5868 if (copy_from_user(&info, ubuf, sizeof(info))) {
5869 ret = -EFAULT;
5870 goto err;
5871 }
5872
5873 ret = binder_ioctl_get_freezer_info(&info);
5874 if (ret < 0)
5875 goto err;
5876
5877 if (copy_to_user(ubuf, &info, sizeof(info))) {
5878 ret = -EFAULT;
5879 goto err;
5880 }
5881 break;
5882 }
5883 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5884 uint32_t enable;
5885
5886 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5887 ret = -EFAULT;
5888 goto err;
5889 }
5890 binder_inner_proc_lock(proc);
5891 proc->oneway_spam_detection_enabled = (bool)enable;
5892 binder_inner_proc_unlock(proc);
5893 break;
5894 }
5895 case BINDER_GET_EXTENDED_ERROR:
5896 ret = binder_ioctl_get_extended_error(thread, ubuf);
5897 if (ret < 0)
5898 goto err;
5899 break;
5900 default:
5901 ret = -EINVAL;
5902 goto err;
5903 }
5904 ret = 0;
5905 err:
5906 if (thread)
5907 thread->looper_need_return = false;
5908 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5909 if (ret && ret != -EINTR)
5910 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5911 err_unlocked:
5912 trace_binder_ioctl_done(ret);
5913 return ret;
5914 }
5915
5916 static void binder_vma_open(struct vm_area_struct *vma)
5917 {
5918 struct binder_proc *proc = vma->vm_private_data;
5919
5920 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5921 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5922 proc->pid, vma->vm_start, vma->vm_end,
5923 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5924 (unsigned long)pgprot_val(vma->vm_page_prot));
5925 }
5926
5927 static void binder_vma_close(struct vm_area_struct *vma)
5928 {
5929 struct binder_proc *proc = vma->vm_private_data;
5930
5931 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5932 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5933 proc->pid, vma->vm_start, vma->vm_end,
5934 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5935 (unsigned long)pgprot_val(vma->vm_page_prot));
5936 binder_alloc_vma_close(&proc->alloc);
5937 }
5938
5939 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5940 {
5941 return VM_FAULT_SIGBUS;
5942 }
5943
5944 static const struct vm_operations_struct binder_vm_ops = {
5945 .open = binder_vma_open,
5946 .close = binder_vma_close,
5947 .fault = binder_vm_fault,
5948 };
5949
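/*
 * binder_mmap() - map the binder buffer space into the caller. Only
 * the task that opened the device may mmap it, writable mappings are
 * refused, and VM_DONTCOPY keeps the mapping out of forked children;
 * the buffer pages themselves are managed by binder_alloc.
 */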
5950 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5951 {
5952 struct binder_proc *proc = filp->private_data;
5953
5954 if (proc->tsk != current->group_leader)
5955 return -EINVAL;
5956
5957 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5958 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5959 __func__, proc->pid, vma->vm_start, vma->vm_end,
5960 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5961 (unsigned long)pgprot_val(vma->vm_page_prot));
5962
5963 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5964 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5965 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5966 return -EPERM;
5967 }
5968 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5969
5970 vma->vm_ops = &binder_vm_ops;
5971 vma->vm_private_data = proc;
5972
5973 return binder_alloc_mmap_handler(&proc->alloc, vma);
5974 }
5975
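/*
 * binder_open() - allocate and initialize a binder_proc for a new
 * opener of a binder device or binderfs node. The per-PID debugfs and
 * binderfs log entries are created only on the first open of a PID,
 * since they are shared between contexts.
 */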
5976 static int binder_open(struct inode *nodp, struct file *filp)
5977 {
5978 struct binder_proc *proc, *itr;
5979 struct binder_device *binder_dev;
5980 struct binderfs_info *info;
5981 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5982 bool existing_pid = false;
5983
5984 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5985 current->group_leader->pid, current->pid);
5986
5987 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5988 if (proc == NULL)
5989 return -ENOMEM;
5990
5991 dbitmap_init(&proc->dmap);
5992 spin_lock_init(&proc->inner_lock);
5993 spin_lock_init(&proc->outer_lock);
5994 get_task_struct(current->group_leader);
5995 proc->tsk = current->group_leader;
5996 proc->cred = get_cred(filp->f_cred);
5997 INIT_LIST_HEAD(&proc->todo);
5998 init_waitqueue_head(&proc->freeze_wait);
5999 proc->default_priority = task_nice(current);
6000 /* binderfs stashes devices in i_private */
6001 if (is_binderfs_device(nodp)) {
6002 binder_dev = nodp->i_private;
6003 info = nodp->i_sb->s_fs_info;
6004 binder_binderfs_dir_entry_proc = info->proc_log_dir;
6005 } else {
6006 binder_dev = container_of(filp->private_data,
6007 struct binder_device, miscdev);
6008 }
6009 refcount_inc(&binder_dev->ref);
6010 proc->context = &binder_dev->context;
6011 binder_alloc_init(&proc->alloc);
6012
6013 binder_stats_created(BINDER_STAT_PROC);
6014 proc->pid = current->group_leader->pid;
6015 INIT_LIST_HEAD(&proc->delivered_death);
6016 INIT_LIST_HEAD(&proc->delivered_freeze);
6017 INIT_LIST_HEAD(&proc->waiting_threads);
6018 filp->private_data = proc;
6019
6020 mutex_lock(&binder_procs_lock);
6021 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6022 if (itr->pid == proc->pid) {
6023 existing_pid = true;
6024 break;
6025 }
6026 }
6027 hlist_add_head(&proc->proc_node, &binder_procs);
6028 mutex_unlock(&binder_procs_lock);
6029
6030 if (binder_debugfs_dir_entry_proc && !existing_pid) {
6031 char strbuf[11];
6032
6033 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6034 /*
6035 * proc debug entries are shared between contexts.
6036 * Only create for the first PID to avoid debugfs log spamming.
6037 * The printing code will anyway print all contexts for a given
6038 * PID, so this is not a problem.
6039 */
6040 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6041 binder_debugfs_dir_entry_proc,
6042 (void *)(unsigned long)proc->pid,
6043 &proc_fops);
6044 }
6045
6046 if (binder_binderfs_dir_entry_proc && !existing_pid) {
6047 char strbuf[11];
6048 struct dentry *binderfs_entry;
6049
6050 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6051 /*
6052 * Similar to debugfs, the process specific log file is shared
6053 * between contexts. Only create for the first PID.
6054 * This is fine since, as with debugfs, the log file will contain
6055 * information on all contexts of a given PID.
6056 */
6057 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6058 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6059 if (!IS_ERR(binderfs_entry)) {
6060 proc->binderfs_entry = binderfs_entry;
6061 } else {
6062 int error;
6063
6064 error = PTR_ERR(binderfs_entry);
6065 pr_warn("Unable to create file %s in binderfs (error %d)\n",
6066 strbuf, error);
6067 }
6068 }
6069
6070 return 0;
6071 }
6072
6073 static int binder_flush(struct file *filp, fl_owner_t id)
6074 {
6075 struct binder_proc *proc = filp->private_data;
6076
6077 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6078
6079 return 0;
6080 }
6081
6082 static void binder_deferred_flush(struct binder_proc *proc)
6083 {
6084 struct rb_node *n;
6085 int wake_count = 0;
6086
6087 binder_inner_proc_lock(proc);
6088 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6089 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6090
6091 thread->looper_need_return = true;
6092 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6093 wake_up_interruptible(&thread->wait);
6094 wake_count++;
6095 }
6096 }
6097 binder_inner_proc_unlock(proc);
6098
6099 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6100 "binder_flush: %d woke %d threads\n", proc->pid,
6101 wake_count);
6102 }
6103
6104 static int binder_release(struct inode *nodp, struct file *filp)
6105 {
6106 struct binder_proc *proc = filp->private_data;
6107
6108 debugfs_remove(proc->debugfs_entry);
6109
6110 if (proc->binderfs_entry) {
6111 binderfs_remove_file(proc->binderfs_entry);
6112 proc->binderfs_entry = NULL;
6113 }
6114
6115 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6116
6117 return 0;
6118 }
6119
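/*
 * binder_node_release() - release one node of a dying process. If
 * nothing else references the node it is freed immediately; otherwise
 * it moves to the global dead-nodes list and BR_DEAD_BINDER work is
 * queued for every process that requested a death notification.
 * Returns the updated incoming-reference count for the debug summary.
 */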
6120 static int binder_node_release(struct binder_node *node, int refs)
6121 {
6122 struct binder_ref *ref;
6123 int death = 0;
6124 struct binder_proc *proc = node->proc;
6125
6126 binder_release_work(proc, &node->async_todo);
6127
6128 binder_node_lock(node);
6129 binder_inner_proc_lock(proc);
6130 binder_dequeue_work_ilocked(&node->work);
6131 /*
6132 * The caller must have taken a temporary ref on the node.
6133 */
6134 BUG_ON(!node->tmp_refs);
6135 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6136 binder_inner_proc_unlock(proc);
6137 binder_node_unlock(node);
6138 binder_free_node(node);
6139
6140 return refs;
6141 }
6142
6143 node->proc = NULL;
6144 node->local_strong_refs = 0;
6145 node->local_weak_refs = 0;
6146 binder_inner_proc_unlock(proc);
6147
6148 spin_lock(&binder_dead_nodes_lock);
6149 hlist_add_head(&node->dead_node, &binder_dead_nodes);
6150 spin_unlock(&binder_dead_nodes_lock);
6151
6152 hlist_for_each_entry(ref, &node->refs, node_entry) {
6153 refs++;
6154 /*
6155 * Need the node lock to synchronize
6156 * with new notification requests and the
6157 * inner lock to synchronize with queued
6158 * death notifications.
6159 */
6160 binder_inner_proc_lock(ref->proc);
6161 if (!ref->death) {
6162 binder_inner_proc_unlock(ref->proc);
6163 continue;
6164 }
6165
6166 death++;
6167
6168 BUG_ON(!list_empty(&ref->death->work.entry));
6169 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6170 binder_enqueue_work_ilocked(&ref->death->work,
6171 &ref->proc->todo);
6172 binder_wakeup_proc_ilocked(ref->proc);
6173 binder_inner_proc_unlock(ref->proc);
6174 }
6175
6176 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6177 "node %d now dead, refs %d, death %d\n",
6178 node->debug_id, refs, death);
6179 binder_node_unlock(node);
6180 binder_put_node(node);
6181
6182 return refs;
6183 }
6184
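/*
 * binder_deferred_release() - final teardown of a binder_proc after
 * the file has been released, run from the deferred workqueue. Marks
 * the proc dead, then releases all threads, nodes, refs, and queued
 * work before dropping the temporary ref that kept the proc alive
 * during teardown.
 */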
6185 static void binder_deferred_release(struct binder_proc *proc)
6186 {
6187 struct binder_context *context = proc->context;
6188 struct rb_node *n;
6189 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6190
6191 mutex_lock(&binder_procs_lock);
6192 hlist_del(&proc->proc_node);
6193 mutex_unlock(&binder_procs_lock);
6194
6195 mutex_lock(&context->context_mgr_node_lock);
6196 if (context->binder_context_mgr_node &&
6197 context->binder_context_mgr_node->proc == proc) {
6198 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6199 "%s: %d context_mgr_node gone\n",
6200 __func__, proc->pid);
6201 context->binder_context_mgr_node = NULL;
6202 }
6203 mutex_unlock(&context->context_mgr_node_lock);
6204 binder_inner_proc_lock(proc);
6205 /*
6206 * Make sure proc stays alive after we
6207 * remove all the threads
6208 */
6209 proc->tmp_ref++;
6210
6211 proc->is_dead = true;
6212 proc->is_frozen = false;
6213 proc->sync_recv = false;
6214 proc->async_recv = false;
6215 threads = 0;
6216 active_transactions = 0;
6217 while ((n = rb_first(&proc->threads))) {
6218 struct binder_thread *thread;
6219
6220 thread = rb_entry(n, struct binder_thread, rb_node);
6221 binder_inner_proc_unlock(proc);
6222 threads++;
6223 active_transactions += binder_thread_release(proc, thread);
6224 binder_inner_proc_lock(proc);
6225 }
6226
6227 nodes = 0;
6228 incoming_refs = 0;
6229 while ((n = rb_first(&proc->nodes))) {
6230 struct binder_node *node;
6231
6232 node = rb_entry(n, struct binder_node, rb_node);
6233 nodes++;
6234 /*
6235 * take a temporary ref on the node before
6236 * calling binder_node_release() which will either
6237 * kfree() the node or call binder_put_node()
6238 */
6239 binder_inc_node_tmpref_ilocked(node);
6240 rb_erase(&node->rb_node, &proc->nodes);
6241 binder_inner_proc_unlock(proc);
6242 incoming_refs = binder_node_release(node, incoming_refs);
6243 binder_inner_proc_lock(proc);
6244 }
6245 binder_inner_proc_unlock(proc);
6246
6247 outgoing_refs = 0;
6248 binder_proc_lock(proc);
6249 while ((n = rb_first(&proc->refs_by_desc))) {
6250 struct binder_ref *ref;
6251
6252 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6253 outgoing_refs++;
6254 binder_cleanup_ref_olocked(ref);
6255 binder_proc_unlock(proc);
6256 binder_free_ref(ref);
6257 binder_proc_lock(proc);
6258 }
6259 binder_proc_unlock(proc);
6260
6261 binder_release_work(proc, &proc->todo);
6262 binder_release_work(proc, &proc->delivered_death);
6263
6264 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6265 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6266 __func__, proc->pid, threads, nodes, incoming_refs,
6267 outgoing_refs, active_transactions);
6268
6269 binder_proc_dec_tmpref(proc);
6270 }
6271
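/*
 * Deferred work is funnelled through a single workqueue item:
 * binder_defer_work() ORs the requested action into
 * proc->deferred_work and queues binder_deferred_func(), which drains
 * the list one proc at a time, handling flushes before releases.
 */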
6272 static void binder_deferred_func(struct work_struct *work)
6273 {
6274 struct binder_proc *proc;
6275
6276 int defer;
6277
6278 do {
6279 mutex_lock(&binder_deferred_lock);
6280 if (!hlist_empty(&binder_deferred_list)) {
6281 proc = hlist_entry(binder_deferred_list.first,
6282 struct binder_proc, deferred_work_node);
6283 hlist_del_init(&proc->deferred_work_node);
6284 defer = proc->deferred_work;
6285 proc->deferred_work = 0;
6286 } else {
6287 proc = NULL;
6288 defer = 0;
6289 }
6290 mutex_unlock(&binder_deferred_lock);
6291
6292 if (defer & BINDER_DEFERRED_FLUSH)
6293 binder_deferred_flush(proc);
6294
6295 if (defer & BINDER_DEFERRED_RELEASE)
6296 binder_deferred_release(proc); /* frees proc */
6297 } while (proc);
6298 }
6299 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6300
6301 static void
6302 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6303 {
6304 mutex_lock(&binder_deferred_lock);
6305 proc->deferred_work |= defer;
6306 if (hlist_unhashed(&proc->deferred_work_node)) {
6307 hlist_add_head(&proc->deferred_work_node,
6308 &binder_deferred_list);
6309 schedule_work(&binder_deferred_work);
6310 }
6311 mutex_unlock(&binder_deferred_lock);
6312 }
6313
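/*
 * The print_binder_*() helpers below back the debugfs/binderfs state
 * and stats files. Their _ilocked/_nilocked/_olocked suffixes name
 * the lock(s) the caller must already hold.
 */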
6314 static void print_binder_transaction_ilocked(struct seq_file *m,
6315 struct binder_proc *proc,
6316 const char *prefix,
6317 struct binder_transaction *t)
6318 {
6319 struct binder_proc *to_proc;
6320 struct binder_buffer *buffer = t->buffer;
6321 ktime_t current_time = ktime_get();
6322
6323 spin_lock(&t->lock);
6324 to_proc = t->to_proc;
6325 seq_printf(m,
6326 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6327 prefix, t->debug_id, t,
6328 t->from_pid,
6329 t->from_tid,
6330 to_proc ? to_proc->pid : 0,
6331 t->to_thread ? t->to_thread->pid : 0,
6332 t->code, t->flags, t->priority, t->need_reply,
6333 ktime_ms_delta(current_time, t->start_time));
6334 spin_unlock(&t->lock);
6335
6336 if (proc != to_proc) {
6337 /*
6338 * Can only safely deref buffer if we are holding the
6339 * correct proc inner lock for this node
6340 */
6341 seq_puts(m, "\n");
6342 return;
6343 }
6344
6345 if (buffer == NULL) {
6346 seq_puts(m, " buffer free\n");
6347 return;
6348 }
6349 if (buffer->target_node)
6350 seq_printf(m, " node %d", buffer->target_node->debug_id);
6351 seq_printf(m, " size %zd:%zd offset %lx\n",
6352 buffer->data_size, buffer->offsets_size,
6353 proc->alloc.buffer - buffer->user_data);
6354 }
6355
6356 static void print_binder_work_ilocked(struct seq_file *m,
6357 struct binder_proc *proc,
6358 const char *prefix,
6359 const char *transaction_prefix,
6360 struct binder_work *w)
6361 {
6362 struct binder_node *node;
6363 struct binder_transaction *t;
6364
6365 switch (w->type) {
6366 case BINDER_WORK_TRANSACTION:
6367 t = container_of(w, struct binder_transaction, work);
6368 print_binder_transaction_ilocked(
6369 m, proc, transaction_prefix, t);
6370 break;
6371 case BINDER_WORK_RETURN_ERROR: {
6372 struct binder_error *e = container_of(
6373 w, struct binder_error, work);
6374
6375 seq_printf(m, "%stransaction error: %u\n",
6376 prefix, e->cmd);
6377 } break;
6378 case BINDER_WORK_TRANSACTION_COMPLETE:
6379 seq_printf(m, "%stransaction complete\n", prefix);
6380 break;
6381 case BINDER_WORK_NODE:
6382 node = container_of(w, struct binder_node, work);
6383 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6384 prefix, node->debug_id,
6385 (u64)node->ptr, (u64)node->cookie);
6386 break;
6387 case BINDER_WORK_DEAD_BINDER:
6388 seq_printf(m, "%shas dead binder\n", prefix);
6389 break;
6390 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6391 seq_printf(m, "%shas cleared dead binder\n", prefix);
6392 break;
6393 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6394 seq_printf(m, "%shas cleared death notification\n", prefix);
6395 break;
6396 default:
6397 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6398 break;
6399 }
6400 }
6401
6402 static void print_binder_thread_ilocked(struct seq_file *m,
6403 struct binder_thread *thread,
6404 int print_always)
6405 {
6406 struct binder_transaction *t;
6407 struct binder_work *w;
6408 size_t start_pos = m->count;
6409 size_t header_pos;
6410
6411 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6412 thread->pid, thread->looper,
6413 thread->looper_need_return,
6414 atomic_read(&thread->tmp_ref));
6415 header_pos = m->count;
6416 t = thread->transaction_stack;
6417 while (t) {
6418 if (t->from == thread) {
6419 print_binder_transaction_ilocked(m, thread->proc,
6420 " outgoing transaction", t);
6421 t = t->from_parent;
6422 } else if (t->to_thread == thread) {
6423 print_binder_transaction_ilocked(m, thread->proc,
6424 " incoming transaction", t);
6425 t = t->to_parent;
6426 } else {
6427 print_binder_transaction_ilocked(m, thread->proc,
6428 " bad transaction", t);
6429 t = NULL;
6430 }
6431 }
6432 list_for_each_entry(w, &thread->todo, entry) {
6433 print_binder_work_ilocked(m, thread->proc, " ",
6434 " pending transaction", w);
6435 }
6436 if (!print_always && m->count == header_pos)
6437 m->count = start_pos;
6438 }
6439
6440 static void print_binder_node_nilocked(struct seq_file *m,
6441 struct binder_node *node)
6442 {
6443 struct binder_ref *ref;
6444 struct binder_work *w;
6445 int count;
6446
6447 count = hlist_count_nodes(&node->refs);
6448
6449 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6450 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6451 node->has_strong_ref, node->has_weak_ref,
6452 node->local_strong_refs, node->local_weak_refs,
6453 node->internal_strong_refs, count, node->tmp_refs);
6454 if (count) {
6455 seq_puts(m, " proc");
6456 hlist_for_each_entry(ref, &node->refs, node_entry)
6457 seq_printf(m, " %d", ref->proc->pid);
6458 }
6459 seq_puts(m, "\n");
6460 if (node->proc) {
6461 list_for_each_entry(w, &node->async_todo, entry)
6462 print_binder_work_ilocked(m, node->proc, " ",
6463 " pending async transaction", w);
6464 }
6465 }
6466
6467 static void print_binder_ref_olocked(struct seq_file *m,
6468 struct binder_ref *ref)
6469 {
6470 binder_node_lock(ref->node);
6471 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6472 ref->data.debug_id, ref->data.desc,
6473 ref->node->proc ? "" : "dead ",
6474 ref->node->debug_id, ref->data.strong,
6475 ref->data.weak, ref->death);
6476 binder_node_unlock(ref->node);
6477 }
6478
6479 static void print_binder_proc(struct seq_file *m,
6480 struct binder_proc *proc, int print_all)
6481 {
6482 struct binder_work *w;
6483 struct rb_node *n;
6484 size_t start_pos = m->count;
6485 size_t header_pos;
6486 struct binder_node *last_node = NULL;
6487
6488 seq_printf(m, "proc %d\n", proc->pid);
6489 seq_printf(m, "context %s\n", proc->context->name);
6490 header_pos = m->count;
6491
6492 binder_inner_proc_lock(proc);
6493 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6494 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6495 rb_node), print_all);
6496
6497 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6498 struct binder_node *node = rb_entry(n, struct binder_node,
6499 rb_node);
6500 if (!print_all && !node->has_async_transaction)
6501 continue;
6502
6503 /*
6504 * take a temporary reference on the node so it
6505 * survives and isn't removed from the tree
6506 * while we print it.
6507 */
6508 binder_inc_node_tmpref_ilocked(node);
6509 /* Need to drop inner lock to take node lock */
6510 binder_inner_proc_unlock(proc);
6511 if (last_node)
6512 binder_put_node(last_node);
6513 binder_node_inner_lock(node);
6514 print_binder_node_nilocked(m, node);
6515 binder_node_inner_unlock(node);
6516 last_node = node;
6517 binder_inner_proc_lock(proc);
6518 }
6519 binder_inner_proc_unlock(proc);
6520 if (last_node)
6521 binder_put_node(last_node);
6522
6523 if (print_all) {
6524 binder_proc_lock(proc);
6525 for (n = rb_first(&proc->refs_by_desc);
6526 n != NULL;
6527 n = rb_next(n))
6528 print_binder_ref_olocked(m, rb_entry(n,
6529 struct binder_ref,
6530 rb_node_desc));
6531 binder_proc_unlock(proc);
6532 }
6533 binder_alloc_print_allocated(m, &proc->alloc);
6534 binder_inner_proc_lock(proc);
6535 list_for_each_entry(w, &proc->todo, entry)
6536 print_binder_work_ilocked(m, proc, " ",
6537 " pending transaction", w);
6538 list_for_each_entry(w, &proc->delivered_death, entry) {
6539 seq_puts(m, " has delivered dead binder\n");
6540 break;
6541 }
6542 binder_inner_proc_unlock(proc);
6543 if (!print_all && m->count == header_pos)
6544 m->count = start_pos;
6545 }
6546
6547 static const char * const binder_return_strings[] = {
6548 "BR_ERROR",
6549 "BR_OK",
6550 "BR_TRANSACTION",
6551 "BR_REPLY",
6552 "BR_ACQUIRE_RESULT",
6553 "BR_DEAD_REPLY",
6554 "BR_TRANSACTION_COMPLETE",
6555 "BR_INCREFS",
6556 "BR_ACQUIRE",
6557 "BR_RELEASE",
6558 "BR_DECREFS",
6559 "BR_ATTEMPT_ACQUIRE",
6560 "BR_NOOP",
6561 "BR_SPAWN_LOOPER",
6562 "BR_FINISHED",
6563 "BR_DEAD_BINDER",
6564 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6565 "BR_FAILED_REPLY",
6566 "BR_FROZEN_REPLY",
6567 "BR_ONEWAY_SPAM_SUSPECT",
6568 "BR_TRANSACTION_PENDING_FROZEN",
6569 "BR_FROZEN_BINDER",
6570 "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6571 };
6572
6573 static const char * const binder_command_strings[] = {
6574 "BC_TRANSACTION",
6575 "BC_REPLY",
6576 "BC_ACQUIRE_RESULT",
6577 "BC_FREE_BUFFER",
6578 "BC_INCREFS",
6579 "BC_ACQUIRE",
6580 "BC_RELEASE",
6581 "BC_DECREFS",
6582 "BC_INCREFS_DONE",
6583 "BC_ACQUIRE_DONE",
6584 "BC_ATTEMPT_ACQUIRE",
6585 "BC_REGISTER_LOOPER",
6586 "BC_ENTER_LOOPER",
6587 "BC_EXIT_LOOPER",
6588 "BC_REQUEST_DEATH_NOTIFICATION",
6589 "BC_CLEAR_DEATH_NOTIFICATION",
6590 "BC_DEAD_BINDER_DONE",
6591 "BC_TRANSACTION_SG",
6592 "BC_REPLY_SG",
6593 "BC_REQUEST_FREEZE_NOTIFICATION",
6594 "BC_CLEAR_FREEZE_NOTIFICATION",
6595 "BC_FREEZE_NOTIFICATION_DONE",
6596 };
6597
6598 static const char * const binder_objstat_strings[] = {
6599 "proc",
6600 "thread",
6601 "node",
6602 "ref",
6603 "death",
6604 "transaction",
6605 "transaction_complete",
6606 "freeze",
6607 };
6608
6609 static void print_binder_stats(struct seq_file *m, const char *prefix,
6610 struct binder_stats *stats)
6611 {
6612 int i;
6613
6614 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6615 ARRAY_SIZE(binder_command_strings));
6616 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6617 int temp = atomic_read(&stats->bc[i]);
6618
6619 if (temp)
6620 seq_printf(m, "%s%s: %d\n", prefix,
6621 binder_command_strings[i], temp);
6622 }
6623
6624 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6625 ARRAY_SIZE(binder_return_strings));
6626 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6627 int temp = atomic_read(&stats->br[i]);
6628
6629 if (temp)
6630 seq_printf(m, "%s%s: %d\n", prefix,
6631 binder_return_strings[i], temp);
6632 }
6633
6634 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6635 ARRAY_SIZE(binder_objstat_strings));
6636 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6637 ARRAY_SIZE(stats->obj_deleted));
6638 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6639 int created = atomic_read(&stats->obj_created[i]);
6640 int deleted = atomic_read(&stats->obj_deleted[i]);
6641
6642 if (created || deleted)
6643 seq_printf(m, "%s%s: active %d total %d\n",
6644 prefix,
6645 binder_objstat_strings[i],
6646 created - deleted,
6647 created);
6648 }
6649 }
6650
6651 static void print_binder_proc_stats(struct seq_file *m,
6652 struct binder_proc *proc)
6653 {
6654 struct binder_work *w;
6655 struct binder_thread *thread;
6656 struct rb_node *n;
6657 int count, strong, weak, ready_threads;
6658 size_t free_async_space =
6659 binder_alloc_get_free_async_space(&proc->alloc);
6660
6661 seq_printf(m, "proc %d\n", proc->pid);
6662 seq_printf(m, "context %s\n", proc->context->name);
6663 count = 0;
6664 ready_threads = 0;
6665 binder_inner_proc_lock(proc);
6666 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6667 count++;
6668
6669 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6670 ready_threads++;
6671
6672 seq_printf(m, " threads: %d\n", count);
6673 seq_printf(m, " requested threads: %d+%d/%d\n"
6674 " ready threads %d\n"
6675 " free async space %zd\n", proc->requested_threads,
6676 proc->requested_threads_started, proc->max_threads,
6677 ready_threads,
6678 free_async_space);
6679 count = 0;
6680 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6681 count++;
6682 binder_inner_proc_unlock(proc);
6683 seq_printf(m, " nodes: %d\n", count);
6684 count = 0;
6685 strong = 0;
6686 weak = 0;
6687 binder_proc_lock(proc);
6688 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6689 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6690 rb_node_desc);
6691 count++;
6692 strong += ref->data.strong;
6693 weak += ref->data.weak;
6694 }
6695 binder_proc_unlock(proc);
6696 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6697
6698 count = binder_alloc_get_allocated_count(&proc->alloc);
6699 seq_printf(m, " buffers: %d\n", count);
6700
6701 binder_alloc_print_pages(m, &proc->alloc);
6702
6703 count = 0;
6704 binder_inner_proc_lock(proc);
6705 list_for_each_entry(w, &proc->todo, entry) {
6706 if (w->type == BINDER_WORK_TRANSACTION)
6707 count++;
6708 }
6709 binder_inner_proc_unlock(proc);
6710 seq_printf(m, " pending transactions: %d\n", count);
6711
6712 print_binder_stats(m, " ", &proc->stats);
6713 }
6714
6715 static int state_show(struct seq_file *m, void *unused)
6716 {
6717 struct binder_proc *proc;
6718 struct binder_node *node;
6719 struct binder_node *last_node = NULL;
6720
6721 seq_puts(m, "binder state:\n");
6722
6723 spin_lock(&binder_dead_nodes_lock);
6724 if (!hlist_empty(&binder_dead_nodes))
6725 seq_puts(m, "dead nodes:\n");
6726 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6727 /*
6728 * take a temporary reference on the node so it
6729 * survives and isn't removed from the list
6730 * while we print it.
6731 */
6732 node->tmp_refs++;
6733 spin_unlock(&binder_dead_nodes_lock);
6734 if (last_node)
6735 binder_put_node(last_node);
6736 binder_node_lock(node);
6737 print_binder_node_nilocked(m, node);
6738 binder_node_unlock(node);
6739 last_node = node;
6740 spin_lock(&binder_dead_nodes_lock);
6741 }
6742 spin_unlock(&binder_dead_nodes_lock);
6743 if (last_node)
6744 binder_put_node(last_node);
6745
6746 mutex_lock(&binder_procs_lock);
6747 hlist_for_each_entry(proc, &binder_procs, proc_node)
6748 print_binder_proc(m, proc, 1);
6749 mutex_unlock(&binder_procs_lock);
6750
6751 return 0;
6752 }
6753
6754 static int stats_show(struct seq_file *m, void *unused)
6755 {
6756 struct binder_proc *proc;
6757
6758 seq_puts(m, "binder stats:\n");
6759
6760 print_binder_stats(m, "", &binder_stats);
6761
6762 mutex_lock(&binder_procs_lock);
6763 hlist_for_each_entry(proc, &binder_procs, proc_node)
6764 print_binder_proc_stats(m, proc);
6765 mutex_unlock(&binder_procs_lock);
6766
6767 return 0;
6768 }
6769
6770 static int transactions_show(struct seq_file *m, void *unused)
6771 {
6772 struct binder_proc *proc;
6773
6774 seq_puts(m, "binder transactions:\n");
6775 mutex_lock(&binder_procs_lock);
6776 hlist_for_each_entry(proc, &binder_procs, proc_node)
6777 print_binder_proc(m, proc, 0);
6778 mutex_unlock(&binder_procs_lock);
6779
6780 return 0;
6781 }
6782
6783 static int proc_show(struct seq_file *m, void *unused)
6784 {
6785 struct binder_proc *itr;
6786 int pid = (unsigned long)m->private;
6787
6788 mutex_lock(&binder_procs_lock);
6789 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6790 if (itr->pid == pid) {
6791 seq_puts(m, "binder proc state:\n");
6792 print_binder_proc(m, itr, 1);
6793 }
6794 }
6795 mutex_unlock(&binder_procs_lock);
6796
6797 return 0;
6798 }
6799
6800 static void print_binder_transaction_log_entry(struct seq_file *m,
6801 struct binder_transaction_log_entry *e)
6802 {
6803 int debug_id = READ_ONCE(e->debug_id_done);
6804 /*
6805 * read barrier to guarantee debug_id_done read before
6806 * we print the log values
6807 */
6808 smp_rmb();
6809 seq_printf(m,
6810 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6811 e->debug_id, (e->call_type == 2) ? "reply" :
6812 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6813 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6814 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6815 e->return_error, e->return_error_param,
6816 e->return_error_line);
6817 /*
6818 * read-barrier to guarantee read of debug_id_done after
6819 * done printing the fields of the entry
6820 */
6821 smp_rmb();
6822 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6823 "\n" : " (incomplete)\n");
6824 }
6825
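/*
 * transaction_log_show() - dump the transaction log ring buffer,
 * oldest entry first once the log has wrapped. Entries are read
 * without a lock; print_binder_transaction_log_entry() flags any
 * entry that changed mid-read as "(incomplete)".
 */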
6826 static int transaction_log_show(struct seq_file *m, void *unused)
6827 {
6828 struct binder_transaction_log *log = m->private;
6829 unsigned int log_cur = atomic_read(&log->cur);
6830 unsigned int count;
6831 unsigned int cur;
6832 int i;
6833
6834 count = log_cur + 1;
6835 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6836 0 : count % ARRAY_SIZE(log->entry);
6837 if (count > ARRAY_SIZE(log->entry) || log->full)
6838 count = ARRAY_SIZE(log->entry);
6839 for (i = 0; i < count; i++) {
6840 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6841
6842 print_binder_transaction_log_entry(m, &log->entry[index]);
6843 }
6844 return 0;
6845 }
6846
6847 const struct file_operations binder_fops = {
6848 .owner = THIS_MODULE,
6849 .poll = binder_poll,
6850 .unlocked_ioctl = binder_ioctl,
6851 .compat_ioctl = compat_ptr_ioctl,
6852 .mmap = binder_mmap,
6853 .open = binder_open,
6854 .flush = binder_flush,
6855 .release = binder_release,
6856 };
6857
6858 DEFINE_SHOW_ATTRIBUTE(state);
6859 DEFINE_SHOW_ATTRIBUTE(stats);
6860 DEFINE_SHOW_ATTRIBUTE(transactions);
6861 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6862
6863 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6864 {
6865 .name = "state",
6866 .mode = 0444,
6867 .fops = &state_fops,
6868 .data = NULL,
6869 },
6870 {
6871 .name = "stats",
6872 .mode = 0444,
6873 .fops = &stats_fops,
6874 .data = NULL,
6875 },
6876 {
6877 .name = "transactions",
6878 .mode = 0444,
6879 .fops = &transactions_fops,
6880 .data = NULL,
6881 },
6882 {
6883 .name = "transaction_log",
6884 .mode = 0444,
6885 .fops = &transaction_log_fops,
6886 .data = &binder_transaction_log,
6887 },
6888 {
6889 .name = "failed_transaction_log",
6890 .mode = 0444,
6891 .fops = &transaction_log_fops,
6892 .data = &binder_transaction_log_failed,
6893 },
6894 {} /* terminator */
6895 };
6896
6897 static int __init init_binder_device(const char *name)
6898 {
6899 int ret;
6900 struct binder_device *binder_device;
6901
6902 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6903 if (!binder_device)
6904 return -ENOMEM;
6905
6906 binder_device->miscdev.fops = &binder_fops;
6907 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6908 binder_device->miscdev.name = name;
6909
6910 refcount_set(&binder_device->ref, 1);
6911 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6912 binder_device->context.name = name;
6913 mutex_init(&binder_device->context.context_mgr_node_lock);
6914
6915 ret = misc_register(&binder_device->miscdev);
6916 if (ret < 0) {
6917 kfree(binder_device);
6918 return ret;
6919 }
6920
6921 hlist_add_head(&binder_device->hlist, &binder_devices);
6922
6923 return ret;
6924 }
6925
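/*
 * binder_init() - driver initialization: set up the page shrinker and
 * debugfs entries, register one misc device per name in the
 * binder_devices_param list when binderfs is disabled, and register
 * the binderfs filesystem.
 */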
6926 static int __init binder_init(void)
6927 {
6928 int ret;
6929 char *device_name, *device_tmp;
6930 struct binder_device *device;
6931 struct hlist_node *tmp;
6932 char *device_names = NULL;
6933 const struct binder_debugfs_entry *db_entry;
6934
6935 ret = binder_alloc_shrinker_init();
6936 if (ret)
6937 return ret;
6938
6939 atomic_set(&binder_transaction_log.cur, ~0U);
6940 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6941
6942 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6943
6944 binder_for_each_debugfs_entry(db_entry)
6945 debugfs_create_file(db_entry->name,
6946 db_entry->mode,
6947 binder_debugfs_dir_entry_root,
6948 db_entry->data,
6949 db_entry->fops);
6950
6951 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6952 binder_debugfs_dir_entry_root);
6953
6954 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6955 strcmp(binder_devices_param, "") != 0) {
6956 /*
6957 * Copy the module_parameter string, because we don't want to
6958 * tokenize it in-place.
6959 */
6960 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6961 if (!device_names) {
6962 ret = -ENOMEM;
6963 goto err_alloc_device_names_failed;
6964 }
6965
6966 device_tmp = device_names;
6967 while ((device_name = strsep(&device_tmp, ","))) {
6968 ret = init_binder_device(device_name);
6969 if (ret)
6970 goto err_init_binder_device_failed;
6971 }
6972 }
6973
6974 ret = init_binderfs();
6975 if (ret)
6976 goto err_init_binder_device_failed;
6977
6978 return ret;
6979
6980 err_init_binder_device_failed:
6981 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6982 misc_deregister(&device->miscdev);
6983 hlist_del(&device->hlist);
6984 kfree(device);
6985 }
6986
6987 kfree(device_names);
6988
6989 err_alloc_device_names_failed:
6990 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6991 binder_alloc_shrinker_exit();
6992
6993 return ret;
6994 }
6995
6996 device_initcall(binder_init);
6997
6998 #define CREATE_TRACE_POINTS
6999 #include "binder_trace.h"
7000
7001 MODULE_LICENSE("GPL v2");
7002