// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
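
/*
 * Illustrative sketch (not part of the driver): a hypothetical helper
 * that needs both the node lock and the owning proc's inner lock would
 * take them in the documented order and carry the _nilocked suffix:
 *
 *	binder_node_lock(node);
 *	binder_inner_proc_lock(node->proc);
 *	foo_nilocked(node);		// both locks held here
 *	binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * binder_node_inner_lock()/binder_node_inner_unlock() below wrap
 * exactly this pairing.
 */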

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
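
/*
 * Usage note (illustrative; this is standard moduleparam behavior, not
 * code from this file): the parameters above appear under
 * /sys/module/binder/parameters/, so a shell such as
 *
 *	echo 255 > /sys/module/binder/parameters/debug_mask
 *	echo 1   > /sys/module/binder/parameters/stop_on_user_error
 *
 * can raise the debug verbosity or make the driver stall on user
 * errors at runtime.
 */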

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
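
/*
 * Illustrative use (the real call sites live in the transaction code
 * further down this file, outside this excerpt): a failing transaction
 * records why it failed so userspace can later fetch the details, e.g.:
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */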

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
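
/*
 * Sketch of the reader side this barrier pairs with (the actual log
 * dump code is further down in this file, outside this excerpt): a
 * reader snapshots debug_id_done first and issues the matching read
 * barrier before consuming the entry, e.g.:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();	// pairs with the smp_wmb() above
 *	...print *e, then compare against debug_id to detect a torn entry...
 */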

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such a
	 * thread risks waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
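
/*
 * Typical tmpref usage (illustrative sketch; concrete call sites occur
 * throughout the driver): lookups such as binder_get_node() return the
 * node with an implicit tmp_ref held, which the caller drops when done:
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp_ref
 *	if (node) {
 *		...use node without fear of it being freed...
 *		binder_put_node(node);		// drops the tmp_ref
 *	}
 */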

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}
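
/*
 * Because -EAGAIN means the outer lock was dropped and any earlier
 * lookups may be stale, callers are expected to restart. A sketch of
 * the pattern (binder_get_ref_for_node_olocked() below does exactly
 * this with its "goto retry"):
 *
 *	retry:
 *		...walk the rb-trees under proc->outer_lock...
 *		if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
 *			goto retry;
 */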

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	 ref to be incremented
 * @strong:	 if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if the ref is not found or if a
 * strong ref was required but the ref is only weak
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
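
/*
 * Illustrative pairing (a sketch, not a real call site in this
 * excerpt): every successful binder_get_txn_from() must be balanced by
 * binder_thread_dec_tmpref() once the caller is done with the thread:
 *
 *	from = binder_get_txn_from(t);	// takes from->tmp_ref
 *	if (from) {
 *		...use from...
 *		binder_thread_dec_tmpref(from);	// may free the thread
 *	}
 */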
1586
1587 /**
1588 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1589 * @t: binder transaction for t->from
1590 *
1591 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1592 * to guarantee that the thread cannot be released while operating on it.
1593 * The caller must call binder_inner_proc_unlock() to release the inner lock
1594 * as well as call binder_dec_thread_txn() to release the reference.
1595 *
1596 * Return: the value of t->from
1597 */
binder_get_txn_from_and_acq_inner(struct binder_transaction * t)1598 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1599 struct binder_transaction *t)
1600 __acquires(&t->from->proc->inner_lock)
1601 {
1602 struct binder_thread *from;
1603
1604 from = binder_get_txn_from(t);
1605 if (!from) {
1606 __acquire(&from->proc->inner_lock);
1607 return NULL;
1608 }
1609 binder_inner_proc_lock(from->proc);
1610 if (t->from) {
1611 BUG_ON(from != t->from);
1612 return from;
1613 }
1614 binder_inner_proc_unlock(from->proc);
1615 __acquire(&from->proc->inner_lock);
1616 binder_thread_dec_tmpref(from);
1617 return NULL;
1618 }
1619
1620 /**
1621 * binder_free_txn_fixups() - free unprocessed fd fixups
1622 * @t: binder transaction for t->from
1623 *
1624 * If the transaction is being torn down prior to being
1625 * processed by the target process, free all of the
1626 * fd fixups and fput the file structs. It is safe to
1627 * call this function after the fixups have been
1628 * processed -- in that case, the list will be empty.
1629 */
binder_free_txn_fixups(struct binder_transaction * t)1630 static void binder_free_txn_fixups(struct binder_transaction *t)
1631 {
1632 struct binder_txn_fd_fixup *fixup, *tmp;
1633
1634 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1635 fput(fixup->file);
1636 if (fixup->target_fd >= 0)
1637 put_unused_fd(fixup->target_fd);
1638 list_del(&fixup->fixup_entry);
1639 kfree(fixup);
1640 }
1641 }
1642
binder_txn_latency_free(struct binder_transaction * t)1643 static void binder_txn_latency_free(struct binder_transaction *t)
1644 {
1645 int from_proc, from_thread, to_proc, to_thread;
1646
1647 spin_lock(&t->lock);
1648 from_proc = t->from ? t->from->proc->pid : 0;
1649 from_thread = t->from ? t->from->pid : 0;
1650 to_proc = t->to_proc ? t->to_proc->pid : 0;
1651 to_thread = t->to_thread ? t->to_thread->pid : 0;
1652 spin_unlock(&t->lock);
1653
1654 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1655 }
1656
binder_free_transaction(struct binder_transaction * t)1657 static void binder_free_transaction(struct binder_transaction *t)
1658 {
1659 struct binder_proc *target_proc = t->to_proc;
1660
1661 if (target_proc) {
1662 binder_inner_proc_lock(target_proc);
1663 target_proc->outstanding_txns--;
1664 if (target_proc->outstanding_txns < 0)
1665 pr_warn("%s: Unexpected outstanding_txns %d\n",
1666 __func__, target_proc->outstanding_txns);
1667 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1668 wake_up_interruptible_all(&target_proc->freeze_wait);
1669 if (t->buffer)
1670 t->buffer->transaction = NULL;
1671 binder_inner_proc_unlock(target_proc);
1672 }
1673 if (trace_binder_txn_latency_free_enabled())
1674 binder_txn_latency_free(t);
1675 /*
1676 * If the transaction has no target_proc, then
1677 * t->buffer->transaction has already been cleared.
1678 */
1679 binder_free_txn_fixups(t);
1680 kfree(t);
1681 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1682 }
1683
binder_send_failed_reply(struct binder_transaction * t,uint32_t error_code)1684 static void binder_send_failed_reply(struct binder_transaction *t,
1685 uint32_t error_code)
1686 {
1687 struct binder_thread *target_thread;
1688 struct binder_transaction *next;
1689
1690 BUG_ON(t->flags & TF_ONE_WAY);
1691 while (1) {
1692 target_thread = binder_get_txn_from_and_acq_inner(t);
1693 if (target_thread) {
1694 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1695 "send failed reply for transaction %d to %d:%d\n",
1696 t->debug_id,
1697 target_thread->proc->pid,
1698 target_thread->pid);
1699
1700 binder_pop_transaction_ilocked(target_thread, t);
1701 if (target_thread->reply_error.cmd == BR_OK) {
1702 target_thread->reply_error.cmd = error_code;
1703 binder_enqueue_thread_work_ilocked(
1704 target_thread,
1705 &target_thread->reply_error.work);
1706 wake_up_interruptible(&target_thread->wait);
1707 } else {
1708 /*
1709 * Cannot get here for normal operation, but
1710 * we can if multiple synchronous transactions
1711 * are sent without blocking for responses.
1712 * Just ignore the 2nd error in this case.
1713 */
1714 pr_warn("Unexpected reply error: %u\n",
1715 target_thread->reply_error.cmd);
1716 }
1717 binder_inner_proc_unlock(target_thread->proc);
1718 binder_thread_dec_tmpref(target_thread);
1719 binder_free_transaction(t);
1720 return;
1721 }
1722 __release(&target_thread->proc->inner_lock);
1723 next = t->from_parent;
1724
1725 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1726 "send failed reply for transaction %d, target dead\n",
1727 t->debug_id);
1728
1729 binder_free_transaction(t);
1730 if (next == NULL) {
1731 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1732 "reply failed, no target thread at root\n");
1733 return;
1734 }
1735 t = next;
1736 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1737 "reply failed, no target thread -- retry %d\n",
1738 t->debug_id);
1739 }
1740 }
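/*
 * Worked example of the loop above (illustrative): for a nested call
 * chain where t1 was being serviced when t2 was sent, and t2 when t3
 * was sent (t3->from_parent == t2, t2->from_parent == t1), a failure
 * delivering t3's reply walks t3, t2, t1 in turn, handing the error to
 * the first sender thread still alive and freeing each transaction on
 * the way up the chain.
 */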
1741
1742 /**
1743 * binder_cleanup_transaction() - cleans up undelivered transaction
1744 * @t: transaction that needs to be cleaned up
1745 * @reason: reason the transaction wasn't delivered
1746 * @error_code: error to return to caller (if synchronous call)
1747 */
1748 static void binder_cleanup_transaction(struct binder_transaction *t,
1749 const char *reason,
1750 uint32_t error_code)
1751 {
1752 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1753 binder_send_failed_reply(t, error_code);
1754 } else {
1755 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1756 "undelivered transaction %d, %s\n",
1757 t->debug_id, reason);
1758 binder_free_transaction(t);
1759 }
1760 }
1761
1762 /**
1763 * binder_get_object() - gets object and checks for valid metadata
1764 * @proc: binder_proc owning the buffer
1765 * @u: sender's user pointer to base of buffer
1766 * @buffer: binder_buffer that we're parsing.
1767 * @offset: offset in the @buffer at which to validate an object.
1768 * @object: struct binder_object to read into
1769 *
1770 * Copy the binder object at the given offset into @object. If @u is
1771 * provided then the copy is from the sender's buffer. If not, then
1772 * it is copied from the target's @buffer.
1773 *
1774 * Return: If there's a valid metadata object at @offset, the
1775 * size of that object. Otherwise, it returns zero. The object
1776 * is read into the struct binder_object pointed to by @object.
1777 */
1778 static size_t binder_get_object(struct binder_proc *proc,
1779 const void __user *u,
1780 struct binder_buffer *buffer,
1781 unsigned long offset,
1782 struct binder_object *object)
1783 {
1784 size_t read_size;
1785 struct binder_object_header *hdr;
1786 size_t object_size = 0;
1787
1788 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1789 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1790 !IS_ALIGNED(offset, sizeof(u32)))
1791 return 0;
1792
1793 if (u) {
1794 if (copy_from_user(object, u + offset, read_size))
1795 return 0;
1796 } else {
1797 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1798 offset, read_size))
1799 return 0;
1800 }
1801
1802 /* Ok, now see if we read a complete object. */
1803 hdr = &object->hdr;
1804 switch (hdr->type) {
1805 case BINDER_TYPE_BINDER:
1806 case BINDER_TYPE_WEAK_BINDER:
1807 case BINDER_TYPE_HANDLE:
1808 case BINDER_TYPE_WEAK_HANDLE:
1809 object_size = sizeof(struct flat_binder_object);
1810 break;
1811 case BINDER_TYPE_FD:
1812 object_size = sizeof(struct binder_fd_object);
1813 break;
1814 case BINDER_TYPE_PTR:
1815 object_size = sizeof(struct binder_buffer_object);
1816 break;
1817 case BINDER_TYPE_FDA:
1818 object_size = sizeof(struct binder_fd_array_object);
1819 break;
1820 default:
1821 return 0;
1822 }
1823 if (offset <= buffer->data_size - object_size &&
1824 buffer->data_size >= object_size)
1825 return object_size;
1826 else
1827 return 0;
1828 }
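/*
 * Illustrative userspace sketch (not driver code) of the layout that
 * binder_get_object() validates: one flat_binder_object placed at a
 * u32-aligned offset in the data buffer, with that offset recorded in
 * the offsets array. The helper name and values are arbitrary examples.
 */
#include <string.h>
#include <linux/android/binder.h>

static void example_place_object(void *data, binder_size_t *offsets)
{
	struct flat_binder_object fbo;

	memset(&fbo, 0, sizeof(fbo));
	fbo.hdr.type = BINDER_TYPE_BINDER;	/* strong node reference */
	fbo.binder = (binder_uintptr_t)0x1000;	/* example local node ptr */

	memcpy(data, &fbo, sizeof(fbo));	/* object at offset 0 */
	offsets[0] = 0;				/* entry in tr.data.ptr.offsets */
}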
1829
1830 /**
1831 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1832 * @proc: binder_proc owning the buffer
1833 * @b: binder_buffer containing the object
1834 * @object: struct binder_object to read into
1835 * @index: index in offset array at which the binder_buffer_object is
1836 * located
1837 * @start_offset: points to the start of the offset array
1838 * @object_offsetp: offset of @object read from @b
1839 * @num_valid: the number of valid offsets in the offset array
1840 *
1841 * Return: If @index is within the valid range of the offset array
1842 * described by @start and @num_valid, and if there's a valid
1843 * binder_buffer_object at the offset found in index @index
1844 * of the offset array, that object is returned. Otherwise,
1845 * %NULL is returned.
1846 * Note that the offset found in index @index itself is not
1847 * verified; this function assumes that @num_valid elements
1848 * from @start were previously verified to have valid offsets.
1849 * If @object_offsetp is non-NULL, then the offset within
1850 * @b is written to it.
1851 */
1852 static struct binder_buffer_object *binder_validate_ptr(
1853 struct binder_proc *proc,
1854 struct binder_buffer *b,
1855 struct binder_object *object,
1856 binder_size_t index,
1857 binder_size_t start_offset,
1858 binder_size_t *object_offsetp,
1859 binder_size_t num_valid)
1860 {
1861 size_t object_size;
1862 binder_size_t object_offset;
1863 unsigned long buffer_offset;
1864
1865 if (index >= num_valid)
1866 return NULL;
1867
1868 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1869 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1870 b, buffer_offset,
1871 sizeof(object_offset)))
1872 return NULL;
1873 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1874 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1875 return NULL;
1876 if (object_offsetp)
1877 *object_offsetp = object_offset;
1878
1879 return &object->bbo;
1880 }
1881
1882 /**
1883 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1884 * @proc: binder_proc owning the buffer
1885 * @b: transaction buffer
1886 * @objects_start_offset: offset to start of objects buffer
1887 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1888 * @fixup_offset: start offset in @buffer to fix up
1889 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1890 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1891 *
1892  * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1893  * allowed.
1894 *
1895 * For safety reasons, we only allow fixups inside a buffer to happen
1896 * at increasing offsets; additionally, we only allow fixup on the last
1897 * buffer object that was verified, or one of its parents.
1898 *
1899 * Example of what is allowed:
1900 *
1901 * A
1902 * B (parent = A, offset = 0)
1903 * C (parent = A, offset = 16)
1904 * D (parent = C, offset = 0)
1905 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1906 *
1907 * Examples of what is not allowed:
1908 *
1909 * Decreasing offsets within the same parent:
1910 * A
1911 * C (parent = A, offset = 16)
1912 * B (parent = A, offset = 0) // decreasing offset within A
1913 *
1914 * Referring to a parent that wasn't the last object or any of its parents:
1915 * A
1916 * B (parent = A, offset = 0)
1917 * C (parent = A, offset = 0)
1918 * C (parent = A, offset = 16)
1919  * D (parent = B, offset = 0) // B is not the last object (C) or any of its parents
1920 */
1921 static bool binder_validate_fixup(struct binder_proc *proc,
1922 struct binder_buffer *b,
1923 binder_size_t objects_start_offset,
1924 binder_size_t buffer_obj_offset,
1925 binder_size_t fixup_offset,
1926 binder_size_t last_obj_offset,
1927 binder_size_t last_min_offset)
1928 {
1929 if (!last_obj_offset) {
1930 /* Nothing to fix up */
1931 return false;
1932 }
1933
1934 while (last_obj_offset != buffer_obj_offset) {
1935 unsigned long buffer_offset;
1936 struct binder_object last_object;
1937 struct binder_buffer_object *last_bbo;
1938 size_t object_size = binder_get_object(proc, NULL, b,
1939 last_obj_offset,
1940 &last_object);
1941 if (object_size != sizeof(*last_bbo))
1942 return false;
1943
1944 last_bbo = &last_object.bbo;
1945 /*
1946 * Safe to retrieve the parent of last_obj, since it
1947 * was already previously verified by the driver.
1948 */
1949 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1950 return false;
1951 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1952 buffer_offset = objects_start_offset +
1953 sizeof(binder_size_t) * last_bbo->parent;
1954 if (binder_alloc_copy_from_buffer(&proc->alloc,
1955 &last_obj_offset,
1956 b, buffer_offset,
1957 sizeof(last_obj_offset)))
1958 return false;
1959 }
1960 return (fixup_offset >= last_min_offset);
1961 }
1962
1963 /**
1964 * struct binder_task_work_cb - for deferred close
1965 *
1966 * @twork: callback_head for task work
1967  * @file: file to fput() when the task work runs
1968 *
1969 * Structure to pass task work to be handled after
1970 * returning from binder_ioctl() via task_work_add().
1971 */
1972 struct binder_task_work_cb {
1973 struct callback_head twork;
1974 struct file *file;
1975 };
1976
1977 /**
1978  * binder_do_fd_close() - finish a deferred file-descriptor close
1979 * @twork: callback head for task work
1980 *
1981 * It is not safe to call ksys_close() during the binder_ioctl()
1982 * function if there is a chance that binder's own file descriptor
1983 * might be closed. This is to meet the requirements for using
1984 * fdget() (see comments for __fget_light()). Therefore use
1985 * task_work_add() to schedule the close operation once we have
1986 * returned from binder_ioctl(). This function is a callback
1987  * for that mechanism and drops the final reference on the
1988  * file with fput().
1989 */
1990 static void binder_do_fd_close(struct callback_head *twork)
1991 {
1992 struct binder_task_work_cb *twcb = container_of(twork,
1993 struct binder_task_work_cb, twork);
1994
1995 fput(twcb->file);
1996 kfree(twcb);
1997 }
1998
1999 /**
2000 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2001 * @fd: file-descriptor to close
2002 *
2003 * See comments in binder_do_fd_close(). This function is used to schedule
2004 * a file-descriptor to be closed after returning from binder_ioctl().
2005 */
2006 static void binder_deferred_fd_close(int fd)
2007 {
2008 struct binder_task_work_cb *twcb;
2009
2010 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2011 if (!twcb)
2012 return;
2013 init_task_work(&twcb->twork, binder_do_fd_close);
2014 twcb->file = file_close_fd(fd);
2015 if (twcb->file) {
2016 // pin it until binder_do_fd_close(); see comments there
2017 get_file(twcb->file);
2018 filp_close(twcb->file, current->files);
2019 task_work_add(current, &twcb->twork, TWA_RESUME);
2020 } else {
2021 kfree(twcb);
2022 }
2023 }
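/*
 * Minimal standalone sketch of the task_work pattern used above
 * ("example_work"/"example_cb"/"example_schedule" are hypothetical
 * names). The callback runs when the current task returns to
 * userspace, which is what makes the close safe with respect to
 * fdget() in the ioctl path.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct example_work {
	struct callback_head twork;
};

static void example_cb(struct callback_head *twork)
{
	kfree(container_of(twork, struct example_work, twork));
}

static void example_schedule(void)
{
	struct example_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return;
	init_task_work(&w->twork, example_cb);
	if (task_work_add(current, &w->twork, TWA_RESUME))
		kfree(w);	/* task is exiting; nothing was queued */
}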
2024
2025 static void binder_transaction_buffer_release(struct binder_proc *proc,
2026 struct binder_thread *thread,
2027 struct binder_buffer *buffer,
2028 binder_size_t off_end_offset,
2029 bool is_failure)
2030 {
2031 int debug_id = buffer->debug_id;
2032 binder_size_t off_start_offset, buffer_offset;
2033
2034 binder_debug(BINDER_DEBUG_TRANSACTION,
2035 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2036 proc->pid, buffer->debug_id,
2037 buffer->data_size, buffer->offsets_size,
2038 (unsigned long long)off_end_offset);
2039
2040 if (buffer->target_node)
2041 binder_dec_node(buffer->target_node, 1, 0);
2042
2043 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2044
2045 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2046 buffer_offset += sizeof(binder_size_t)) {
2047 struct binder_object_header *hdr;
2048 size_t object_size = 0;
2049 struct binder_object object;
2050 binder_size_t object_offset;
2051
2052 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2053 buffer, buffer_offset,
2054 sizeof(object_offset)))
2055 object_size = binder_get_object(proc, NULL, buffer,
2056 object_offset, &object);
2057 if (object_size == 0) {
2058 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2059 debug_id, (u64)object_offset, buffer->data_size);
2060 continue;
2061 }
2062 hdr = &object.hdr;
2063 switch (hdr->type) {
2064 case BINDER_TYPE_BINDER:
2065 case BINDER_TYPE_WEAK_BINDER: {
2066 struct flat_binder_object *fp;
2067 struct binder_node *node;
2068
2069 fp = to_flat_binder_object(hdr);
2070 node = binder_get_node(proc, fp->binder);
2071 if (node == NULL) {
2072 pr_err("transaction release %d bad node %016llx\n",
2073 debug_id, (u64)fp->binder);
2074 break;
2075 }
2076 binder_debug(BINDER_DEBUG_TRANSACTION,
2077 " node %d u%016llx\n",
2078 node->debug_id, (u64)node->ptr);
2079 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2080 0);
2081 binder_put_node(node);
2082 } break;
2083 case BINDER_TYPE_HANDLE:
2084 case BINDER_TYPE_WEAK_HANDLE: {
2085 struct flat_binder_object *fp;
2086 struct binder_ref_data rdata;
2087 int ret;
2088
2089 fp = to_flat_binder_object(hdr);
2090 ret = binder_dec_ref_for_handle(proc, fp->handle,
2091 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2092
2093 if (ret) {
2094 pr_err("transaction release %d bad handle %d, ret = %d\n",
2095 debug_id, fp->handle, ret);
2096 break;
2097 }
2098 binder_debug(BINDER_DEBUG_TRANSACTION,
2099 " ref %d desc %d\n",
2100 rdata.debug_id, rdata.desc);
2101 } break;
2102
2103 case BINDER_TYPE_FD: {
2104 /*
2105 * No need to close the file here since user-space
2106 * closes it for successfully delivered
2107 * transactions. For transactions that weren't
2108 * delivered, the new fd was never allocated so
2109 * there is no need to close and the fput on the
2110 * file is done when the transaction is torn
2111 * down.
2112 */
2113 } break;
2114 case BINDER_TYPE_PTR:
2115 /*
2116 * Nothing to do here, this will get cleaned up when the
2117 * transaction buffer gets freed
2118 */
2119 break;
2120 case BINDER_TYPE_FDA: {
2121 struct binder_fd_array_object *fda;
2122 struct binder_buffer_object *parent;
2123 struct binder_object ptr_object;
2124 binder_size_t fda_offset;
2125 size_t fd_index;
2126 binder_size_t fd_buf_size;
2127 binder_size_t num_valid;
2128
2129 if (is_failure) {
2130 /*
2131 * The fd fixups have not been applied so no
2132 * fds need to be closed.
2133 */
2134 continue;
2135 }
2136
2137 num_valid = (buffer_offset - off_start_offset) /
2138 sizeof(binder_size_t);
2139 fda = to_binder_fd_array_object(hdr);
2140 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2141 fda->parent,
2142 off_start_offset,
2143 NULL,
2144 num_valid);
2145 if (!parent) {
2146 pr_err("transaction release %d bad parent offset\n",
2147 debug_id);
2148 continue;
2149 }
2150 fd_buf_size = sizeof(u32) * fda->num_fds;
2151 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2152 pr_err("transaction release %d invalid number of fds (%lld)\n",
2153 debug_id, (u64)fda->num_fds);
2154 continue;
2155 }
2156 if (fd_buf_size > parent->length ||
2157 fda->parent_offset > parent->length - fd_buf_size) {
2158 /* No space for all file descriptors here. */
2159 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2160 debug_id, (u64)fda->num_fds);
2161 continue;
2162 }
2163 /*
2164 * the source data for binder_buffer_object is visible
2165 * to user-space and the @buffer element is the user
2166 * pointer to the buffer_object containing the fd_array.
2167 * Convert the address to an offset relative to
2168 * the base of the transaction buffer.
2169 */
2170 fda_offset = parent->buffer - buffer->user_data +
2171 fda->parent_offset;
2172 for (fd_index = 0; fd_index < fda->num_fds;
2173 fd_index++) {
2174 u32 fd;
2175 int err;
2176 binder_size_t offset = fda_offset +
2177 fd_index * sizeof(fd);
2178
2179 err = binder_alloc_copy_from_buffer(
2180 &proc->alloc, &fd, buffer,
2181 offset, sizeof(fd));
2182 WARN_ON(err);
2183 if (!err) {
2184 binder_deferred_fd_close(fd);
2185 /*
2186 * Need to make sure the thread goes
2187 * back to userspace to complete the
2188 * deferred close
2189 */
2190 if (thread)
2191 thread->looper_need_return = true;
2192 }
2193 }
2194 } break;
2195 default:
2196 pr_err("transaction release %d bad object type %x\n",
2197 debug_id, hdr->type);
2198 break;
2199 }
2200 }
2201 }
2202
2203 /* Clean up all the objects in the buffer */
2204 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2205 struct binder_thread *thread,
2206 struct binder_buffer *buffer,
2207 bool is_failure)
2208 {
2209 binder_size_t off_end_offset;
2210
2211 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2212 off_end_offset += buffer->offsets_size;
2213
2214 binder_transaction_buffer_release(proc, thread, buffer,
2215 off_end_offset, is_failure);
2216 }
2217
2218 static int binder_translate_binder(struct flat_binder_object *fp,
2219 struct binder_transaction *t,
2220 struct binder_thread *thread)
2221 {
2222 struct binder_node *node;
2223 struct binder_proc *proc = thread->proc;
2224 struct binder_proc *target_proc = t->to_proc;
2225 struct binder_ref_data rdata;
2226 int ret = 0;
2227
2228 node = binder_get_node(proc, fp->binder);
2229 if (!node) {
2230 node = binder_new_node(proc, fp);
2231 if (!node)
2232 return -ENOMEM;
2233 }
2234 if (fp->cookie != node->cookie) {
2235 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2236 proc->pid, thread->pid, (u64)fp->binder,
2237 node->debug_id, (u64)fp->cookie,
2238 (u64)node->cookie);
2239 ret = -EINVAL;
2240 goto done;
2241 }
2242 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2243 ret = -EPERM;
2244 goto done;
2245 }
2246
2247 ret = binder_inc_ref_for_node(target_proc, node,
2248 fp->hdr.type == BINDER_TYPE_BINDER,
2249 &thread->todo, &rdata);
2250 if (ret)
2251 goto done;
2252
2253 if (fp->hdr.type == BINDER_TYPE_BINDER)
2254 fp->hdr.type = BINDER_TYPE_HANDLE;
2255 else
2256 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2257 fp->binder = 0;
2258 fp->handle = rdata.desc;
2259 fp->cookie = 0;
2260
2261 trace_binder_transaction_node_to_ref(t, node, &rdata);
2262 binder_debug(BINDER_DEBUG_TRANSACTION,
2263 " node %d u%016llx -> ref %d desc %d\n",
2264 node->debug_id, (u64)node->ptr,
2265 rdata.debug_id, rdata.desc);
2266 done:
2267 binder_put_node(node);
2268 return ret;
2269 }
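/*
 * Illustrative userspace view of the translation above (hypothetical
 * helper): the sender transmits BINDER_TYPE_BINDER naming its local
 * node; the receiver reads the same object back as BINDER_TYPE_HANDLE
 * whose handle is a descriptor in the receiver's own reference table.
 */
#include <linux/android/binder.h>

static int example_read_translated(const struct flat_binder_object *fbo)
{
	if (fbo->hdr.type != BINDER_TYPE_HANDLE)
		return -1;
	return (int)fbo->handle;	/* receiver-local desc (rdata.desc above) */
}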
2270
2271 static int binder_translate_handle(struct flat_binder_object *fp,
2272 struct binder_transaction *t,
2273 struct binder_thread *thread)
2274 {
2275 struct binder_proc *proc = thread->proc;
2276 struct binder_proc *target_proc = t->to_proc;
2277 struct binder_node *node;
2278 struct binder_ref_data src_rdata;
2279 int ret = 0;
2280
2281 node = binder_get_node_from_ref(proc, fp->handle,
2282 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2283 if (!node) {
2284 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2285 proc->pid, thread->pid, fp->handle);
2286 return -EINVAL;
2287 }
2288 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2289 ret = -EPERM;
2290 goto done;
2291 }
2292
2293 binder_node_lock(node);
2294 if (node->proc == target_proc) {
2295 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2296 fp->hdr.type = BINDER_TYPE_BINDER;
2297 else
2298 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2299 fp->binder = node->ptr;
2300 fp->cookie = node->cookie;
2301 if (node->proc)
2302 binder_inner_proc_lock(node->proc);
2303 else
2304 __acquire(&node->proc->inner_lock);
2305 binder_inc_node_nilocked(node,
2306 fp->hdr.type == BINDER_TYPE_BINDER,
2307 0, NULL);
2308 if (node->proc)
2309 binder_inner_proc_unlock(node->proc);
2310 else
2311 __release(&node->proc->inner_lock);
2312 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2313 binder_debug(BINDER_DEBUG_TRANSACTION,
2314 " ref %d desc %d -> node %d u%016llx\n",
2315 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2316 (u64)node->ptr);
2317 binder_node_unlock(node);
2318 } else {
2319 struct binder_ref_data dest_rdata;
2320
2321 binder_node_unlock(node);
2322 ret = binder_inc_ref_for_node(target_proc, node,
2323 fp->hdr.type == BINDER_TYPE_HANDLE,
2324 NULL, &dest_rdata);
2325 if (ret)
2326 goto done;
2327
2328 fp->binder = 0;
2329 fp->handle = dest_rdata.desc;
2330 fp->cookie = 0;
2331 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2332 &dest_rdata);
2333 binder_debug(BINDER_DEBUG_TRANSACTION,
2334 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2335 src_rdata.debug_id, src_rdata.desc,
2336 dest_rdata.debug_id, dest_rdata.desc,
2337 node->debug_id);
2338 }
2339 done:
2340 binder_put_node(node);
2341 return ret;
2342 }
2343
2344 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2345 struct binder_transaction *t,
2346 struct binder_thread *thread,
2347 struct binder_transaction *in_reply_to)
2348 {
2349 struct binder_proc *proc = thread->proc;
2350 struct binder_proc *target_proc = t->to_proc;
2351 struct binder_txn_fd_fixup *fixup;
2352 struct file *file;
2353 int ret = 0;
2354 bool target_allows_fd;
2355
2356 if (in_reply_to)
2357 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2358 else
2359 target_allows_fd = t->buffer->target_node->accept_fds;
2360 if (!target_allows_fd) {
2361 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2362 proc->pid, thread->pid,
2363 in_reply_to ? "reply" : "transaction",
2364 fd);
2365 ret = -EPERM;
2366 goto err_fd_not_accepted;
2367 }
2368
2369 file = fget(fd);
2370 if (!file) {
2371 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2372 proc->pid, thread->pid, fd);
2373 ret = -EBADF;
2374 goto err_fget;
2375 }
2376 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2377 if (ret < 0) {
2378 ret = -EPERM;
2379 goto err_security;
2380 }
2381
2382 /*
2383 * Add fixup record for this transaction. The allocation
2384 * of the fd in the target needs to be done from a
2385 * target thread.
2386 */
2387 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2388 if (!fixup) {
2389 ret = -ENOMEM;
2390 goto err_alloc;
2391 }
2392 fixup->file = file;
2393 fixup->offset = fd_offset;
2394 fixup->target_fd = -1;
2395 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2396 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2397
2398 return ret;
2399
2400 err_alloc:
2401 err_security:
2402 fput(file);
2403 err_fget:
2404 err_fd_not_accepted:
2405 return ret;
2406 }
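/*
 * Illustrative userspace sketch (hypothetical helper): embedding a
 * file descriptor in a transaction. The sender-local fd written here
 * is replaced with a target-local descriptor when the fixup queued
 * above is applied in the receiving process.
 */
#include <string.h>
#include <linux/android/binder.h>

static void example_fill_fd_object(struct binder_fd_object *fdo, int fd)
{
	memset(fdo, 0, sizeof(*fdo));
	fdo->hdr.type = BINDER_TYPE_FD;
	fdo->fd = fd;			/* rewritten on delivery */
}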
2407
2408 /**
2409 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2410  * @offset:     offset in target buffer to fixup
2411  * @skip_size:  bytes to skip in copy (fixup will be written later)
2412  * @fixup_data: data to write at fixup offset
2413  * @node:       list node
2414 *
2415 * This is used for the pointer fixup list (pf) which is created and consumed
2416 * during binder_transaction() and is only accessed locally. No
2417 * locking is necessary.
2418 *
2419 * The list is ordered by @offset.
2420 */
2421 struct binder_ptr_fixup {
2422 binder_size_t offset;
2423 size_t skip_size;
2424 binder_uintptr_t fixup_data;
2425 struct list_head node;
2426 };
2427
2428 /**
2429 * struct binder_sg_copy - scatter-gather data to be copied
2430  * @offset:       offset in target buffer
2431  * @sender_uaddr: user address in source buffer
2432  * @length:       bytes to copy
2433  * @node:         list node
2434 *
2435 * This is used for the sg copy list (sgc) which is created and consumed
2436 * during binder_transaction() and is only accessed locally. No
2437 * locking is necessary.
2438 *
2439 * The list is ordered by @offset.
2440 */
2441 struct binder_sg_copy {
2442 binder_size_t offset;
2443 const void __user *sender_uaddr;
2444 size_t length;
2445 struct list_head node;
2446 };
2447
2448 /**
2449 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2450 * @alloc: binder_alloc associated with @buffer
2451 * @buffer: binder buffer in target process
2452 * @sgc_head: list_head of scatter-gather copy list
2453 * @pf_head: list_head of pointer fixup list
2454 *
2455 * Processes all elements of @sgc_head, applying fixups from @pf_head
2456 * and copying the scatter-gather data from the source process' user
2457 * buffer to the target's buffer. It is expected that the list creation
2458 * and processing all occurs during binder_transaction() so these lists
2459 * are only accessed in local context.
2460 *
2461 * Return: 0=success, else -errno
2462 */
2463 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2464 struct binder_buffer *buffer,
2465 struct list_head *sgc_head,
2466 struct list_head *pf_head)
2467 {
2468 int ret = 0;
2469 struct binder_sg_copy *sgc, *tmpsgc;
2470 struct binder_ptr_fixup *tmppf;
2471 struct binder_ptr_fixup *pf =
2472 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2473 node);
2474
2475 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2476 size_t bytes_copied = 0;
2477
2478 while (bytes_copied < sgc->length) {
2479 size_t copy_size;
2480 size_t bytes_left = sgc->length - bytes_copied;
2481 size_t offset = sgc->offset + bytes_copied;
2482
2483 /*
2484 * We copy up to the fixup (pointed to by pf)
2485 */
2486 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2487 : bytes_left;
2488 if (!ret && copy_size)
2489 ret = binder_alloc_copy_user_to_buffer(
2490 alloc, buffer,
2491 offset,
2492 sgc->sender_uaddr + bytes_copied,
2493 copy_size);
2494 bytes_copied += copy_size;
2495 if (copy_size != bytes_left) {
2496 BUG_ON(!pf);
2497 /* we stopped at a fixup offset */
2498 if (pf->skip_size) {
2499 /*
2500 * we are just skipping. This is for
2501 * BINDER_TYPE_FDA where the translated
2502 * fds will be fixed up when we get
2503 * to target context.
2504 */
2505 bytes_copied += pf->skip_size;
2506 } else {
2507 /* apply the fixup indicated by pf */
2508 if (!ret)
2509 ret = binder_alloc_copy_to_buffer(
2510 alloc, buffer,
2511 pf->offset,
2512 &pf->fixup_data,
2513 sizeof(pf->fixup_data));
2514 bytes_copied += sizeof(pf->fixup_data);
2515 }
2516 list_del(&pf->node);
2517 kfree(pf);
2518 pf = list_first_entry_or_null(pf_head,
2519 struct binder_ptr_fixup, node);
2520 }
2521 }
2522 list_del(&sgc->node);
2523 kfree(sgc);
2524 }
2525 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2526 BUG_ON(pf->skip_size == 0);
2527 list_del(&pf->node);
2528 kfree(pf);
2529 }
2530 BUG_ON(!list_empty(sgc_head));
2531
2532 return ret > 0 ? -EINVAL : ret;
2533 }
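/*
 * Self-contained userspace model of the copy/fixup interleave above
 * (illustrative only): one 32-byte sg block with a single 8-byte
 * pointer fixup at offset 16. Source bytes [16..24) are never copied;
 * the fixed-up value is written instead, mirroring how un-translated
 * values from the sender are never visible in the target buffer.
 */
#include <stdint.h>
#include <string.h>

static void model_copy_with_fixup(uint8_t dst[32], const uint8_t src[32])
{
	const size_t fix_off = 16;
	uint64_t fixed = 0xdeadbeef;		/* example translated pointer */

	memcpy(dst, src, fix_off);		/* copy up to the fixup */
	memcpy(dst + fix_off, &fixed, sizeof(fixed));	/* apply fixup */
	memcpy(dst + fix_off + sizeof(fixed),	/* resume copy after it */
	       src + fix_off + sizeof(fixed),
	       32 - fix_off - sizeof(fixed));
}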
2534
2535 /**
2536 * binder_cleanup_deferred_txn_lists() - free specified lists
2537 * @sgc_head: list_head of scatter-gather copy list
2538 * @pf_head: list_head of pointer fixup list
2539 *
2540 * Called to clean up @sgc_head and @pf_head if there is an
2541 * error.
2542 */
2543 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2544 struct list_head *pf_head)
2545 {
2546 struct binder_sg_copy *sgc, *tmpsgc;
2547 struct binder_ptr_fixup *pf, *tmppf;
2548
2549 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2550 list_del(&sgc->node);
2551 kfree(sgc);
2552 }
2553 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2554 list_del(&pf->node);
2555 kfree(pf);
2556 }
2557 }
2558
2559 /**
2560 * binder_defer_copy() - queue a scatter-gather buffer for copy
2561 * @sgc_head: list_head of scatter-gather copy list
2562 * @offset: binder buffer offset in target process
2563 * @sender_uaddr: user address in source process
2564 * @length: bytes to copy
2565 *
2566 * Specify a scatter-gather block to be copied. The actual copy must
2567 * be deferred until all the needed fixups are identified and queued.
2568 * Then the copy and fixups are done together so un-translated values
2569 * from the source are never visible in the target buffer.
2570 *
2571 * We are guaranteed that repeated calls to this function will have
2572 * monotonically increasing @offset values so the list will naturally
2573 * be ordered.
2574 *
2575 * Return: 0=success, else -errno
2576 */
2577 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2578 const void __user *sender_uaddr, size_t length)
2579 {
2580 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2581
2582 if (!bc)
2583 return -ENOMEM;
2584
2585 bc->offset = offset;
2586 bc->sender_uaddr = sender_uaddr;
2587 bc->length = length;
2588 INIT_LIST_HEAD(&bc->node);
2589
2590 /*
2591 * We are guaranteed that the deferred copies are in-order
2592 * so just add to the tail.
2593 */
2594 list_add_tail(&bc->node, sgc_head);
2595
2596 return 0;
2597 }
2598
2599 /**
2600 * binder_add_fixup() - queue a fixup to be applied to sg copy
2601 * @pf_head: list_head of binder ptr fixup list
2602 * @offset: binder buffer offset in target process
2603 * @fixup: bytes to be copied for fixup
2604 * @skip_size: bytes to skip when copying (fixup will be applied later)
2605 *
2606 * Add the specified fixup to a list ordered by @offset. When copying
2607 * the scatter-gather buffers, the fixup will be copied instead of
2608 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2609 * will be applied later (in target process context), so we just skip
2610 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2611 * value in @fixup.
2612 *
2613 * This function is called *mostly* in @offset order, but there are
2614 * exceptions. Since out-of-order inserts are relatively uncommon,
2615 * we insert the new element by searching backward from the tail of
2616 * the list.
2617 *
2618 * Return: 0=success, else -errno
2619 */
2620 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2621 binder_uintptr_t fixup, size_t skip_size)
2622 {
2623 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2624 struct binder_ptr_fixup *tmppf;
2625
2626 if (!pf)
2627 return -ENOMEM;
2628
2629 pf->offset = offset;
2630 pf->fixup_data = fixup;
2631 pf->skip_size = skip_size;
2632 INIT_LIST_HEAD(&pf->node);
2633
2634 /* Fixups are *mostly* added in-order, but there are some
2635 * exceptions. Look backwards through list for insertion point.
2636 */
2637 list_for_each_entry_reverse(tmppf, pf_head, node) {
2638 if (tmppf->offset < pf->offset) {
2639 list_add(&pf->node, &tmppf->node);
2640 return 0;
2641 }
2642 }
2643 /*
2644 * if we get here, then the new offset is the lowest so
2645 * insert at the head
2646 */
2647 list_add(&pf->node, pf_head);
2648 return 0;
2649 }
2650
2651 static int binder_translate_fd_array(struct list_head *pf_head,
2652 struct binder_fd_array_object *fda,
2653 const void __user *sender_ubuffer,
2654 struct binder_buffer_object *parent,
2655 struct binder_buffer_object *sender_uparent,
2656 struct binder_transaction *t,
2657 struct binder_thread *thread,
2658 struct binder_transaction *in_reply_to)
2659 {
2660 binder_size_t fdi, fd_buf_size;
2661 binder_size_t fda_offset;
2662 const void __user *sender_ufda_base;
2663 struct binder_proc *proc = thread->proc;
2664 int ret;
2665
2666 if (fda->num_fds == 0)
2667 return 0;
2668
2669 fd_buf_size = sizeof(u32) * fda->num_fds;
2670 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2671 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2672 proc->pid, thread->pid, (u64)fda->num_fds);
2673 return -EINVAL;
2674 }
2675 if (fd_buf_size > parent->length ||
2676 fda->parent_offset > parent->length - fd_buf_size) {
2677 /* No space for all file descriptors here. */
2678 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2679 proc->pid, thread->pid, (u64)fda->num_fds);
2680 return -EINVAL;
2681 }
2682 /*
2683 * the source data for binder_buffer_object is visible
2684 * to user-space and the @buffer element is the user
2685 * pointer to the buffer_object containing the fd_array.
2686 * Convert the address to an offset relative to
2687 * the base of the transaction buffer.
2688 */
2689 fda_offset = parent->buffer - t->buffer->user_data +
2690 fda->parent_offset;
2691 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2692 fda->parent_offset;
2693
2694 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2695 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2696 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2697 proc->pid, thread->pid);
2698 return -EINVAL;
2699 }
2700 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2701 if (ret)
2702 return ret;
2703
2704 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2705 u32 fd;
2706 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2707 binder_size_t sender_uoffset = fdi * sizeof(fd);
2708
2709 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2710 if (!ret)
2711 ret = binder_translate_fd(fd, offset, t, thread,
2712 in_reply_to);
2713 if (ret)
2714 return ret > 0 ? -EINVAL : ret;
2715 }
2716 return 0;
2717 }
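/*
 * Illustrative userspace sketch (hypothetical helper) of the
 * sender-side layout expected above: a BINDER_TYPE_PTR parent buffer
 * carries the u32 fd array, and the BINDER_TYPE_FDA object names that
 * parent by its index in the offsets array plus the byte offset of the
 * array within it. Index/offset values are arbitrary examples.
 */
#include <string.h>
#include <linux/android/binder.h>

static void example_fill_fda(struct binder_fd_array_object *fda, __u32 nr)
{
	memset(fda, 0, sizeof(*fda));
	fda->hdr.type = BINDER_TYPE_FDA;
	fda->num_fds = nr;
	fda->parent = 0;	/* index of parent PTR object in offsets[] */
	fda->parent_offset = 0;	/* u32-aligned offset of fds[] in parent */
}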
2718
2719 static int binder_fixup_parent(struct list_head *pf_head,
2720 struct binder_transaction *t,
2721 struct binder_thread *thread,
2722 struct binder_buffer_object *bp,
2723 binder_size_t off_start_offset,
2724 binder_size_t num_valid,
2725 binder_size_t last_fixup_obj_off,
2726 binder_size_t last_fixup_min_off)
2727 {
2728 struct binder_buffer_object *parent;
2729 struct binder_buffer *b = t->buffer;
2730 struct binder_proc *proc = thread->proc;
2731 struct binder_proc *target_proc = t->to_proc;
2732 struct binder_object object;
2733 binder_size_t buffer_offset;
2734 binder_size_t parent_offset;
2735
2736 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2737 return 0;
2738
2739 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2740 off_start_offset, &parent_offset,
2741 num_valid);
2742 if (!parent) {
2743 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2744 proc->pid, thread->pid);
2745 return -EINVAL;
2746 }
2747
2748 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2749 parent_offset, bp->parent_offset,
2750 last_fixup_obj_off,
2751 last_fixup_min_off)) {
2752 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2753 proc->pid, thread->pid);
2754 return -EINVAL;
2755 }
2756
2757 if (parent->length < sizeof(binder_uintptr_t) ||
2758 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2759 /* No space for a pointer here! */
2760 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2761 proc->pid, thread->pid);
2762 return -EINVAL;
2763 }
2764
2765 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2766
2767 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2768 }
2769
2770 /**
2771 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2772 * @t1: the pending async txn in the frozen process
2773 * @t2: the new async txn to supersede the outdated pending one
2774 *
2775 * Return: true if t2 can supersede t1
2776  *         false if t2 cannot supersede t1
2777 */
2778 static bool binder_can_update_transaction(struct binder_transaction *t1,
2779 struct binder_transaction *t2)
2780 {
2781 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2782 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2783 return false;
2784 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2785 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2786 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2787 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2788 return true;
2789 return false;
2790 }
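/*
 * Illustrative userspace sketch (hypothetical helper): only oneway
 * transactions explicitly marked TF_UPDATE_TXN, with a matching code
 * and target node, can be superseded by the check above while the
 * receiving process is frozen.
 */
#include <linux/android/binder.h>

static void example_mark_updatable(struct binder_transaction_data *tr)
{
	tr->flags = TF_ONE_WAY | TF_UPDATE_TXN;
	tr->code = 42;		/* example: repeated state-refresh call */
}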
2791
2792 /**
2793 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2794 * @t: new async transaction
2795 * @target_list: list to find outdated transaction
2796 *
2797 * Return: the outdated transaction if found
2798  *         NULL if no outdated transaction can be found
2799 *
2800 * Requires the proc->inner_lock to be held.
2801 */
2802 static struct binder_transaction *
2803 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2804 struct list_head *target_list)
2805 {
2806 struct binder_work *w;
2807
2808 list_for_each_entry(w, target_list, entry) {
2809 struct binder_transaction *t_queued;
2810
2811 if (w->type != BINDER_WORK_TRANSACTION)
2812 continue;
2813 t_queued = container_of(w, struct binder_transaction, work);
2814 if (binder_can_update_transaction(t_queued, t))
2815 return t_queued;
2816 }
2817 return NULL;
2818 }
2819
2820 /**
2821 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2822 * @t: transaction to send
2823 * @proc: process to send the transaction to
2824 * @thread: thread in @proc to send the transaction to (may be NULL)
2825 *
2826 * This function queues a transaction to the specified process. It will try
2827 * to find a thread in the target process to handle the transaction and
2828 * wake it up. If no thread is found, the work is queued to the proc
2829 * waitqueue.
2830 *
2831 * If the @thread parameter is not NULL, the transaction is always queued
2832 * to the waitlist of that specific thread.
2833 *
2834 * Return: 0 if the transaction was successfully queued
2835 * BR_DEAD_REPLY if the target process or thread is dead
2836 * BR_FROZEN_REPLY if the target process or thread is frozen and
2837 * the sync transaction was rejected
2838 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2839 * and the async transaction was successfully queued
2840 */
2841 static int binder_proc_transaction(struct binder_transaction *t,
2842 struct binder_proc *proc,
2843 struct binder_thread *thread)
2844 {
2845 struct binder_node *node = t->buffer->target_node;
2846 bool oneway = !!(t->flags & TF_ONE_WAY);
2847 bool pending_async = false;
2848 struct binder_transaction *t_outdated = NULL;
2849 bool frozen = false;
2850
2851 BUG_ON(!node);
2852 binder_node_lock(node);
2853 if (oneway) {
2854 BUG_ON(thread);
2855 if (node->has_async_transaction)
2856 pending_async = true;
2857 else
2858 node->has_async_transaction = true;
2859 }
2860
2861 binder_inner_proc_lock(proc);
2862 if (proc->is_frozen) {
2863 frozen = true;
2864 proc->sync_recv |= !oneway;
2865 proc->async_recv |= oneway;
2866 }
2867
2868 if ((frozen && !oneway) || proc->is_dead ||
2869 (thread && thread->is_dead)) {
2870 binder_inner_proc_unlock(proc);
2871 binder_node_unlock(node);
2872 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2873 }
2874
2875 if (!thread && !pending_async)
2876 thread = binder_select_thread_ilocked(proc);
2877
2878 if (thread) {
2879 binder_enqueue_thread_work_ilocked(thread, &t->work);
2880 } else if (!pending_async) {
2881 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2882 } else {
2883 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2884 t_outdated = binder_find_outdated_transaction_ilocked(t,
2885 &node->async_todo);
2886 if (t_outdated) {
2887 binder_debug(BINDER_DEBUG_TRANSACTION,
2888 "txn %d supersedes %d\n",
2889 t->debug_id, t_outdated->debug_id);
2890 list_del_init(&t_outdated->work.entry);
2891 proc->outstanding_txns--;
2892 }
2893 }
2894 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2895 }
2896
2897 if (!pending_async)
2898 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2899
2900 proc->outstanding_txns++;
2901 binder_inner_proc_unlock(proc);
2902 binder_node_unlock(node);
2903
2904 /*
2905 * To reduce potential contention, free the outdated transaction and
2906 * buffer after releasing the locks.
2907 */
2908 if (t_outdated) {
2909 struct binder_buffer *buffer = t_outdated->buffer;
2910
2911 t_outdated->buffer = NULL;
2912 buffer->transaction = NULL;
2913 trace_binder_transaction_update_buffer_release(buffer);
2914 binder_release_entire_buffer(proc, NULL, buffer, false);
2915 binder_alloc_free_buf(&proc->alloc, buffer);
2916 kfree(t_outdated);
2917 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2918 }
2919
2920 if (oneway && frozen)
2921 return BR_TRANSACTION_PENDING_FROZEN;
2922
2923 return 0;
2924 }
2925
2926 /**
2927 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2928 * @node: struct binder_node for which to get refs
2929 * @procp: returns @node->proc if valid
2930 * @error: if no @procp then returns BR_DEAD_REPLY
2931 *
2932 * User-space normally keeps the node alive when creating a transaction
2933 * since it has a reference to the target. The local strong ref keeps it
2934 * alive if the sending process dies before the target process processes
2935 * the transaction. If the source process is malicious or has a reference
2936 * counting bug, relying on the local strong ref can fail.
2937 *
2938 * Since user-space can cause the local strong ref to go away, we also take
2939 * a tmpref on the node to ensure it survives while we are constructing
2940 * the transaction. We also need a tmpref on the proc while we are
2941 * constructing the transaction, so we take that here as well.
2942 *
2943  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2944  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2945  * target proc has died, @error is set to BR_DEAD_REPLY.
2946 */
2947 static struct binder_node *binder_get_node_refs_for_txn(
2948 struct binder_node *node,
2949 struct binder_proc **procp,
2950 uint32_t *error)
2951 {
2952 struct binder_node *target_node = NULL;
2953
2954 binder_node_inner_lock(node);
2955 if (node->proc) {
2956 target_node = node;
2957 binder_inc_node_nilocked(node, 1, 0, NULL);
2958 binder_inc_node_tmpref_ilocked(node);
2959 node->proc->tmp_ref++;
2960 *procp = node->proc;
2961 } else
2962 *error = BR_DEAD_REPLY;
2963 binder_node_inner_unlock(node);
2964
2965 return target_node;
2966 }
2967
2968 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2969 uint32_t command, int32_t param)
2970 {
2971 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2972
2973 if (!from) {
2974 /* annotation for sparse */
2975 __release(&from->proc->inner_lock);
2976 return;
2977 }
2978
2979 /* don't override existing errors */
2980 if (from->ee.command == BR_OK)
2981 binder_set_extended_error(&from->ee, id, command, param);
2982 binder_inner_proc_unlock(from->proc);
2983 binder_thread_dec_tmpref(from);
2984 }
2985
2986 static void binder_transaction(struct binder_proc *proc,
2987 struct binder_thread *thread,
2988 struct binder_transaction_data *tr, int reply,
2989 binder_size_t extra_buffers_size)
2990 {
2991 int ret;
2992 struct binder_transaction *t;
2993 struct binder_work *w;
2994 struct binder_work *tcomplete;
2995 binder_size_t buffer_offset = 0;
2996 binder_size_t off_start_offset, off_end_offset;
2997 binder_size_t off_min;
2998 binder_size_t sg_buf_offset, sg_buf_end_offset;
2999 binder_size_t user_offset = 0;
3000 struct binder_proc *target_proc = NULL;
3001 struct binder_thread *target_thread = NULL;
3002 struct binder_node *target_node = NULL;
3003 struct binder_transaction *in_reply_to = NULL;
3004 struct binder_transaction_log_entry *e;
3005 uint32_t return_error = 0;
3006 uint32_t return_error_param = 0;
3007 uint32_t return_error_line = 0;
3008 binder_size_t last_fixup_obj_off = 0;
3009 binder_size_t last_fixup_min_off = 0;
3010 struct binder_context *context = proc->context;
3011 int t_debug_id = atomic_inc_return(&binder_last_id);
3012 ktime_t t_start_time = ktime_get();
3013 char *secctx = NULL;
3014 u32 secctx_sz = 0;
3015 struct list_head sgc_head;
3016 struct list_head pf_head;
3017 const void __user *user_buffer = (const void __user *)
3018 (uintptr_t)tr->data.ptr.buffer;
3019 INIT_LIST_HEAD(&sgc_head);
3020 INIT_LIST_HEAD(&pf_head);
3021
3022 e = binder_transaction_log_add(&binder_transaction_log);
3023 e->debug_id = t_debug_id;
3024 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3025 e->from_proc = proc->pid;
3026 e->from_thread = thread->pid;
3027 e->target_handle = tr->target.handle;
3028 e->data_size = tr->data_size;
3029 e->offsets_size = tr->offsets_size;
3030 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3031
3032 binder_inner_proc_lock(proc);
3033 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3034 binder_inner_proc_unlock(proc);
3035
3036 if (reply) {
3037 binder_inner_proc_lock(proc);
3038 in_reply_to = thread->transaction_stack;
3039 if (in_reply_to == NULL) {
3040 binder_inner_proc_unlock(proc);
3041 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3042 proc->pid, thread->pid);
3043 return_error = BR_FAILED_REPLY;
3044 return_error_param = -EPROTO;
3045 return_error_line = __LINE__;
3046 goto err_empty_call_stack;
3047 }
3048 if (in_reply_to->to_thread != thread) {
3049 spin_lock(&in_reply_to->lock);
3050 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3051 proc->pid, thread->pid, in_reply_to->debug_id,
3052 in_reply_to->to_proc ?
3053 in_reply_to->to_proc->pid : 0,
3054 in_reply_to->to_thread ?
3055 in_reply_to->to_thread->pid : 0);
3056 spin_unlock(&in_reply_to->lock);
3057 binder_inner_proc_unlock(proc);
3058 return_error = BR_FAILED_REPLY;
3059 return_error_param = -EPROTO;
3060 return_error_line = __LINE__;
3061 in_reply_to = NULL;
3062 goto err_bad_call_stack;
3063 }
3064 thread->transaction_stack = in_reply_to->to_parent;
3065 binder_inner_proc_unlock(proc);
3066 binder_set_nice(in_reply_to->saved_priority);
3067 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3068 if (target_thread == NULL) {
3069 /* annotation for sparse */
3070 __release(&target_thread->proc->inner_lock);
3071 binder_txn_error("%d:%d reply target not found\n",
3072 thread->pid, proc->pid);
3073 return_error = BR_DEAD_REPLY;
3074 return_error_line = __LINE__;
3075 goto err_dead_binder;
3076 }
3077 if (target_thread->transaction_stack != in_reply_to) {
3078 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3079 proc->pid, thread->pid,
3080 target_thread->transaction_stack ?
3081 target_thread->transaction_stack->debug_id : 0,
3082 in_reply_to->debug_id);
3083 binder_inner_proc_unlock(target_thread->proc);
3084 return_error = BR_FAILED_REPLY;
3085 return_error_param = -EPROTO;
3086 return_error_line = __LINE__;
3087 in_reply_to = NULL;
3088 target_thread = NULL;
3089 goto err_dead_binder;
3090 }
3091 target_proc = target_thread->proc;
3092 target_proc->tmp_ref++;
3093 binder_inner_proc_unlock(target_thread->proc);
3094 } else {
3095 if (tr->target.handle) {
3096 struct binder_ref *ref;
3097
3098 /*
3099 * There must already be a strong ref
3100 * on this node. If so, do a strong
3101 * increment on the node to ensure it
3102 * stays alive until the transaction is
3103 * done.
3104 */
3105 binder_proc_lock(proc);
3106 ref = binder_get_ref_olocked(proc, tr->target.handle,
3107 true);
3108 if (ref) {
3109 target_node = binder_get_node_refs_for_txn(
3110 ref->node, &target_proc,
3111 &return_error);
3112 } else {
3113 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3114 proc->pid, thread->pid, tr->target.handle);
3115 return_error = BR_FAILED_REPLY;
3116 }
3117 binder_proc_unlock(proc);
3118 } else {
3119 mutex_lock(&context->context_mgr_node_lock);
3120 target_node = context->binder_context_mgr_node;
3121 if (target_node)
3122 target_node = binder_get_node_refs_for_txn(
3123 target_node, &target_proc,
3124 &return_error);
3125 else
3126 return_error = BR_DEAD_REPLY;
3127 mutex_unlock(&context->context_mgr_node_lock);
3128 if (target_node && target_proc->pid == proc->pid) {
3129 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3130 proc->pid, thread->pid);
3131 return_error = BR_FAILED_REPLY;
3132 return_error_param = -EINVAL;
3133 return_error_line = __LINE__;
3134 goto err_invalid_target_handle;
3135 }
3136 }
3137 if (!target_node) {
3138 binder_txn_error("%d:%d cannot find target node\n",
3139 thread->pid, proc->pid);
3140 /*
3141 * return_error is set above
3142 */
3143 return_error_param = -EINVAL;
3144 return_error_line = __LINE__;
3145 goto err_dead_binder;
3146 }
3147 e->to_node = target_node->debug_id;
3148 if (WARN_ON(proc == target_proc)) {
3149 binder_txn_error("%d:%d self transactions not allowed\n",
3150 thread->pid, proc->pid);
3151 return_error = BR_FAILED_REPLY;
3152 return_error_param = -EINVAL;
3153 return_error_line = __LINE__;
3154 goto err_invalid_target_handle;
3155 }
3156 if (security_binder_transaction(proc->cred,
3157 target_proc->cred) < 0) {
3158 binder_txn_error("%d:%d transaction credentials failed\n",
3159 thread->pid, proc->pid);
3160 return_error = BR_FAILED_REPLY;
3161 return_error_param = -EPERM;
3162 return_error_line = __LINE__;
3163 goto err_invalid_target_handle;
3164 }
3165 binder_inner_proc_lock(proc);
3166
3167 w = list_first_entry_or_null(&thread->todo,
3168 struct binder_work, entry);
3169 if (!(tr->flags & TF_ONE_WAY) && w &&
3170 w->type == BINDER_WORK_TRANSACTION) {
3171 /*
3172 * Do not allow new outgoing transaction from a
3173 * thread that has a transaction at the head of
3174 * its todo list. Only need to check the head
3175 * because binder_select_thread_ilocked picks a
3176 * thread from proc->waiting_threads to enqueue
3177 * the transaction, and nothing is queued to the
3178 * todo list while the thread is on waiting_threads.
3179 */
3180 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3181 proc->pid, thread->pid);
3182 binder_inner_proc_unlock(proc);
3183 return_error = BR_FAILED_REPLY;
3184 return_error_param = -EPROTO;
3185 return_error_line = __LINE__;
3186 goto err_bad_todo_list;
3187 }
3188
3189 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3190 struct binder_transaction *tmp;
3191
3192 tmp = thread->transaction_stack;
3193 if (tmp->to_thread != thread) {
3194 spin_lock(&tmp->lock);
3195 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3196 proc->pid, thread->pid, tmp->debug_id,
3197 tmp->to_proc ? tmp->to_proc->pid : 0,
3198 tmp->to_thread ?
3199 tmp->to_thread->pid : 0);
3200 spin_unlock(&tmp->lock);
3201 binder_inner_proc_unlock(proc);
3202 return_error = BR_FAILED_REPLY;
3203 return_error_param = -EPROTO;
3204 return_error_line = __LINE__;
3205 goto err_bad_call_stack;
3206 }
3207 while (tmp) {
3208 struct binder_thread *from;
3209
3210 spin_lock(&tmp->lock);
3211 from = tmp->from;
3212 if (from && from->proc == target_proc) {
3213 atomic_inc(&from->tmp_ref);
3214 target_thread = from;
3215 spin_unlock(&tmp->lock);
3216 break;
3217 }
3218 spin_unlock(&tmp->lock);
3219 tmp = tmp->from_parent;
3220 }
3221 }
3222 binder_inner_proc_unlock(proc);
3223 }
3224 if (target_thread)
3225 e->to_thread = target_thread->pid;
3226 e->to_proc = target_proc->pid;
3227
3228 /* TODO: reuse incoming transaction for reply */
3229 t = kzalloc(sizeof(*t), GFP_KERNEL);
3230 if (t == NULL) {
3231 binder_txn_error("%d:%d cannot allocate transaction\n",
3232 thread->pid, proc->pid);
3233 return_error = BR_FAILED_REPLY;
3234 return_error_param = -ENOMEM;
3235 return_error_line = __LINE__;
3236 goto err_alloc_t_failed;
3237 }
3238 INIT_LIST_HEAD(&t->fd_fixups);
3239 binder_stats_created(BINDER_STAT_TRANSACTION);
3240 spin_lock_init(&t->lock);
3241
3242 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3243 if (tcomplete == NULL) {
3244 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3245 thread->pid, proc->pid);
3246 return_error = BR_FAILED_REPLY;
3247 return_error_param = -ENOMEM;
3248 return_error_line = __LINE__;
3249 goto err_alloc_tcomplete_failed;
3250 }
3251 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3252
3253 t->debug_id = t_debug_id;
3254 t->start_time = t_start_time;
3255
3256 if (reply)
3257 binder_debug(BINDER_DEBUG_TRANSACTION,
3258 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3259 proc->pid, thread->pid, t->debug_id,
3260 target_proc->pid, target_thread->pid,
3261 (u64)tr->data.ptr.buffer,
3262 (u64)tr->data.ptr.offsets,
3263 (u64)tr->data_size, (u64)tr->offsets_size,
3264 (u64)extra_buffers_size);
3265 else
3266 binder_debug(BINDER_DEBUG_TRANSACTION,
3267 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3268 proc->pid, thread->pid, t->debug_id,
3269 target_proc->pid, target_node->debug_id,
3270 (u64)tr->data.ptr.buffer,
3271 (u64)tr->data.ptr.offsets,
3272 (u64)tr->data_size, (u64)tr->offsets_size,
3273 (u64)extra_buffers_size);
3274
3275 if (!reply && !(tr->flags & TF_ONE_WAY))
3276 t->from = thread;
3277 else
3278 t->from = NULL;
3279 t->from_pid = proc->pid;
3280 t->from_tid = thread->pid;
3281 t->sender_euid = task_euid(proc->tsk);
3282 t->to_proc = target_proc;
3283 t->to_thread = target_thread;
3284 t->code = tr->code;
3285 t->flags = tr->flags;
3286 t->priority = task_nice(current);
3287
3288 if (target_node && target_node->txn_security_ctx) {
3289 u32 secid;
3290 size_t added_size;
3291
3292 security_cred_getsecid(proc->cred, &secid);
3293 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3294 if (ret) {
3295 binder_txn_error("%d:%d failed to get security context\n",
3296 thread->pid, proc->pid);
3297 return_error = BR_FAILED_REPLY;
3298 return_error_param = ret;
3299 return_error_line = __LINE__;
3300 goto err_get_secctx_failed;
3301 }
3302 added_size = ALIGN(secctx_sz, sizeof(u64));
3303 extra_buffers_size += added_size;
3304 if (extra_buffers_size < added_size) {
3305 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3306 thread->pid, proc->pid);
3307 return_error = BR_FAILED_REPLY;
3308 return_error_param = -EINVAL;
3309 return_error_line = __LINE__;
3310 goto err_bad_extra_size;
3311 }
3312 }
3313
3314 trace_binder_transaction(reply, t, target_node);
3315
3316 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3317 tr->offsets_size, extra_buffers_size,
3318 !reply && (t->flags & TF_ONE_WAY));
3319 if (IS_ERR(t->buffer)) {
3320 char *s;
3321
3322 ret = PTR_ERR(t->buffer);
3323 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3324 : (ret == -ENOSPC) ? ": no space left"
3325 : (ret == -ENOMEM) ? ": memory allocation failed"
3326 : "";
3327 binder_txn_error("cannot allocate buffer%s", s);
3328
3329 return_error_param = PTR_ERR(t->buffer);
3330 return_error = return_error_param == -ESRCH ?
3331 BR_DEAD_REPLY : BR_FAILED_REPLY;
3332 return_error_line = __LINE__;
3333 t->buffer = NULL;
3334 goto err_binder_alloc_buf_failed;
3335 }
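/*
 * Target buffer layout: transaction data, then the offsets array,
 * then any scatter-gather buffers, with the security context (if
 * present) occupying the u64-aligned tail of the extra buffers
 * region. That is why buf_offset below is computed from the end of
 * the allocation minus the aligned secctx size.
 */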
3336 if (secctx) {
3337 int err;
3338 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3339 ALIGN(tr->offsets_size, sizeof(void *)) +
3340 ALIGN(extra_buffers_size, sizeof(void *)) -
3341 ALIGN(secctx_sz, sizeof(u64));
3342
3343 t->security_ctx = t->buffer->user_data + buf_offset;
3344 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3345 t->buffer, buf_offset,
3346 secctx, secctx_sz);
3347 if (err) {
3348 t->security_ctx = 0;
3349 WARN_ON(1);
3350 }
3351 security_release_secctx(secctx, secctx_sz);
3352 secctx = NULL;
3353 }
3354 t->buffer->debug_id = t->debug_id;
3355 t->buffer->transaction = t;
3356 t->buffer->target_node = target_node;
3357 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3358 trace_binder_transaction_alloc_buf(t->buffer);
3359
3360 if (binder_alloc_copy_user_to_buffer(
3361 &target_proc->alloc,
3362 t->buffer,
3363 ALIGN(tr->data_size, sizeof(void *)),
3364 (const void __user *)
3365 (uintptr_t)tr->data.ptr.offsets,
3366 tr->offsets_size)) {
3367 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3368 proc->pid, thread->pid);
3369 return_error = BR_FAILED_REPLY;
3370 return_error_param = -EFAULT;
3371 return_error_line = __LINE__;
3372 goto err_copy_data_failed;
3373 }
3374 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3375 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3376 proc->pid, thread->pid, (u64)tr->offsets_size);
3377 return_error = BR_FAILED_REPLY;
3378 return_error_param = -EINVAL;
3379 return_error_line = __LINE__;
3380 goto err_bad_offset;
3381 }
3382 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3383 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3384 proc->pid, thread->pid,
3385 (u64)extra_buffers_size);
3386 return_error = BR_FAILED_REPLY;
3387 return_error_param = -EINVAL;
3388 return_error_line = __LINE__;
3389 goto err_bad_offset;
3390 }
3391 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3392 buffer_offset = off_start_offset;
3393 off_end_offset = off_start_offset + tr->offsets_size;
3394 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3395 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3396 ALIGN(secctx_sz, sizeof(u64));
3397 off_min = 0;
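/*
 * Walk the offsets array: each binder_size_t entry locates a
 * flattened binder object inside the data buffer. Offsets must be
 * in-bounds and strictly increasing (off_min rejects overlapping or
 * out-of-order objects), and the plain data between objects is
 * copied from userspace as the loop advances user_offset.
 */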
3398 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3399 buffer_offset += sizeof(binder_size_t)) {
3400 struct binder_object_header *hdr;
3401 size_t object_size;
3402 struct binder_object object;
3403 binder_size_t object_offset;
3404 binder_size_t copy_size;
3405
3406 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3407 &object_offset,
3408 t->buffer,
3409 buffer_offset,
3410 sizeof(object_offset))) {
3411 binder_txn_error("%d:%d copy offset from buffer failed\n",
3412 thread->pid, proc->pid);
3413 return_error = BR_FAILED_REPLY;
3414 return_error_param = -EINVAL;
3415 return_error_line = __LINE__;
3416 goto err_bad_offset;
3417 }
3418
3419 /*
3420 * Copy the source user buffer up to the next object
3421 * that will be processed.
3422 */
3423 copy_size = object_offset - user_offset;
3424 if (copy_size && (user_offset > object_offset ||
3425 object_offset > tr->data_size ||
3426 binder_alloc_copy_user_to_buffer(
3427 &target_proc->alloc,
3428 t->buffer, user_offset,
3429 user_buffer + user_offset,
3430 copy_size))) {
3431 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3432 proc->pid, thread->pid);
3433 return_error = BR_FAILED_REPLY;
3434 return_error_param = -EFAULT;
3435 return_error_line = __LINE__;
3436 goto err_copy_data_failed;
3437 }
3438 object_size = binder_get_object(target_proc, user_buffer,
3439 t->buffer, object_offset, &object);
3440 if (object_size == 0 || object_offset < off_min) {
3441 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3442 proc->pid, thread->pid,
3443 (u64)object_offset,
3444 (u64)off_min,
3445 (u64)t->buffer->data_size);
3446 return_error = BR_FAILED_REPLY;
3447 return_error_param = -EINVAL;
3448 return_error_line = __LINE__;
3449 goto err_bad_offset;
3450 }
3451 /*
3452 * Set offset to the next buffer fragment to be
3453 * copied
3454 */
3455 user_offset = object_offset + object_size;
3456
3457 hdr = &object.hdr;
3458 off_min = object_offset + object_size;
3459 switch (hdr->type) {
3460 case BINDER_TYPE_BINDER:
3461 case BINDER_TYPE_WEAK_BINDER: {
3462 struct flat_binder_object *fp;
3463
3464 fp = to_flat_binder_object(hdr);
3465 ret = binder_translate_binder(fp, t, thread);
3466
3467 if (ret < 0 ||
3468 binder_alloc_copy_to_buffer(&target_proc->alloc,
3469 t->buffer,
3470 object_offset,
3471 fp, sizeof(*fp))) {
3472 binder_txn_error("%d:%d translate binder failed\n",
3473 thread->pid, proc->pid);
3474 return_error = BR_FAILED_REPLY;
3475 return_error_param = ret;
3476 return_error_line = __LINE__;
3477 goto err_translate_failed;
3478 }
3479 } break;
3480 case BINDER_TYPE_HANDLE:
3481 case BINDER_TYPE_WEAK_HANDLE: {
3482 struct flat_binder_object *fp;
3483
3484 fp = to_flat_binder_object(hdr);
3485 ret = binder_translate_handle(fp, t, thread);
3486 if (ret < 0 ||
3487 binder_alloc_copy_to_buffer(&target_proc->alloc,
3488 t->buffer,
3489 object_offset,
3490 fp, sizeof(*fp))) {
3491 binder_txn_error("%d:%d translate handle failed\n",
3492 thread->pid, proc->pid);
3493 return_error = BR_FAILED_REPLY;
3494 return_error_param = ret;
3495 return_error_line = __LINE__;
3496 goto err_translate_failed;
3497 }
3498 } break;
3499
3500 case BINDER_TYPE_FD: {
3501 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3502 binder_size_t fd_offset = object_offset +
3503 (uintptr_t)&fp->fd - (uintptr_t)fp;
3504 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3505 thread, in_reply_to);
3506
3507 fp->pad_binder = 0;
3508 if (ret < 0 ||
3509 binder_alloc_copy_to_buffer(&target_proc->alloc,
3510 t->buffer,
3511 object_offset,
3512 fp, sizeof(*fp))) {
3513 binder_txn_error("%d:%d translate fd failed\n",
3514 thread->pid, proc->pid);
3515 return_error = BR_FAILED_REPLY;
3516 return_error_param = ret;
3517 return_error_line = __LINE__;
3518 goto err_translate_failed;
3519 }
3520 } break;
3521 case BINDER_TYPE_FDA: {
3522 struct binder_object ptr_object;
3523 binder_size_t parent_offset;
3524 struct binder_object user_object;
3525 size_t user_parent_size;
3526 struct binder_fd_array_object *fda =
3527 to_binder_fd_array_object(hdr);
3528 size_t num_valid = (buffer_offset - off_start_offset) /
3529 sizeof(binder_size_t);
3530 struct binder_buffer_object *parent =
3531 binder_validate_ptr(target_proc, t->buffer,
3532 &ptr_object, fda->parent,
3533 off_start_offset,
3534 &parent_offset,
3535 num_valid);
3536 if (!parent) {
3537 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3538 proc->pid, thread->pid);
3539 return_error = BR_FAILED_REPLY;
3540 return_error_param = -EINVAL;
3541 return_error_line = __LINE__;
3542 goto err_bad_parent;
3543 }
3544 if (!binder_validate_fixup(target_proc, t->buffer,
3545 off_start_offset,
3546 parent_offset,
3547 fda->parent_offset,
3548 last_fixup_obj_off,
3549 last_fixup_min_off)) {
3550 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3551 proc->pid, thread->pid);
3552 return_error = BR_FAILED_REPLY;
3553 return_error_param = -EINVAL;
3554 return_error_line = __LINE__;
3555 goto err_bad_parent;
3556 }
3557 /*
3558 * We need to read the user version of the parent
3559 * object to get the original user offset
3560 */
3561 user_parent_size =
3562 binder_get_object(proc, user_buffer, t->buffer,
3563 parent_offset, &user_object);
3564 if (user_parent_size != sizeof(user_object.bbo)) {
3565 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3566 proc->pid, thread->pid,
3567 user_parent_size,
3568 sizeof(user_object.bbo));
3569 return_error = BR_FAILED_REPLY;
3570 return_error_param = -EINVAL;
3571 return_error_line = __LINE__;
3572 goto err_bad_parent;
3573 }
3574 ret = binder_translate_fd_array(&pf_head, fda,
3575 user_buffer, parent,
3576 &user_object.bbo, t,
3577 thread, in_reply_to);
3578 if (!ret)
3579 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3580 t->buffer,
3581 object_offset,
3582 fda, sizeof(*fda));
3583 if (ret) {
3584 binder_txn_error("%d:%d translate fd array failed\n",
3585 thread->pid, proc->pid);
3586 return_error = BR_FAILED_REPLY;
3587 return_error_param = ret > 0 ? -EINVAL : ret;
3588 return_error_line = __LINE__;
3589 goto err_translate_failed;
3590 }
3591 last_fixup_obj_off = parent_offset;
3592 last_fixup_min_off =
3593 fda->parent_offset + sizeof(u32) * fda->num_fds;
3594 } break;
3595 case BINDER_TYPE_PTR: {
3596 struct binder_buffer_object *bp =
3597 to_binder_buffer_object(hdr);
3598 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3599 size_t num_valid;
3600
3601 if (bp->length > buf_left) {
3602 binder_user_error("%d:%d got transaction with too large buffer\n",
3603 proc->pid, thread->pid);
3604 return_error = BR_FAILED_REPLY;
3605 return_error_param = -EINVAL;
3606 return_error_line = __LINE__;
3607 goto err_bad_offset;
3608 }
3609 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3610 (const void __user *)(uintptr_t)bp->buffer,
3611 bp->length);
3612 if (ret) {
3613 binder_txn_error("%d:%d deferred copy failed\n",
3614 thread->pid, proc->pid);
3615 return_error = BR_FAILED_REPLY;
3616 return_error_param = ret;
3617 return_error_line = __LINE__;
3618 goto err_translate_failed;
3619 }
3620 /* Fixup buffer pointer to target proc address space */
3621 bp->buffer = t->buffer->user_data + sg_buf_offset;
3622 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3623
3624 num_valid = (buffer_offset - off_start_offset) /
3625 sizeof(binder_size_t);
3626 ret = binder_fixup_parent(&pf_head, t,
3627 thread, bp,
3628 off_start_offset,
3629 num_valid,
3630 last_fixup_obj_off,
3631 last_fixup_min_off);
3632 if (ret < 0 ||
3633 binder_alloc_copy_to_buffer(&target_proc->alloc,
3634 t->buffer,
3635 object_offset,
3636 bp, sizeof(*bp))) {
3637 binder_txn_error("%d:%d failed to fixup parent\n",
3638 thread->pid, proc->pid);
3639 return_error = BR_FAILED_REPLY;
3640 return_error_param = ret;
3641 return_error_line = __LINE__;
3642 goto err_translate_failed;
3643 }
3644 last_fixup_obj_off = object_offset;
3645 last_fixup_min_off = 0;
3646 } break;
3647 default:
3648 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3649 proc->pid, thread->pid, hdr->type);
3650 return_error = BR_FAILED_REPLY;
3651 return_error_param = -EINVAL;
3652 return_error_line = __LINE__;
3653 goto err_bad_object_type;
3654 }
3655 }
3656 /* Done processing objects, copy the rest of the buffer */
3657 if (binder_alloc_copy_user_to_buffer(
3658 &target_proc->alloc,
3659 t->buffer, user_offset,
3660 user_buffer + user_offset,
3661 tr->data_size - user_offset)) {
3662 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3663 proc->pid, thread->pid);
3664 return_error = BR_FAILED_REPLY;
3665 return_error_param = -EFAULT;
3666 return_error_line = __LINE__;
3667 goto err_copy_data_failed;
3668 }
3669
3670 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3671 &sgc_head, &pf_head);
3672 if (ret) {
3673 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3674 proc->pid, thread->pid);
3675 return_error = BR_FAILED_REPLY;
3676 return_error_param = ret;
3677 return_error_line = __LINE__;
3678 goto err_copy_data_failed;
3679 }
3680 if (t->buffer->oneway_spam_suspect)
3681 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3682 else
3683 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3684 t->work.type = BINDER_WORK_TRANSACTION;
3685
3686 if (reply) {
3687 binder_enqueue_thread_work(thread, tcomplete);
3688 binder_inner_proc_lock(target_proc);
3689 if (target_thread->is_dead) {
3690 return_error = BR_DEAD_REPLY;
3691 binder_inner_proc_unlock(target_proc);
3692 goto err_dead_proc_or_thread;
3693 }
3694 BUG_ON(t->buffer->async_transaction != 0);
3695 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3696 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3697 target_proc->outstanding_txns++;
3698 binder_inner_proc_unlock(target_proc);
3699 wake_up_interruptible_sync(&target_thread->wait);
3700 binder_free_transaction(in_reply_to);
3701 } else if (!(t->flags & TF_ONE_WAY)) {
3702 BUG_ON(t->buffer->async_transaction != 0);
3703 binder_inner_proc_lock(proc);
3704 /*
3705 * Defer the TRANSACTION_COMPLETE, so we don't return to
3706 * userspace immediately; this allows the target process to
3707 * immediately start processing this transaction, reducing
3708 * latency. We will then return the TRANSACTION_COMPLETE when
3709 * the target replies (or there is an error).
3710 */
3711 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3712 t->need_reply = 1;
3713 t->from_parent = thread->transaction_stack;
3714 thread->transaction_stack = t;
3715 binder_inner_proc_unlock(proc);
3716 return_error = binder_proc_transaction(t,
3717 target_proc, target_thread);
3718 if (return_error) {
3719 binder_inner_proc_lock(proc);
3720 binder_pop_transaction_ilocked(thread, t);
3721 binder_inner_proc_unlock(proc);
3722 goto err_dead_proc_or_thread;
3723 }
3724 } else {
3725 BUG_ON(target_node == NULL);
3726 BUG_ON(t->buffer->async_transaction != 1);
3727 return_error = binder_proc_transaction(t, target_proc, NULL);
3728 /*
3729 * Let the caller know when async transaction reaches a frozen
3730 * process and is put in a pending queue, waiting for the target
3731 * process to be unfrozen.
3732 */
3733 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3734 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3735 binder_enqueue_thread_work(thread, tcomplete);
3736 if (return_error &&
3737 return_error != BR_TRANSACTION_PENDING_FROZEN)
3738 goto err_dead_proc_or_thread;
3739 }
3740 if (target_thread)
3741 binder_thread_dec_tmpref(target_thread);
3742 binder_proc_dec_tmpref(target_proc);
3743 if (target_node)
3744 binder_dec_node_tmpref(target_node);
3745 /*
3746 * write barrier to synchronize with initialization
3747 * of log entry
3748 */
3749 smp_wmb();
3750 WRITE_ONCE(e->debug_id_done, t_debug_id);
3751 return;
3752
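/*
 * Error unwind: the labels below are ordered so that jumping to any
 * one of them releases exactly the resources acquired before the
 * failing step, falling through to undo earlier acquisitions.
 */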
3753 err_dead_proc_or_thread:
3754 binder_txn_error("%d:%d dead process or thread\n",
3755 thread->pid, proc->pid);
3756 return_error_line = __LINE__;
3757 binder_dequeue_work(proc, tcomplete);
3758 err_translate_failed:
3759 err_bad_object_type:
3760 err_bad_offset:
3761 err_bad_parent:
3762 err_copy_data_failed:
3763 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3764 binder_free_txn_fixups(t);
3765 trace_binder_transaction_failed_buffer_release(t->buffer);
3766 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3767 buffer_offset, true);
3768 if (target_node)
3769 binder_dec_node_tmpref(target_node);
3770 target_node = NULL;
3771 t->buffer->transaction = NULL;
3772 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3773 err_binder_alloc_buf_failed:
3774 err_bad_extra_size:
3775 if (secctx)
3776 security_release_secctx(secctx, secctx_sz);
3777 err_get_secctx_failed:
3778 kfree(tcomplete);
3779 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3780 err_alloc_tcomplete_failed:
3781 if (trace_binder_txn_latency_free_enabled())
3782 binder_txn_latency_free(t);
3783 kfree(t);
3784 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3785 err_alloc_t_failed:
3786 err_bad_todo_list:
3787 err_bad_call_stack:
3788 err_empty_call_stack:
3789 err_dead_binder:
3790 err_invalid_target_handle:
3791 if (target_node) {
3792 binder_dec_node(target_node, 1, 0);
3793 binder_dec_node_tmpref(target_node);
3794 }
3795
3796 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3797 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3798 proc->pid, thread->pid, reply ? "reply" :
3799 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3800 target_proc ? target_proc->pid : 0,
3801 target_thread ? target_thread->pid : 0,
3802 t_debug_id, return_error, return_error_param,
3803 (u64)tr->data_size, (u64)tr->offsets_size,
3804 return_error_line);
3805
3806 if (target_thread)
3807 binder_thread_dec_tmpref(target_thread);
3808 if (target_proc)
3809 binder_proc_dec_tmpref(target_proc);
3810
3811 {
3812 struct binder_transaction_log_entry *fe;
3813
3814 e->return_error = return_error;
3815 e->return_error_param = return_error_param;
3816 e->return_error_line = return_error_line;
3817 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3818 *fe = *e;
3819 /*
3820 * write barrier to synchronize with initialization
3821 * of log entry
3822 */
3823 smp_wmb();
3824 WRITE_ONCE(e->debug_id_done, t_debug_id);
3825 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3826 }
3827
3828 BUG_ON(thread->return_error.cmd != BR_OK);
3829 if (in_reply_to) {
3830 binder_set_txn_from_error(in_reply_to, t_debug_id,
3831 return_error, return_error_param);
3832 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3833 binder_enqueue_thread_work(thread, &thread->return_error.work);
3834 binder_send_failed_reply(in_reply_to, return_error);
3835 } else {
3836 binder_inner_proc_lock(proc);
3837 binder_set_extended_error(&thread->ee, t_debug_id,
3838 return_error, return_error_param);
3839 binder_inner_proc_unlock(proc);
3840 thread->return_error.cmd = return_error;
3841 binder_enqueue_thread_work(thread, &thread->return_error.work);
3842 }
3843 }
3844
3845 /**
3846 * binder_free_buf() - free the specified buffer
3847 * @proc: binder proc that owns buffer
3848 * @buffer: buffer to be freed
3849 * @is_failure: true if the transaction failed to send
3850 *
3851 * If the buffer belongs to an async transaction, enqueue the next
3852 * async transaction from the node.
3853 *
3854 * Cleanup buffer and free it.
3855 */
3856 static void
3857 binder_free_buf(struct binder_proc *proc,
3858 struct binder_thread *thread,
3859 struct binder_buffer *buffer, bool is_failure)
3860 {
3861 binder_inner_proc_lock(proc);
3862 if (buffer->transaction) {
3863 buffer->transaction->buffer = NULL;
3864 buffer->transaction = NULL;
3865 }
3866 binder_inner_proc_unlock(proc);
3867 if (buffer->async_transaction && buffer->target_node) {
3868 struct binder_node *buf_node;
3869 struct binder_work *w;
3870
3871 buf_node = buffer->target_node;
3872 binder_node_inner_lock(buf_node);
3873 BUG_ON(!buf_node->has_async_transaction);
3874 BUG_ON(buf_node->proc != proc);
3875 w = binder_dequeue_work_head_ilocked(
3876 &buf_node->async_todo);
3877 if (!w) {
3878 buf_node->has_async_transaction = false;
3879 } else {
3880 binder_enqueue_work_ilocked(
3881 w, &proc->todo);
3882 binder_wakeup_proc_ilocked(proc);
3883 }
3884 binder_node_inner_unlock(buf_node);
3885 }
3886 trace_binder_transaction_buffer_release(buffer);
3887 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3888 binder_alloc_free_buf(&proc->alloc, buffer);
3889 }
3890
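/*
 * binder_thread_write() consumes a packed stream of BC_* records from
 * the write buffer: each record is a 32-bit command followed by that
 * command's payload (see the switch below). A hypothetical userspace
 * sketch (illustrative only, not part of this file) that frees a
 * received buffer:
 *
 *	struct { uint32_t cmd; binder_uintptr_t ptr; } __packed rec = {
 *		.cmd = BC_FREE_BUFFER,
 *		.ptr = tr.data.ptr.buffer,  // from a prior BR_TRANSACTION
 *	};
 *	// point bwr.write_buffer at &rec and issue BINDER_WRITE_READ
 *
 * *consumed is advanced past each fully-processed record so userspace
 * can resume after a partial write.
 */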
3891 static int binder_thread_write(struct binder_proc *proc,
3892 struct binder_thread *thread,
3893 binder_uintptr_t binder_buffer, size_t size,
3894 binder_size_t *consumed)
3895 {
3896 uint32_t cmd;
3897 struct binder_context *context = proc->context;
3898 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3899 void __user *ptr = buffer + *consumed;
3900 void __user *end = buffer + size;
3901
3902 while (ptr < end && thread->return_error.cmd == BR_OK) {
3903 int ret;
3904
3905 if (get_user(cmd, (uint32_t __user *)ptr))
3906 return -EFAULT;
3907 ptr += sizeof(uint32_t);
3908 trace_binder_command(cmd);
3909 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3910 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3911 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3912 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3913 }
3914 switch (cmd) {
3915 case BC_INCREFS:
3916 case BC_ACQUIRE:
3917 case BC_RELEASE:
3918 case BC_DECREFS: {
3919 uint32_t target;
3920 const char *debug_string;
3921 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3922 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3923 struct binder_ref_data rdata;
3924
3925 if (get_user(target, (uint32_t __user *)ptr))
3926 return -EFAULT;
3927
3928 ptr += sizeof(uint32_t);
3929 ret = -1;
3930 if (increment && !target) {
3931 struct binder_node *ctx_mgr_node;
3932
3933 mutex_lock(&context->context_mgr_node_lock);
3934 ctx_mgr_node = context->binder_context_mgr_node;
3935 if (ctx_mgr_node) {
3936 if (ctx_mgr_node->proc == proc) {
3937 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3938 proc->pid, thread->pid);
3939 mutex_unlock(&context->context_mgr_node_lock);
3940 return -EINVAL;
3941 }
3942 ret = binder_inc_ref_for_node(
3943 proc, ctx_mgr_node,
3944 strong, NULL, &rdata);
3945 }
3946 mutex_unlock(&context->context_mgr_node_lock);
3947 }
3948 if (ret)
3949 ret = binder_update_ref_for_handle(
3950 proc, target, increment, strong,
3951 &rdata);
3952 if (!ret && rdata.desc != target) {
3953 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3954 proc->pid, thread->pid,
3955 target, rdata.desc);
3956 }
3957 switch (cmd) {
3958 case BC_INCREFS:
3959 debug_string = "IncRefs";
3960 break;
3961 case BC_ACQUIRE:
3962 debug_string = "Acquire";
3963 break;
3964 case BC_RELEASE:
3965 debug_string = "Release";
3966 break;
3967 case BC_DECREFS:
3968 default:
3969 debug_string = "DecRefs";
3970 break;
3971 }
3972 if (ret) {
3973 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3974 proc->pid, thread->pid, debug_string,
3975 strong, target, ret);
3976 break;
3977 }
3978 binder_debug(BINDER_DEBUG_USER_REFS,
3979 "%d:%d %s ref %d desc %d s %d w %d\n",
3980 proc->pid, thread->pid, debug_string,
3981 rdata.debug_id, rdata.desc, rdata.strong,
3982 rdata.weak);
3983 break;
3984 }
3985 case BC_INCREFS_DONE:
3986 case BC_ACQUIRE_DONE: {
3987 binder_uintptr_t node_ptr;
3988 binder_uintptr_t cookie;
3989 struct binder_node *node;
3990 bool free_node;
3991
3992 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3993 return -EFAULT;
3994 ptr += sizeof(binder_uintptr_t);
3995 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3996 return -EFAULT;
3997 ptr += sizeof(binder_uintptr_t);
3998 node = binder_get_node(proc, node_ptr);
3999 if (node == NULL) {
4000 binder_user_error("%d:%d %s u%016llx no match\n",
4001 proc->pid, thread->pid,
4002 cmd == BC_INCREFS_DONE ?
4003 "BC_INCREFS_DONE" :
4004 "BC_ACQUIRE_DONE",
4005 (u64)node_ptr);
4006 break;
4007 }
4008 if (cookie != node->cookie) {
4009 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4010 proc->pid, thread->pid,
4011 cmd == BC_INCREFS_DONE ?
4012 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4013 (u64)node_ptr, node->debug_id,
4014 (u64)cookie, (u64)node->cookie);
4015 binder_put_node(node);
4016 break;
4017 }
4018 binder_node_inner_lock(node);
4019 if (cmd == BC_ACQUIRE_DONE) {
4020 if (node->pending_strong_ref == 0) {
4021 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4022 proc->pid, thread->pid,
4023 node->debug_id);
4024 binder_node_inner_unlock(node);
4025 binder_put_node(node);
4026 break;
4027 }
4028 node->pending_strong_ref = 0;
4029 } else {
4030 if (node->pending_weak_ref == 0) {
4031 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4032 proc->pid, thread->pid,
4033 node->debug_id);
4034 binder_node_inner_unlock(node);
4035 binder_put_node(node);
4036 break;
4037 }
4038 node->pending_weak_ref = 0;
4039 }
4040 free_node = binder_dec_node_nilocked(node,
4041 cmd == BC_ACQUIRE_DONE, 0);
4042 WARN_ON(free_node);
4043 binder_debug(BINDER_DEBUG_USER_REFS,
4044 "%d:%d %s node %d ls %d lw %d tr %d\n",
4045 proc->pid, thread->pid,
4046 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4047 node->debug_id, node->local_strong_refs,
4048 node->local_weak_refs, node->tmp_refs);
4049 binder_node_inner_unlock(node);
4050 binder_put_node(node);
4051 break;
4052 }
4053 case BC_ATTEMPT_ACQUIRE:
4054 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4055 return -EINVAL;
4056 case BC_ACQUIRE_RESULT:
4057 pr_err("BC_ACQUIRE_RESULT not supported\n");
4058 return -EINVAL;
4059
4060 case BC_FREE_BUFFER: {
4061 binder_uintptr_t data_ptr;
4062 struct binder_buffer *buffer;
4063
4064 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4065 return -EFAULT;
4066 ptr += sizeof(binder_uintptr_t);
4067
4068 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4069 data_ptr);
4070 if (IS_ERR_OR_NULL(buffer)) {
4071 if (PTR_ERR(buffer) == -EPERM) {
4072 binder_user_error(
4073 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4074 proc->pid, thread->pid,
4075 (u64)data_ptr);
4076 } else {
4077 binder_user_error(
4078 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4079 proc->pid, thread->pid,
4080 (u64)data_ptr);
4081 }
4082 break;
4083 }
4084 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4085 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4086 proc->pid, thread->pid, (u64)data_ptr,
4087 buffer->debug_id,
4088 buffer->transaction ? "active" : "finished");
4089 binder_free_buf(proc, thread, buffer, false);
4090 break;
4091 }
4092
4093 case BC_TRANSACTION_SG:
4094 case BC_REPLY_SG: {
4095 struct binder_transaction_data_sg tr;
4096
4097 if (copy_from_user(&tr, ptr, sizeof(tr)))
4098 return -EFAULT;
4099 ptr += sizeof(tr);
4100 binder_transaction(proc, thread, &tr.transaction_data,
4101 cmd == BC_REPLY_SG, tr.buffers_size);
4102 break;
4103 }
4104 case BC_TRANSACTION:
4105 case BC_REPLY: {
4106 struct binder_transaction_data tr;
4107
4108 if (copy_from_user(&tr, ptr, sizeof(tr)))
4109 return -EFAULT;
4110 ptr += sizeof(tr);
4111 binder_transaction(proc, thread, &tr,
4112 cmd == BC_REPLY, 0);
4113 break;
4114 }
4115
4116 case BC_REGISTER_LOOPER:
4117 binder_debug(BINDER_DEBUG_THREADS,
4118 "%d:%d BC_REGISTER_LOOPER\n",
4119 proc->pid, thread->pid);
4120 binder_inner_proc_lock(proc);
4121 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4122 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4123 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4124 proc->pid, thread->pid);
4125 } else if (proc->requested_threads == 0) {
4126 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4127 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4128 proc->pid, thread->pid);
4129 } else {
4130 proc->requested_threads--;
4131 proc->requested_threads_started++;
4132 }
4133 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4134 binder_inner_proc_unlock(proc);
4135 break;
4136 case BC_ENTER_LOOPER:
4137 binder_debug(BINDER_DEBUG_THREADS,
4138 "%d:%d BC_ENTER_LOOPER\n",
4139 proc->pid, thread->pid);
4140 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4141 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4142 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4143 proc->pid, thread->pid);
4144 }
4145 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4146 break;
4147 case BC_EXIT_LOOPER:
4148 binder_debug(BINDER_DEBUG_THREADS,
4149 "%d:%d BC_EXIT_LOOPER\n",
4150 proc->pid, thread->pid);
4151 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4152 break;
4153
4154 case BC_REQUEST_DEATH_NOTIFICATION:
4155 case BC_CLEAR_DEATH_NOTIFICATION: {
4156 uint32_t target;
4157 binder_uintptr_t cookie;
4158 struct binder_ref *ref;
4159 struct binder_ref_death *death = NULL;
4160
4161 if (get_user(target, (uint32_t __user *)ptr))
4162 return -EFAULT;
4163 ptr += sizeof(uint32_t);
4164 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4165 return -EFAULT;
4166 ptr += sizeof(binder_uintptr_t);
4167 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4168 /*
4169 * Allocate memory for death notification
4170 * before taking lock
4171 */
4172 death = kzalloc(sizeof(*death), GFP_KERNEL);
4173 if (death == NULL) {
4174 WARN_ON(thread->return_error.cmd !=
4175 BR_OK);
4176 thread->return_error.cmd = BR_ERROR;
4177 binder_enqueue_thread_work(
4178 thread,
4179 &thread->return_error.work);
4180 binder_debug(
4181 BINDER_DEBUG_FAILED_TRANSACTION,
4182 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4183 proc->pid, thread->pid);
4184 break;
4185 }
4186 }
4187 binder_proc_lock(proc);
4188 ref = binder_get_ref_olocked(proc, target, false);
4189 if (ref == NULL) {
4190 binder_user_error("%d:%d %s invalid ref %d\n",
4191 proc->pid, thread->pid,
4192 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4193 "BC_REQUEST_DEATH_NOTIFICATION" :
4194 "BC_CLEAR_DEATH_NOTIFICATION",
4195 target);
4196 binder_proc_unlock(proc);
4197 kfree(death);
4198 break;
4199 }
4200
4201 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4202 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4203 proc->pid, thread->pid,
4204 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4205 "BC_REQUEST_DEATH_NOTIFICATION" :
4206 "BC_CLEAR_DEATH_NOTIFICATION",
4207 (u64)cookie, ref->data.debug_id,
4208 ref->data.desc, ref->data.strong,
4209 ref->data.weak, ref->node->debug_id);
4210
4211 binder_node_lock(ref->node);
4212 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4213 if (ref->death) {
4214 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4215 proc->pid, thread->pid);
4216 binder_node_unlock(ref->node);
4217 binder_proc_unlock(proc);
4218 kfree(death);
4219 break;
4220 }
4221 binder_stats_created(BINDER_STAT_DEATH);
4222 INIT_LIST_HEAD(&death->work.entry);
4223 death->cookie = cookie;
4224 ref->death = death;
4225 if (ref->node->proc == NULL) {
4226 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4227
4228 binder_inner_proc_lock(proc);
4229 binder_enqueue_work_ilocked(
4230 &ref->death->work, &proc->todo);
4231 binder_wakeup_proc_ilocked(proc);
4232 binder_inner_proc_unlock(proc);
4233 }
4234 } else {
4235 if (ref->death == NULL) {
4236 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4237 proc->pid, thread->pid);
4238 binder_node_unlock(ref->node);
4239 binder_proc_unlock(proc);
4240 break;
4241 }
4242 death = ref->death;
4243 if (death->cookie != cookie) {
4244 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4245 proc->pid, thread->pid,
4246 (u64)death->cookie,
4247 (u64)cookie);
4248 binder_node_unlock(ref->node);
4249 binder_proc_unlock(proc);
4250 break;
4251 }
4252 ref->death = NULL;
4253 binder_inner_proc_lock(proc);
4254 if (list_empty(&death->work.entry)) {
4255 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4256 if (thread->looper &
4257 (BINDER_LOOPER_STATE_REGISTERED |
4258 BINDER_LOOPER_STATE_ENTERED))
4259 binder_enqueue_thread_work_ilocked(
4260 thread,
4261 &death->work);
4262 else {
4263 binder_enqueue_work_ilocked(
4264 &death->work,
4265 &proc->todo);
4266 binder_wakeup_proc_ilocked(
4267 proc);
4268 }
4269 } else {
4270 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4271 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4272 }
4273 binder_inner_proc_unlock(proc);
4274 }
4275 binder_node_unlock(ref->node);
4276 binder_proc_unlock(proc);
4277 } break;
4278 case BC_DEAD_BINDER_DONE: {
4279 struct binder_work *w;
4280 binder_uintptr_t cookie;
4281 struct binder_ref_death *death = NULL;
4282
4283 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4284 return -EFAULT;
4285
4286 ptr += sizeof(cookie);
4287 binder_inner_proc_lock(proc);
4288 list_for_each_entry(w, &proc->delivered_death,
4289 entry) {
4290 struct binder_ref_death *tmp_death =
4291 container_of(w,
4292 struct binder_ref_death,
4293 work);
4294
4295 if (tmp_death->cookie == cookie) {
4296 death = tmp_death;
4297 break;
4298 }
4299 }
4300 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4301 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4302 proc->pid, thread->pid, (u64)cookie,
4303 death);
4304 if (death == NULL) {
4305 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4306 proc->pid, thread->pid, (u64)cookie);
4307 binder_inner_proc_unlock(proc);
4308 break;
4309 }
4310 binder_dequeue_work_ilocked(&death->work);
4311 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4312 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4313 if (thread->looper &
4314 (BINDER_LOOPER_STATE_REGISTERED |
4315 BINDER_LOOPER_STATE_ENTERED))
4316 binder_enqueue_thread_work_ilocked(
4317 thread, &death->work);
4318 else {
4319 binder_enqueue_work_ilocked(
4320 &death->work,
4321 &proc->todo);
4322 binder_wakeup_proc_ilocked(proc);
4323 }
4324 }
4325 binder_inner_proc_unlock(proc);
4326 } break;
4327
4328 default:
4329 pr_err("%d:%d unknown command %u\n",
4330 proc->pid, thread->pid, cmd);
4331 return -EINVAL;
4332 }
4333 *consumed = ptr - buffer;
4334 }
4335 return 0;
4336 }
4337
4338 static void binder_stat_br(struct binder_proc *proc,
4339 struct binder_thread *thread, uint32_t cmd)
4340 {
4341 trace_binder_return(cmd);
4342 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4343 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4344 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4345 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4346 }
4347 }
4348
4349 static int binder_put_node_cmd(struct binder_proc *proc,
4350 struct binder_thread *thread,
4351 void __user **ptrp,
4352 binder_uintptr_t node_ptr,
4353 binder_uintptr_t node_cookie,
4354 int node_debug_id,
4355 uint32_t cmd, const char *cmd_name)
4356 {
4357 void __user *ptr = *ptrp;
4358
4359 if (put_user(cmd, (uint32_t __user *)ptr))
4360 return -EFAULT;
4361 ptr += sizeof(uint32_t);
4362
4363 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4364 return -EFAULT;
4365 ptr += sizeof(binder_uintptr_t);
4366
4367 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4368 return -EFAULT;
4369 ptr += sizeof(binder_uintptr_t);
4370
4371 binder_stat_br(proc, thread, cmd);
4372 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4373 proc->pid, thread->pid, cmd_name, node_debug_id,
4374 (u64)node_ptr, (u64)node_cookie);
4375
4376 *ptrp = ptr;
4377 return 0;
4378 }
4379
4380 static int binder_wait_for_work(struct binder_thread *thread,
4381 bool do_proc_work)
4382 {
4383 DEFINE_WAIT(wait);
4384 struct binder_proc *proc = thread->proc;
4385 int ret = 0;
4386
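/*
 * Sleep until this thread has work (or, with do_proc_work, until the
 * proc has work), registering on proc->waiting_threads so a wakeup
 * can pick an idle thread. The wait is interruptible and freezable;
 * a pending signal breaks out with -EINTR.
 */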
4387 binder_inner_proc_lock(proc);
4388 for (;;) {
4389 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4390 if (binder_has_work_ilocked(thread, do_proc_work))
4391 break;
4392 if (do_proc_work)
4393 list_add(&thread->waiting_thread_node,
4394 &proc->waiting_threads);
4395 binder_inner_proc_unlock(proc);
4396 schedule();
4397 binder_inner_proc_lock(proc);
4398 list_del_init(&thread->waiting_thread_node);
4399 if (signal_pending(current)) {
4400 ret = -EINTR;
4401 break;
4402 }
4403 }
4404 finish_wait(&thread->wait, &wait);
4405 binder_inner_proc_unlock(proc);
4406
4407 return ret;
4408 }
4409
4410 /**
4411 * binder_apply_fd_fixups() - finish fd translation
4412 * @proc: binder_proc associated with @t->buffer
4413 * @t: binder transaction with list of fd fixups
4414 *
4415 * Now that we are in the context of the transaction target
4416 * process, we can allocate and install fds. Process the
4417 * list of fds to translate, fix up the buffer with the
4418 * new fds first, and only then install the files.
4419 *
4420 * If we fail to allocate an fd, skip the install and release
4421 * any fds that have already been allocated.
4422 */
4423 static int binder_apply_fd_fixups(struct binder_proc *proc,
4424 struct binder_transaction *t)
4425 {
4426 struct binder_txn_fd_fixup *fixup, *tmp;
4427 int ret = 0;
4428
4429 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4430 int fd = get_unused_fd_flags(O_CLOEXEC);
4431
4432 if (fd < 0) {
4433 binder_debug(BINDER_DEBUG_TRANSACTION,
4434 "failed fd fixup txn %d fd %d\n",
4435 t->debug_id, fd);
4436 ret = -ENOMEM;
4437 goto err;
4438 }
4439 binder_debug(BINDER_DEBUG_TRANSACTION,
4440 "fd fixup txn %d fd %d\n",
4441 t->debug_id, fd);
4442 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4443 fixup->target_fd = fd;
4444 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4445 fixup->offset, &fd,
4446 sizeof(u32))) {
4447 ret = -EINVAL;
4448 goto err;
4449 }
4450 }
4451 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4452 fd_install(fixup->target_fd, fixup->file);
4453 list_del(&fixup->fixup_entry);
4454 kfree(fixup);
4455 }
4456
4457 return ret;
4458
4459 err:
4460 binder_free_txn_fixups(t);
4461 return ret;
4462 }
4463
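/*
 * binder_thread_read() fills the read buffer with a stream of BR_*
 * records mirroring the BC_* write format: a 32-bit return code
 * followed by its payload. When *consumed is 0 the stream is
 * prefixed with a BR_NOOP.
 */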
4464 static int binder_thread_read(struct binder_proc *proc,
4465 struct binder_thread *thread,
4466 binder_uintptr_t binder_buffer, size_t size,
4467 binder_size_t *consumed, int non_block)
4468 {
4469 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4470 void __user *ptr = buffer + *consumed;
4471 void __user *end = buffer + size;
4472
4473 int ret = 0;
4474 int wait_for_proc_work;
4475
4476 if (*consumed == 0) {
4477 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4478 return -EFAULT;
4479 ptr += sizeof(uint32_t);
4480 }
4481
4482 retry:
4483 binder_inner_proc_lock(proc);
4484 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4485 binder_inner_proc_unlock(proc);
4486
4487 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4488
4489 trace_binder_wait_for_work(wait_for_proc_work,
4490 !!thread->transaction_stack,
4491 !binder_worklist_empty(proc, &thread->todo));
4492 if (wait_for_proc_work) {
4493 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4494 BINDER_LOOPER_STATE_ENTERED))) {
4495 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4496 proc->pid, thread->pid, thread->looper);
4497 wait_event_interruptible(binder_user_error_wait,
4498 binder_stop_on_user_error < 2);
4499 }
4500 binder_set_nice(proc->default_priority);
4501 }
4502
4503 if (non_block) {
4504 if (!binder_has_work(thread, wait_for_proc_work))
4505 ret = -EAGAIN;
4506 } else {
4507 ret = binder_wait_for_work(thread, wait_for_proc_work);
4508 }
4509
4510 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4511
4512 if (ret)
4513 return ret;
4514
4515 while (1) {
4516 uint32_t cmd;
4517 struct binder_transaction_data_secctx tr;
4518 struct binder_transaction_data *trd = &tr.transaction_data;
4519 struct binder_work *w = NULL;
4520 struct list_head *list = NULL;
4521 struct binder_transaction *t = NULL;
4522 struct binder_thread *t_from;
4523 size_t trsize = sizeof(*trd);
4524
4525 binder_inner_proc_lock(proc);
4526 if (!binder_worklist_empty_ilocked(&thread->todo))
4527 list = &thread->todo;
4528 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4529 wait_for_proc_work)
4530 list = &proc->todo;
4531 else {
4532 binder_inner_proc_unlock(proc);
4533
4534 /* no data added: only the initial BR_NOOP (4 bytes) was written */
4535 if (ptr - buffer == 4 && !thread->looper_need_return)
4536 goto retry;
4537 break;
4538 }
4539
4540 if (end - ptr < sizeof(tr) + 4) {
4541 binder_inner_proc_unlock(proc);
4542 break;
4543 }
4544 w = binder_dequeue_work_head_ilocked(list);
4545 if (binder_worklist_empty_ilocked(&thread->todo))
4546 thread->process_todo = false;
4547
4548 switch (w->type) {
4549 case BINDER_WORK_TRANSACTION: {
4550 binder_inner_proc_unlock(proc);
4551 t = container_of(w, struct binder_transaction, work);
4552 } break;
4553 case BINDER_WORK_RETURN_ERROR: {
4554 struct binder_error *e = container_of(
4555 w, struct binder_error, work);
4556
4557 WARN_ON(e->cmd == BR_OK);
4558 binder_inner_proc_unlock(proc);
4559 if (put_user(e->cmd, (uint32_t __user *)ptr))
4560 return -EFAULT;
4561 cmd = e->cmd;
4562 e->cmd = BR_OK;
4563 ptr += sizeof(uint32_t);
4564
4565 binder_stat_br(proc, thread, cmd);
4566 } break;
4567 case BINDER_WORK_TRANSACTION_COMPLETE:
4568 case BINDER_WORK_TRANSACTION_PENDING:
4569 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4570 if (proc->oneway_spam_detection_enabled &&
4571 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4572 cmd = BR_ONEWAY_SPAM_SUSPECT;
4573 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4574 cmd = BR_TRANSACTION_PENDING_FROZEN;
4575 else
4576 cmd = BR_TRANSACTION_COMPLETE;
4577 binder_inner_proc_unlock(proc);
4578 kfree(w);
4579 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4580 if (put_user(cmd, (uint32_t __user *)ptr))
4581 return -EFAULT;
4582 ptr += sizeof(uint32_t);
4583
4584 binder_stat_br(proc, thread, cmd);
4585 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4586 "%d:%d BR_TRANSACTION_COMPLETE\n",
4587 proc->pid, thread->pid);
4588 } break;
4589 case BINDER_WORK_NODE: {
4590 struct binder_node *node = container_of(w, struct binder_node, work);
4591 int strong, weak;
4592 binder_uintptr_t node_ptr = node->ptr;
4593 binder_uintptr_t node_cookie = node->cookie;
4594 int node_debug_id = node->debug_id;
4595 int has_weak_ref;
4596 int has_strong_ref;
4597 void __user *orig_ptr = ptr;
4598
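/*
 * Compute the node's desired ref state and diff it against what
 * userspace already holds: emit BR_INCREFS/BR_ACQUIRE when a
 * weak/strong ref must be taken and BR_RELEASE/BR_DECREFS when one
 * must be dropped; with no refs left the node itself is freed.
 */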
4599 BUG_ON(proc != node->proc);
4600 strong = node->internal_strong_refs ||
4601 node->local_strong_refs;
4602 weak = !hlist_empty(&node->refs) ||
4603 node->local_weak_refs ||
4604 node->tmp_refs || strong;
4605 has_strong_ref = node->has_strong_ref;
4606 has_weak_ref = node->has_weak_ref;
4607
4608 if (weak && !has_weak_ref) {
4609 node->has_weak_ref = 1;
4610 node->pending_weak_ref = 1;
4611 node->local_weak_refs++;
4612 }
4613 if (strong && !has_strong_ref) {
4614 node->has_strong_ref = 1;
4615 node->pending_strong_ref = 1;
4616 node->local_strong_refs++;
4617 }
4618 if (!strong && has_strong_ref)
4619 node->has_strong_ref = 0;
4620 if (!weak && has_weak_ref)
4621 node->has_weak_ref = 0;
4622 if (!weak && !strong) {
4623 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4624 "%d:%d node %d u%016llx c%016llx deleted\n",
4625 proc->pid, thread->pid,
4626 node_debug_id,
4627 (u64)node_ptr,
4628 (u64)node_cookie);
4629 rb_erase(&node->rb_node, &proc->nodes);
4630 binder_inner_proc_unlock(proc);
4631 binder_node_lock(node);
4632 /*
4633 * Acquire the node lock before freeing the
4634 * node to serialize with other threads that
4635 * may have been holding the node lock while
4636 * decrementing this node (avoids race where
4637 * this thread frees while the other thread
4638 * is unlocking the node after the final
4639 * decrement)
4640 */
4641 binder_node_unlock(node);
4642 binder_free_node(node);
4643 } else
4644 binder_inner_proc_unlock(proc);
4645
4646 if (weak && !has_weak_ref)
4647 ret = binder_put_node_cmd(
4648 proc, thread, &ptr, node_ptr,
4649 node_cookie, node_debug_id,
4650 BR_INCREFS, "BR_INCREFS");
4651 if (!ret && strong && !has_strong_ref)
4652 ret = binder_put_node_cmd(
4653 proc, thread, &ptr, node_ptr,
4654 node_cookie, node_debug_id,
4655 BR_ACQUIRE, "BR_ACQUIRE");
4656 if (!ret && !strong && has_strong_ref)
4657 ret = binder_put_node_cmd(
4658 proc, thread, &ptr, node_ptr,
4659 node_cookie, node_debug_id,
4660 BR_RELEASE, "BR_RELEASE");
4661 if (!ret && !weak && has_weak_ref)
4662 ret = binder_put_node_cmd(
4663 proc, thread, &ptr, node_ptr,
4664 node_cookie, node_debug_id,
4665 BR_DECREFS, "BR_DECREFS");
4666 if (orig_ptr == ptr)
4667 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4668 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4669 proc->pid, thread->pid,
4670 node_debug_id,
4671 (u64)node_ptr,
4672 (u64)node_cookie);
4673 if (ret)
4674 return ret;
4675 } break;
4676 case BINDER_WORK_DEAD_BINDER:
4677 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4678 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4679 struct binder_ref_death *death;
4680 uint32_t cmd;
4681 binder_uintptr_t cookie;
4682
4683 death = container_of(w, struct binder_ref_death, work);
4684 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4685 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4686 else
4687 cmd = BR_DEAD_BINDER;
4688 cookie = death->cookie;
4689
4690 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4691 "%d:%d %s %016llx\n",
4692 proc->pid, thread->pid,
4693 cmd == BR_DEAD_BINDER ?
4694 "BR_DEAD_BINDER" :
4695 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4696 (u64)cookie);
4697 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4698 binder_inner_proc_unlock(proc);
4699 kfree(death);
4700 binder_stats_deleted(BINDER_STAT_DEATH);
4701 } else {
4702 binder_enqueue_work_ilocked(
4703 w, &proc->delivered_death);
4704 binder_inner_proc_unlock(proc);
4705 }
4706 if (put_user(cmd, (uint32_t __user *)ptr))
4707 return -EFAULT;
4708 ptr += sizeof(uint32_t);
4709 if (put_user(cookie,
4710 (binder_uintptr_t __user *)ptr))
4711 return -EFAULT;
4712 ptr += sizeof(binder_uintptr_t);
4713 binder_stat_br(proc, thread, cmd);
4714 if (cmd == BR_DEAD_BINDER)
4715 goto done; /* DEAD_BINDER notifications can cause transactions */
4716 } break;
4717 default:
4718 binder_inner_proc_unlock(proc);
4719 pr_err("%d:%d: bad work type %d\n",
4720 proc->pid, thread->pid, w->type);
4721 break;
4722 }
4723
4724 if (!t)
4725 continue;
4726
4727 BUG_ON(t->buffer == NULL);
4728 if (t->buffer->target_node) {
4729 struct binder_node *target_node = t->buffer->target_node;
4730
4731 trd->target.ptr = target_node->ptr;
4732 trd->cookie = target_node->cookie;
4733 t->saved_priority = task_nice(current);
4734 if (t->priority < target_node->min_priority &&
4735 !(t->flags & TF_ONE_WAY))
4736 binder_set_nice(t->priority);
4737 else if (!(t->flags & TF_ONE_WAY) ||
4738 t->saved_priority > target_node->min_priority)
4739 binder_set_nice(target_node->min_priority);
4740 cmd = BR_TRANSACTION;
4741 } else {
4742 trd->target.ptr = 0;
4743 trd->cookie = 0;
4744 cmd = BR_REPLY;
4745 }
4746 trd->code = t->code;
4747 trd->flags = t->flags;
4748 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4749
4750 t_from = binder_get_txn_from(t);
4751 if (t_from) {
4752 struct task_struct *sender = t_from->proc->tsk;
4753
4754 trd->sender_pid =
4755 task_tgid_nr_ns(sender,
4756 task_active_pid_ns(current));
4757 } else {
4758 trd->sender_pid = 0;
4759 }
4760
4761 ret = binder_apply_fd_fixups(proc, t);
4762 if (ret) {
4763 struct binder_buffer *buffer = t->buffer;
4764 bool oneway = !!(t->flags & TF_ONE_WAY);
4765 int tid = t->debug_id;
4766
4767 if (t_from)
4768 binder_thread_dec_tmpref(t_from);
4769 buffer->transaction = NULL;
4770 binder_cleanup_transaction(t, "fd fixups failed",
4771 BR_FAILED_REPLY);
4772 binder_free_buf(proc, thread, buffer, true);
4773 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4774 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4775 proc->pid, thread->pid,
4776 oneway ? "async " :
4777 (cmd == BR_REPLY ? "reply " : ""),
4778 tid, BR_FAILED_REPLY, ret, __LINE__);
4779 if (cmd == BR_REPLY) {
4780 cmd = BR_FAILED_REPLY;
4781 if (put_user(cmd, (uint32_t __user *)ptr))
4782 return -EFAULT;
4783 ptr += sizeof(uint32_t);
4784 binder_stat_br(proc, thread, cmd);
4785 break;
4786 }
4787 continue;
4788 }
4789 trd->data_size = t->buffer->data_size;
4790 trd->offsets_size = t->buffer->offsets_size;
4791 trd->data.ptr.buffer = t->buffer->user_data;
4792 trd->data.ptr.offsets = trd->data.ptr.buffer +
4793 ALIGN(t->buffer->data_size,
4794 sizeof(void *));
4795
4796 tr.secctx = t->security_ctx;
4797 if (t->security_ctx) {
4798 cmd = BR_TRANSACTION_SEC_CTX;
4799 trsize = sizeof(tr);
4800 }
4801 if (put_user(cmd, (uint32_t __user *)ptr)) {
4802 if (t_from)
4803 binder_thread_dec_tmpref(t_from);
4804
4805 binder_cleanup_transaction(t, "put_user failed",
4806 BR_FAILED_REPLY);
4807
4808 return -EFAULT;
4809 }
4810 ptr += sizeof(uint32_t);
4811 if (copy_to_user(ptr, &tr, trsize)) {
4812 if (t_from)
4813 binder_thread_dec_tmpref(t_from);
4814
4815 binder_cleanup_transaction(t, "copy_to_user failed",
4816 BR_FAILED_REPLY);
4817
4818 return -EFAULT;
4819 }
4820 ptr += trsize;
4821
4822 trace_binder_transaction_received(t);
4823 binder_stat_br(proc, thread, cmd);
4824 binder_debug(BINDER_DEBUG_TRANSACTION,
4825 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4826 proc->pid, thread->pid,
4827 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4828 (cmd == BR_TRANSACTION_SEC_CTX) ?
4829 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4830 t->debug_id, t_from ? t_from->proc->pid : 0,
4831 t_from ? t_from->pid : 0, cmd,
4832 t->buffer->data_size, t->buffer->offsets_size,
4833 (u64)trd->data.ptr.buffer,
4834 (u64)trd->data.ptr.offsets);
4835
4836 if (t_from)
4837 binder_thread_dec_tmpref(t_from);
4838 t->buffer->allow_user_free = 1;
4839 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4840 binder_inner_proc_lock(thread->proc);
4841 t->to_parent = thread->transaction_stack;
4842 t->to_thread = thread;
4843 thread->transaction_stack = t;
4844 binder_inner_proc_unlock(thread->proc);
4845 } else {
4846 binder_free_transaction(t);
4847 }
4848 break;
4849 }
4850
4851 done:
4852
4853 *consumed = ptr - buffer;
4854 binder_inner_proc_lock(proc);
4855 if (proc->requested_threads == 0 &&
4856 list_empty(&thread->proc->waiting_threads) &&
4857 proc->requested_threads_started < proc->max_threads &&
4858 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4859 BINDER_LOOPER_STATE_ENTERED))
4860 /* the user-space code fails to spawn a new thread if we leave this out */) {
4861 proc->requested_threads++;
4862 binder_inner_proc_unlock(proc);
4863 binder_debug(BINDER_DEBUG_THREADS,
4864 "%d:%d BR_SPAWN_LOOPER\n",
4865 proc->pid, thread->pid);
4866 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4867 return -EFAULT;
4868 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4869 } else
4870 binder_inner_proc_unlock(proc);
4871 return 0;
4872 }
4873
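/*
 * Drain @list, cleaning up every undelivered work item after the
 * owning thread or process has died. Items are popped one at a time
 * under the inner lock, then released with the lock dropped.
 */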
4874 static void binder_release_work(struct binder_proc *proc,
4875 struct list_head *list)
4876 {
4877 struct binder_work *w;
4878 enum binder_work_type wtype;
4879
4880 while (1) {
4881 binder_inner_proc_lock(proc);
4882 w = binder_dequeue_work_head_ilocked(list);
4883 wtype = w ? w->type : 0;
4884 binder_inner_proc_unlock(proc);
4885 if (!w)
4886 return;
4887
4888 switch (wtype) {
4889 case BINDER_WORK_TRANSACTION: {
4890 struct binder_transaction *t;
4891
4892 t = container_of(w, struct binder_transaction, work);
4893
4894 binder_cleanup_transaction(t, "process died.",
4895 BR_DEAD_REPLY);
4896 } break;
4897 case BINDER_WORK_RETURN_ERROR: {
4898 struct binder_error *e = container_of(
4899 w, struct binder_error, work);
4900
4901 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4902 "undelivered TRANSACTION_ERROR: %u\n",
4903 e->cmd);
4904 } break;
4905 case BINDER_WORK_TRANSACTION_PENDING:
4906 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4907 case BINDER_WORK_TRANSACTION_COMPLETE: {
4908 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4909 "undelivered TRANSACTION_COMPLETE\n");
4910 kfree(w);
4911 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4912 } break;
4913 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4914 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4915 struct binder_ref_death *death;
4916
4917 death = container_of(w, struct binder_ref_death, work);
4918 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4919 "undelivered death notification, %016llx\n",
4920 (u64)death->cookie);
4921 kfree(death);
4922 binder_stats_deleted(BINDER_STAT_DEATH);
4923 } break;
4924 case BINDER_WORK_NODE:
4925 break;
4926 default:
4927 pr_err("unexpected work type, %d, not freed\n",
4928 wtype);
4929 break;
4930 }
4931 }
4932
4933 }
4934
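/*
 * Look up current's entry in proc->threads (an rb-tree keyed by pid)
 * with the inner lock held. @new_thread, when non-NULL, was allocated
 * by the caller outside the spinlock and is inserted on a miss.
 */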
4935 static struct binder_thread *binder_get_thread_ilocked(
4936 struct binder_proc *proc, struct binder_thread *new_thread)
4937 {
4938 struct binder_thread *thread = NULL;
4939 struct rb_node *parent = NULL;
4940 struct rb_node **p = &proc->threads.rb_node;
4941
4942 while (*p) {
4943 parent = *p;
4944 thread = rb_entry(parent, struct binder_thread, rb_node);
4945
4946 if (current->pid < thread->pid)
4947 p = &(*p)->rb_left;
4948 else if (current->pid > thread->pid)
4949 p = &(*p)->rb_right;
4950 else
4951 return thread;
4952 }
4953 if (!new_thread)
4954 return NULL;
4955 thread = new_thread;
4956 binder_stats_created(BINDER_STAT_THREAD);
4957 thread->proc = proc;
4958 thread->pid = current->pid;
4959 atomic_set(&thread->tmp_ref, 0);
4960 init_waitqueue_head(&thread->wait);
4961 INIT_LIST_HEAD(&thread->todo);
4962 rb_link_node(&thread->rb_node, parent, p);
4963 rb_insert_color(&thread->rb_node, &proc->threads);
4964 thread->looper_need_return = true;
4965 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4966 thread->return_error.cmd = BR_OK;
4967 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4968 thread->reply_error.cmd = BR_OK;
4969 thread->ee.command = BR_OK;
4970 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4971 return thread;
4972 }
4973
4974 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4975 {
4976 struct binder_thread *thread;
4977 struct binder_thread *new_thread;
4978
4979 binder_inner_proc_lock(proc);
4980 thread = binder_get_thread_ilocked(proc, NULL);
4981 binder_inner_proc_unlock(proc);
4982 if (!thread) {
4983 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4984 if (new_thread == NULL)
4985 return NULL;
4986 binder_inner_proc_lock(proc);
4987 thread = binder_get_thread_ilocked(proc, new_thread);
4988 binder_inner_proc_unlock(proc);
4989 if (thread != new_thread)
4990 kfree(new_thread);
4991 }
4992 return thread;
4993 }
4994
4995 static void binder_free_proc(struct binder_proc *proc)
4996 {
4997 struct binder_device *device;
4998
4999 BUG_ON(!list_empty(&proc->todo));
5000 BUG_ON(!list_empty(&proc->delivered_death));
5001 if (proc->outstanding_txns)
5002 pr_warn("%s: Unexpected outstanding_txns %d\n",
5003 __func__, proc->outstanding_txns);
5004 device = container_of(proc->context, struct binder_device, context);
5005 if (refcount_dec_and_test(&device->ref)) {
5006 kfree(proc->context->name);
5007 kfree(device);
5008 }
5009 binder_alloc_deferred_release(&proc->alloc);
5010 put_task_struct(proc->tsk);
5011 put_cred(proc->cred);
5012 binder_stats_deleted(BINDER_STAT_PROC);
5013 dbitmap_free(&proc->dmap);
5014 kfree(proc);
5015 }
5016
5017 static void binder_free_thread(struct binder_thread *thread)
5018 {
5019 BUG_ON(!list_empty(&thread->todo));
5020 binder_stats_deleted(BINDER_STAT_THREAD);
5021 binder_proc_dec_tmpref(thread->proc);
5022 kfree(thread);
5023 }
5024
5025 static int binder_thread_release(struct binder_proc *proc,
5026 struct binder_thread *thread)
5027 {
5028 struct binder_transaction *t;
5029 struct binder_transaction *send_reply = NULL;
5030 int active_transactions = 0;
5031 struct binder_transaction *last_t = NULL;
5032
5033 binder_inner_proc_lock(thread->proc);
5034 /*
5035 * take a ref on the proc so it survives
5036 * after we remove this thread from proc->threads.
5037 * The corresponding dec is when we actually
5038 * free the thread in binder_free_thread()
5039 */
5040 proc->tmp_ref++;
5041 /*
5042 * take a ref on this thread to ensure it
5043 * survives while we are releasing it
5044 */
5045 atomic_inc(&thread->tmp_ref);
5046 rb_erase(&thread->rb_node, &proc->threads);
5047 t = thread->transaction_stack;
5048 if (t) {
5049 spin_lock(&t->lock);
5050 if (t->to_thread == thread)
5051 send_reply = t;
5052 } else {
5053 __acquire(&t->lock);
5054 }
5055 thread->is_dead = true;
5056
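/*
 * Unwind the transaction stack: each iteration detaches this thread
 * from one transaction under that transaction's lock and steps to
 * its parent; the __acquire()/__release() calls are sparse-only
 * annotations that keep the lock balance consistent when t is NULL.
 */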
5057 while (t) {
5058 last_t = t;
5059 active_transactions++;
5060 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5061 "release %d:%d transaction %d %s, still active\n",
5062 proc->pid, thread->pid,
5063 t->debug_id,
5064 (t->to_thread == thread) ? "in" : "out");
5065
5066 if (t->to_thread == thread) {
5067 thread->proc->outstanding_txns--;
5068 t->to_proc = NULL;
5069 t->to_thread = NULL;
5070 if (t->buffer) {
5071 t->buffer->transaction = NULL;
5072 t->buffer = NULL;
5073 }
5074 t = t->to_parent;
5075 } else if (t->from == thread) {
5076 t->from = NULL;
5077 t = t->from_parent;
5078 } else
5079 BUG();
5080 spin_unlock(&last_t->lock);
5081 if (t)
5082 spin_lock(&t->lock);
5083 else
5084 __acquire(&t->lock);
5085 }
5086 /* annotation for sparse, lock not acquired in last iteration above */
5087 __release(&t->lock);
5088
5089 /*
5090 * If this thread used poll, make sure we remove the waitqueue from any
5091 * poll data structures holding it.
5092 */
5093 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5094 wake_up_pollfree(&thread->wait);
5095
5096 binder_inner_proc_unlock(thread->proc);
5097
5098 /*
5099 * This is needed to avoid races between wake_up_pollfree() above and
5100 * someone else removing the last entry from the queue for other reasons
5101 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5102 * descriptor being closed). Such other users hold an RCU read lock, so
5103 * we can be sure they're done after we call synchronize_rcu().
5104 */
5105 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5106 synchronize_rcu();
5107
5108 if (send_reply)
5109 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5110 binder_release_work(proc, &thread->todo);
5111 binder_thread_dec_tmpref(thread);
5112 return active_transactions;
5113 }
5114
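/*
 * poll() support: returns EPOLLIN when the thread (or the process, if
 * this thread is available for proc work) has work queued. The
 * BINDER_LOOPER_STATE_POLL flag is recorded so thread teardown knows
 * it must wake_up_pollfree() the waitqueue.
 */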
5115 static __poll_t binder_poll(struct file *filp,
5116 struct poll_table_struct *wait)
5117 {
5118 struct binder_proc *proc = filp->private_data;
5119 struct binder_thread *thread = NULL;
5120 bool wait_for_proc_work;
5121
5122 thread = binder_get_thread(proc);
5123 if (!thread)
5124 return EPOLLERR;
5125
5126 binder_inner_proc_lock(thread->proc);
5127 thread->looper |= BINDER_LOOPER_STATE_POLL;
5128 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5129
5130 binder_inner_proc_unlock(thread->proc);
5131
5132 poll_wait(filp, &thread->wait, wait);
5133
5134 if (binder_has_work(thread, wait_for_proc_work))
5135 return EPOLLIN;
5136
5137 return 0;
5138 }
5139
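/*
 * A minimal userspace sketch of driving BINDER_WRITE_READ (purely
 * illustrative; "fd", "cmds" and "readbuf" are hypothetical caller
 * state, not part of this file):
 *
 *	struct binder_write_read bwr = {
 *		.write_size   = cmds_len,
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.read_size    = sizeof(readbuf),
 *		.read_buffer  = (binder_uintptr_t)readbuf,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, write_consumed and read_consumed tell the caller how much
 * of each buffer the kernel processed or filled.
 */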
5140 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5141 struct binder_thread *thread)
5142 {
5143 int ret = 0;
5144 struct binder_proc *proc = filp->private_data;
5145 void __user *ubuf = (void __user *)arg;
5146 struct binder_write_read bwr;
5147
5148 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5149 ret = -EFAULT;
5150 goto out;
5151 }
5152 binder_debug(BINDER_DEBUG_READ_WRITE,
5153 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5154 proc->pid, thread->pid,
5155 (u64)bwr.write_size, (u64)bwr.write_buffer,
5156 (u64)bwr.read_size, (u64)bwr.read_buffer);
5157
5158 if (bwr.write_size > 0) {
5159 ret = binder_thread_write(proc, thread,
5160 bwr.write_buffer,
5161 bwr.write_size,
5162 &bwr.write_consumed);
5163 trace_binder_write_done(ret);
5164 if (ret < 0) {
5165 bwr.read_consumed = 0;
5166 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5167 ret = -EFAULT;
5168 goto out;
5169 }
5170 }
5171 if (bwr.read_size > 0) {
5172 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5173 bwr.read_size,
5174 &bwr.read_consumed,
5175 filp->f_flags & O_NONBLOCK);
5176 trace_binder_read_done(ret);
5177 binder_inner_proc_lock(proc);
5178 if (!binder_worklist_empty_ilocked(&proc->todo))
5179 binder_wakeup_proc_ilocked(proc);
5180 binder_inner_proc_unlock(proc);
5181 if (ret < 0) {
5182 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5183 ret = -EFAULT;
5184 goto out;
5185 }
5186 }
5187 binder_debug(BINDER_DEBUG_READ_WRITE,
5188 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5189 proc->pid, thread->pid,
5190 (u64)bwr.write_consumed, (u64)bwr.write_size,
5191 (u64)bwr.read_consumed, (u64)bwr.read_size);
5192 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5193 ret = -EFAULT;
5194 goto out;
5195 }
5196 out:
5197 return ret;
5198 }
5199
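/*
 * Register the caller as the context manager (handle 0) for this
 * binder context. Only one context manager may exist at a time; the
 * request must pass the LSM hook and, if a manager uid was configured
 * earlier, must come from that same effective uid.
 */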
5200 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5201 struct flat_binder_object *fbo)
5202 {
5203 int ret = 0;
5204 struct binder_proc *proc = filp->private_data;
5205 struct binder_context *context = proc->context;
5206 struct binder_node *new_node;
5207 kuid_t curr_euid = current_euid();
5208
5209 mutex_lock(&context->context_mgr_node_lock);
5210 if (context->binder_context_mgr_node) {
5211 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5212 ret = -EBUSY;
5213 goto out;
5214 }
5215 ret = security_binder_set_context_mgr(proc->cred);
5216 if (ret < 0)
5217 goto out;
5218 if (uid_valid(context->binder_context_mgr_uid)) {
5219 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5220 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5221 from_kuid(&init_user_ns, curr_euid),
5222 from_kuid(&init_user_ns,
5223 context->binder_context_mgr_uid));
5224 ret = -EPERM;
5225 goto out;
5226 }
5227 } else {
5228 context->binder_context_mgr_uid = curr_euid;
5229 }
5230 new_node = binder_new_node(proc, fbo);
5231 if (!new_node) {
5232 ret = -ENOMEM;
5233 goto out;
5234 }
5235 binder_node_lock(new_node);
5236 new_node->local_weak_refs++;
5237 new_node->local_strong_refs++;
5238 new_node->has_strong_ref = 1;
5239 new_node->has_weak_ref = 1;
5240 context->binder_context_mgr_node = new_node;
5241 binder_node_unlock(new_node);
5242 binder_put_node(new_node);
5243 out:
5244 mutex_unlock(&context->context_mgr_node_lock);
5245 return ret;
5246 }
5247
5248 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5249 struct binder_node_info_for_ref *info)
5250 {
5251 struct binder_node *node;
5252 struct binder_context *context = proc->context;
5253 __u32 handle = info->handle;
5254
5255 if (info->strong_count || info->weak_count || info->reserved1 ||
5256 info->reserved2 || info->reserved3) {
5257 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5258 proc->pid);
5259 return -EINVAL;
5260 }
5261
5262 /* This ioctl may only be used by the context manager */
5263 mutex_lock(&context->context_mgr_node_lock);
5264 if (!context->binder_context_mgr_node ||
5265 context->binder_context_mgr_node->proc != proc) {
5266 mutex_unlock(&context->context_mgr_node_lock);
5267 return -EPERM;
5268 }
5269 mutex_unlock(&context->context_mgr_node_lock);
5270
5271 node = binder_get_node_from_ref(proc, handle, true, NULL);
5272 if (!node)
5273 return -EINVAL;
5274
5275 info->strong_count = node->local_strong_refs +
5276 node->internal_strong_refs;
5277 info->weak_count = node->local_weak_refs;
5278
5279 binder_put_node(node);
5280
5281 return 0;
5282 }
5283
5284 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5285 struct binder_node_debug_info *info)
5286 {
5287 struct rb_node *n;
5288 binder_uintptr_t ptr = info->ptr;
5289
5290 memset(info, 0, sizeof(*info));
5291
5292 binder_inner_proc_lock(proc);
5293 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5294 struct binder_node *node = rb_entry(n, struct binder_node,
5295 rb_node);
5296 if (node->ptr > ptr) {
5297 info->ptr = node->ptr;
5298 info->cookie = node->cookie;
5299 info->has_strong_ref = node->has_strong_ref;
5300 info->has_weak_ref = node->has_weak_ref;
5301 break;
5302 }
5303 }
5304 binder_inner_proc_unlock(proc);
5305
5306 return 0;
5307 }
5308
5309 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5310 {
5311 struct rb_node *n;
5312 struct binder_thread *thread;
5313
5314 if (proc->outstanding_txns > 0)
5315 return true;
5316
5317 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5318 thread = rb_entry(n, struct binder_thread, rb_node);
5319 if (thread->transaction_stack)
5320 return true;
5321 }
5322 return false;
5323 }
5324
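/*
 * Freeze or unfreeze a single binder_proc. Unfreezing just clears the
 * frozen state and the sync/async receive markers. Freezing sets the
 * frozen state first so no new transactions can be queued, optionally
 * waits up to timeout_ms for outstanding transactions to drain, and
 * backs the state out with -EAGAIN if transactions are still pending.
 */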
5325 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5326 struct binder_proc *target_proc)
5327 {
5328 int ret = 0;
5329
5330 if (!info->enable) {
5331 binder_inner_proc_lock(target_proc);
5332 target_proc->sync_recv = false;
5333 target_proc->async_recv = false;
5334 target_proc->is_frozen = false;
5335 binder_inner_proc_unlock(target_proc);
5336 return 0;
5337 }
5338
5339 /*
5340 * Freezing the target: prevent new transactions by
5341 * setting the frozen state. If a timeout was specified,
5342 * wait for outstanding transactions to drain.
5343 */
5344 binder_inner_proc_lock(target_proc);
5345 target_proc->sync_recv = false;
5346 target_proc->async_recv = false;
5347 target_proc->is_frozen = true;
5348 binder_inner_proc_unlock(target_proc);
5349
5350 if (info->timeout_ms > 0)
5351 ret = wait_event_interruptible_timeout(
5352 target_proc->freeze_wait,
5353 (!target_proc->outstanding_txns),
5354 msecs_to_jiffies(info->timeout_ms));
5355
5356 /* Check pending transactions that wait for reply */
5357 if (ret >= 0) {
5358 binder_inner_proc_lock(target_proc);
5359 if (binder_txns_pending_ilocked(target_proc))
5360 ret = -EAGAIN;
5361 binder_inner_proc_unlock(target_proc);
5362 }
5363
5364 if (ret < 0) {
5365 binder_inner_proc_lock(target_proc);
5366 target_proc->is_frozen = false;
5367 binder_inner_proc_unlock(target_proc);
5368 }
5369
5370 return ret;
5371 }
5372
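/*
 * Report frozen-state info for every binder_proc with the given pid
 * (one per binder context). In sync_recv, bit 0 is set if a sync
 * transaction arrived while frozen and bit 1 if transactions are still
 * pending; async_recv reports async transactions received while
 * frozen.
 */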
5373 static int binder_ioctl_get_freezer_info(
5374 struct binder_frozen_status_info *info)
5375 {
5376 struct binder_proc *target_proc;
5377 bool found = false;
5378 __u32 txns_pending;
5379
5380 info->sync_recv = 0;
5381 info->async_recv = 0;
5382
5383 mutex_lock(&binder_procs_lock);
5384 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5385 if (target_proc->pid == info->pid) {
5386 found = true;
5387 binder_inner_proc_lock(target_proc);
5388 txns_pending = binder_txns_pending_ilocked(target_proc);
5389 info->sync_recv |= target_proc->sync_recv |
5390 (txns_pending << 1);
5391 info->async_recv |= target_proc->async_recv;
5392 binder_inner_proc_unlock(target_proc);
5393 }
5394 }
5395 mutex_unlock(&binder_procs_lock);
5396
5397 if (!found)
5398 return -EINVAL;
5399
5400 return 0;
5401 }
5402
5403 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5404 void __user *ubuf)
5405 {
5406 struct binder_extended_error ee;
5407
5408 binder_inner_proc_lock(thread->proc);
5409 ee = thread->ee;
5410 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5411 binder_inner_proc_unlock(thread->proc);
5412
5413 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5414 return -EFAULT;
5415
5416 return 0;
5417 }
5418
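/*
 * Main ioctl dispatcher. Every command first binds the caller to its
 * binder_thread (created on first contact); looper_need_return is
 * cleared on the way out so the thread may resume blocking reads.
 */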
5419 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5420 {
5421 int ret;
5422 struct binder_proc *proc = filp->private_data;
5423 struct binder_thread *thread;
5424 void __user *ubuf = (void __user *)arg;
5425
5426 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5427 proc->pid, current->pid, cmd, arg);*/
5428
5429 binder_selftest_alloc(&proc->alloc);
5430
5431 trace_binder_ioctl(cmd, arg);
5432
5433 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5434 if (ret)
5435 goto err_unlocked;
5436
5437 thread = binder_get_thread(proc);
5438 if (thread == NULL) {
5439 ret = -ENOMEM;
5440 goto err;
5441 }
5442
5443 switch (cmd) {
5444 case BINDER_WRITE_READ:
5445 ret = binder_ioctl_write_read(filp, arg, thread);
5446 if (ret)
5447 goto err;
5448 break;
5449 case BINDER_SET_MAX_THREADS: {
5450 u32 max_threads;
5451
5452 if (copy_from_user(&max_threads, ubuf,
5453 sizeof(max_threads))) {
5454 ret = -EINVAL;
5455 goto err;
5456 }
5457 binder_inner_proc_lock(proc);
5458 proc->max_threads = max_threads;
5459 binder_inner_proc_unlock(proc);
5460 break;
5461 }
5462 case BINDER_SET_CONTEXT_MGR_EXT: {
5463 struct flat_binder_object fbo;
5464
5465 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5466 ret = -EINVAL;
5467 goto err;
5468 }
5469 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5470 if (ret)
5471 goto err;
5472 break;
5473 }
5474 case BINDER_SET_CONTEXT_MGR:
5475 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5476 if (ret)
5477 goto err;
5478 break;
5479 case BINDER_THREAD_EXIT:
5480 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5481 proc->pid, thread->pid);
5482 binder_thread_release(proc, thread);
5483 thread = NULL;
5484 break;
5485 case BINDER_VERSION: {
5486 struct binder_version __user *ver = ubuf;
5487
5488 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5489 &ver->protocol_version)) {
5490 ret = -EINVAL;
5491 goto err;
5492 }
5493 break;
5494 }
5495 case BINDER_GET_NODE_INFO_FOR_REF: {
5496 struct binder_node_info_for_ref info;
5497
5498 if (copy_from_user(&info, ubuf, sizeof(info))) {
5499 ret = -EFAULT;
5500 goto err;
5501 }
5502
5503 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5504 if (ret < 0)
5505 goto err;
5506
5507 if (copy_to_user(ubuf, &info, sizeof(info))) {
5508 ret = -EFAULT;
5509 goto err;
5510 }
5511
5512 break;
5513 }
5514 case BINDER_GET_NODE_DEBUG_INFO: {
5515 struct binder_node_debug_info info;
5516
5517 if (copy_from_user(&info, ubuf, sizeof(info))) {
5518 ret = -EFAULT;
5519 goto err;
5520 }
5521
5522 ret = binder_ioctl_get_node_debug_info(proc, &info);
5523 if (ret < 0)
5524 goto err;
5525
5526 if (copy_to_user(ubuf, &info, sizeof(info))) {
5527 ret = -EFAULT;
5528 goto err;
5529 }
5530 break;
5531 }
5532 case BINDER_FREEZE: {
5533 struct binder_freeze_info info;
5534 struct binder_proc **target_procs = NULL, *target_proc;
5535 int target_procs_count = 0, i = 0;
5536
5537 ret = 0;
5538
5539 if (copy_from_user(&info, ubuf, sizeof(info))) {
5540 ret = -EFAULT;
5541 goto err;
5542 }
5543
5544 mutex_lock(&binder_procs_lock);
5545 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5546 if (target_proc->pid == info.pid)
5547 target_procs_count++;
5548 }
5549
5550 if (target_procs_count == 0) {
5551 mutex_unlock(&binder_procs_lock);
5552 ret = -EINVAL;
5553 goto err;
5554 }
5555
5556 target_procs = kcalloc(target_procs_count,
5557 sizeof(struct binder_proc *),
5558 GFP_KERNEL);
5559
5560 if (!target_procs) {
5561 mutex_unlock(&binder_procs_lock);
5562 ret = -ENOMEM;
5563 goto err;
5564 }
5565
5566 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5567 if (target_proc->pid != info.pid)
5568 continue;
5569
5570 binder_inner_proc_lock(target_proc);
5571 target_proc->tmp_ref++;
5572 binder_inner_proc_unlock(target_proc);
5573
5574 target_procs[i++] = target_proc;
5575 }
5576 mutex_unlock(&binder_procs_lock);
5577
5578 for (i = 0; i < target_procs_count; i++) {
5579 if (ret >= 0)
5580 ret = binder_ioctl_freeze(&info,
5581 target_procs[i]);
5582
5583 binder_proc_dec_tmpref(target_procs[i]);
5584 }
5585
5586 kfree(target_procs);
5587
5588 if (ret < 0)
5589 goto err;
5590 break;
5591 }
5592 case BINDER_GET_FROZEN_INFO: {
5593 struct binder_frozen_status_info info;
5594
5595 if (copy_from_user(&info, ubuf, sizeof(info))) {
5596 ret = -EFAULT;
5597 goto err;
5598 }
5599
5600 ret = binder_ioctl_get_freezer_info(&info);
5601 if (ret < 0)
5602 goto err;
5603
5604 if (copy_to_user(ubuf, &info, sizeof(info))) {
5605 ret = -EFAULT;
5606 goto err;
5607 }
5608 break;
5609 }
5610 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5611 uint32_t enable;
5612
5613 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5614 ret = -EFAULT;
5615 goto err;
5616 }
5617 binder_inner_proc_lock(proc);
5618 proc->oneway_spam_detection_enabled = (bool)enable;
5619 binder_inner_proc_unlock(proc);
5620 break;
5621 }
5622 case BINDER_GET_EXTENDED_ERROR:
5623 ret = binder_ioctl_get_extended_error(thread, ubuf);
5624 if (ret < 0)
5625 goto err;
5626 break;
5627 default:
5628 ret = -EINVAL;
5629 goto err;
5630 }
5631 ret = 0;
5632 err:
5633 if (thread)
5634 thread->looper_need_return = false;
5635 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5636 if (ret && ret != -EINTR)
5637 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5638 err_unlocked:
5639 trace_binder_ioctl_done(ret);
5640 return ret;
5641 }
5642
5643 static void binder_vma_open(struct vm_area_struct *vma)
5644 {
5645 struct binder_proc *proc = vma->vm_private_data;
5646
5647 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5648 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5649 proc->pid, vma->vm_start, vma->vm_end,
5650 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5651 (unsigned long)pgprot_val(vma->vm_page_prot));
5652 }
5653
5654 static void binder_vma_close(struct vm_area_struct *vma)
5655 {
5656 struct binder_proc *proc = vma->vm_private_data;
5657
5658 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5659 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5660 proc->pid, vma->vm_start, vma->vm_end,
5661 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5662 (unsigned long)pgprot_val(vma->vm_page_prot));
5663 binder_alloc_vma_close(&proc->alloc);
5664 }
5665
5666 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5667 {
5668 return VM_FAULT_SIGBUS;
5669 }
5670
5671 static const struct vm_operations_struct binder_vm_ops = {
5672 .open = binder_vma_open,
5673 .close = binder_vma_close,
5674 .fault = binder_vm_fault,
5675 };
5676
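/*
 * Map the binder buffer space into the caller. The mapping must be
 * made by the process that opened the device, is read-only from
 * userspace (FORBIDDEN_MMAP_FLAGS rejects VM_WRITE, and VM_MAYWRITE is
 * cleared so it cannot be re-added via mprotect()), and is not copied
 * across fork() (VM_DONTCOPY).
 */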
5677 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5678 {
5679 struct binder_proc *proc = filp->private_data;
5680
5681 if (proc->tsk != current->group_leader)
5682 return -EINVAL;
5683
5684 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5685 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5686 __func__, proc->pid, vma->vm_start, vma->vm_end,
5687 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5688 (unsigned long)pgprot_val(vma->vm_page_prot));
5689
5690 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5691 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5692 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5693 return -EPERM;
5694 }
5695 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5696
5697 vma->vm_ops = &binder_vm_ops;
5698 vma->vm_private_data = proc;
5699
5700 return binder_alloc_mmap_handler(&proc->alloc, vma);
5701 }
5702
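/*
 * Set up a new binder_proc for this open(). The backing binder_device
 * comes either from i_private (binderfs) or from the miscdevice, and
 * the per-PID debugfs/binderfs log entries are created only on the
 * first open for a given PID, since the dump code prints all contexts
 * for a PID anyway.
 */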
5703 static int binder_open(struct inode *nodp, struct file *filp)
5704 {
5705 struct binder_proc *proc, *itr;
5706 struct binder_device *binder_dev;
5707 struct binderfs_info *info;
5708 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5709 bool existing_pid = false;
5710
5711 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5712 current->group_leader->pid, current->pid);
5713
5714 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5715 if (proc == NULL)
5716 return -ENOMEM;
5717
5718 dbitmap_init(&proc->dmap);
5719 spin_lock_init(&proc->inner_lock);
5720 spin_lock_init(&proc->outer_lock);
5721 get_task_struct(current->group_leader);
5722 proc->tsk = current->group_leader;
5723 proc->cred = get_cred(filp->f_cred);
5724 INIT_LIST_HEAD(&proc->todo);
5725 init_waitqueue_head(&proc->freeze_wait);
5726 proc->default_priority = task_nice(current);
5727 /* binderfs stashes devices in i_private */
5728 if (is_binderfs_device(nodp)) {
5729 binder_dev = nodp->i_private;
5730 info = nodp->i_sb->s_fs_info;
5731 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5732 } else {
5733 binder_dev = container_of(filp->private_data,
5734 struct binder_device, miscdev);
5735 }
5736 refcount_inc(&binder_dev->ref);
5737 proc->context = &binder_dev->context;
5738 binder_alloc_init(&proc->alloc);
5739
5740 binder_stats_created(BINDER_STAT_PROC);
5741 proc->pid = current->group_leader->pid;
5742 INIT_LIST_HEAD(&proc->delivered_death);
5743 INIT_LIST_HEAD(&proc->waiting_threads);
5744 filp->private_data = proc;
5745
5746 mutex_lock(&binder_procs_lock);
5747 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5748 if (itr->pid == proc->pid) {
5749 existing_pid = true;
5750 break;
5751 }
5752 }
5753 hlist_add_head(&proc->proc_node, &binder_procs);
5754 mutex_unlock(&binder_procs_lock);
5755
5756 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5757 char strbuf[11];
5758
5759 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5760 /*
5761 * proc debug entries are shared between contexts.
5762 * Only create for the first PID to avoid debugfs log spamming.
5763 * The printing code will print all contexts for a given PID
5764 * anyway, so this is not a problem.
5765 */
5766 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5767 binder_debugfs_dir_entry_proc,
5768 (void *)(unsigned long)proc->pid,
5769 &proc_fops);
5770 }
5771
5772 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5773 char strbuf[11];
5774 struct dentry *binderfs_entry;
5775
5776 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5777 /*
5778 * Similar to debugfs, the process-specific log file is shared
5779 * between contexts, so only create it for the first PID.
5780 * This is fine because, as with debugfs, the log file will
5781 * contain information on all contexts of a given PID.
5782 */
5783 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5784 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5785 if (!IS_ERR(binderfs_entry)) {
5786 proc->binderfs_entry = binderfs_entry;
5787 } else {
5788 int error;
5789
5790 error = PTR_ERR(binderfs_entry);
5791 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5792 strbuf, error);
5793 }
5794 }
5795
5796 return 0;
5797 }
5798
5799 static int binder_flush(struct file *filp, fl_owner_t id)
5800 {
5801 struct binder_proc *proc = filp->private_data;
5802
5803 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5804
5805 return 0;
5806 }
5807
5808 static void binder_deferred_flush(struct binder_proc *proc)
5809 {
5810 struct rb_node *n;
5811 int wake_count = 0;
5812
5813 binder_inner_proc_lock(proc);
5814 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5815 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5816
5817 thread->looper_need_return = true;
5818 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5819 wake_up_interruptible(&thread->wait);
5820 wake_count++;
5821 }
5822 }
5823 binder_inner_proc_unlock(proc);
5824
5825 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5826 "binder_flush: %d woke %d threads\n", proc->pid,
5827 wake_count);
5828 }
5829
5830 static int binder_release(struct inode *nodp, struct file *filp)
5831 {
5832 struct binder_proc *proc = filp->private_data;
5833
5834 debugfs_remove(proc->debugfs_entry);
5835
5836 if (proc->binderfs_entry) {
5837 binderfs_remove_file(proc->binderfs_entry);
5838 proc->binderfs_entry = NULL;
5839 }
5840
5841 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5842
5843 return 0;
5844 }
5845
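/*
 * Release one node of a dying process. If nothing else references the
 * node it is freed outright; otherwise it is moved to the global
 * binder_dead_nodes list and a BINDER_WORK_DEAD_BINDER item is queued
 * for every ref that requested a death notification. Returns the
 * running count of incoming refs for the caller's statistics.
 */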
5846 static int binder_node_release(struct binder_node *node, int refs)
5847 {
5848 struct binder_ref *ref;
5849 int death = 0;
5850 struct binder_proc *proc = node->proc;
5851
5852 binder_release_work(proc, &node->async_todo);
5853
5854 binder_node_lock(node);
5855 binder_inner_proc_lock(proc);
5856 binder_dequeue_work_ilocked(&node->work);
5857 /*
5858 * The caller must have taken a temporary ref on the node.
5859 */
5860 BUG_ON(!node->tmp_refs);
5861 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5862 binder_inner_proc_unlock(proc);
5863 binder_node_unlock(node);
5864 binder_free_node(node);
5865
5866 return refs;
5867 }
5868
5869 node->proc = NULL;
5870 node->local_strong_refs = 0;
5871 node->local_weak_refs = 0;
5872 binder_inner_proc_unlock(proc);
5873
5874 spin_lock(&binder_dead_nodes_lock);
5875 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5876 spin_unlock(&binder_dead_nodes_lock);
5877
5878 hlist_for_each_entry(ref, &node->refs, node_entry) {
5879 refs++;
5880 /*
5881 * Need the node lock to synchronize
5882 * with new notification requests and the
5883 * inner lock to synchronize with queued
5884 * death notifications.
5885 */
5886 binder_inner_proc_lock(ref->proc);
5887 if (!ref->death) {
5888 binder_inner_proc_unlock(ref->proc);
5889 continue;
5890 }
5891
5892 death++;
5893
5894 BUG_ON(!list_empty(&ref->death->work.entry));
5895 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5896 binder_enqueue_work_ilocked(&ref->death->work,
5897 &ref->proc->todo);
5898 binder_wakeup_proc_ilocked(ref->proc);
5899 binder_inner_proc_unlock(ref->proc);
5900 }
5901
5902 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5903 "node %d now dead, refs %d, death %d\n",
5904 node->debug_id, refs, death);
5905 binder_node_unlock(node);
5906 binder_put_node(node);
5907
5908 return refs;
5909 }
5910
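/*
 * Deferred half of binder_release(): unpublish the proc, drop the
 * context manager node if it belonged to us, then release all threads,
 * nodes, outgoing refs and queued work before dropping the temporary
 * reference that kept the proc alive during teardown.
 */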
5911 static void binder_deferred_release(struct binder_proc *proc)
5912 {
5913 struct binder_context *context = proc->context;
5914 struct rb_node *n;
5915 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5916
5917 mutex_lock(&binder_procs_lock);
5918 hlist_del(&proc->proc_node);
5919 mutex_unlock(&binder_procs_lock);
5920
5921 mutex_lock(&context->context_mgr_node_lock);
5922 if (context->binder_context_mgr_node &&
5923 context->binder_context_mgr_node->proc == proc) {
5924 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5925 "%s: %d context_mgr_node gone\n",
5926 __func__, proc->pid);
5927 context->binder_context_mgr_node = NULL;
5928 }
5929 mutex_unlock(&context->context_mgr_node_lock);
5930 binder_inner_proc_lock(proc);
5931 /*
5932 * Make sure the proc stays alive after we
5933 * remove all of its threads.
5934 */
5935 proc->tmp_ref++;
5936
5937 proc->is_dead = true;
5938 proc->is_frozen = false;
5939 proc->sync_recv = false;
5940 proc->async_recv = false;
5941 threads = 0;
5942 active_transactions = 0;
5943 while ((n = rb_first(&proc->threads))) {
5944 struct binder_thread *thread;
5945
5946 thread = rb_entry(n, struct binder_thread, rb_node);
5947 binder_inner_proc_unlock(proc);
5948 threads++;
5949 active_transactions += binder_thread_release(proc, thread);
5950 binder_inner_proc_lock(proc);
5951 }
5952
5953 nodes = 0;
5954 incoming_refs = 0;
5955 while ((n = rb_first(&proc->nodes))) {
5956 struct binder_node *node;
5957
5958 node = rb_entry(n, struct binder_node, rb_node);
5959 nodes++;
5960 /*
5961 * take a temporary ref on the node before
5962 * calling binder_node_release() which will either
5963 * kfree() the node or call binder_put_node()
5964 */
5965 binder_inc_node_tmpref_ilocked(node);
5966 rb_erase(&node->rb_node, &proc->nodes);
5967 binder_inner_proc_unlock(proc);
5968 incoming_refs = binder_node_release(node, incoming_refs);
5969 binder_inner_proc_lock(proc);
5970 }
5971 binder_inner_proc_unlock(proc);
5972
5973 outgoing_refs = 0;
5974 binder_proc_lock(proc);
5975 while ((n = rb_first(&proc->refs_by_desc))) {
5976 struct binder_ref *ref;
5977
5978 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5979 outgoing_refs++;
5980 binder_cleanup_ref_olocked(ref);
5981 binder_proc_unlock(proc);
5982 binder_free_ref(ref);
5983 binder_proc_lock(proc);
5984 }
5985 binder_proc_unlock(proc);
5986
5987 binder_release_work(proc, &proc->todo);
5988 binder_release_work(proc, &proc->delivered_death);
5989
5990 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5991 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5992 __func__, proc->pid, threads, nodes, incoming_refs,
5993 outgoing_refs, active_transactions);
5994
5995 binder_proc_dec_tmpref(proc);
5996 }
5997
5998 static void binder_deferred_func(struct work_struct *work)
5999 {
6000 struct binder_proc *proc;
6001
6002 int defer;
6003
6004 do {
6005 mutex_lock(&binder_deferred_lock);
6006 if (!hlist_empty(&binder_deferred_list)) {
6007 proc = hlist_entry(binder_deferred_list.first,
6008 struct binder_proc, deferred_work_node);
6009 hlist_del_init(&proc->deferred_work_node);
6010 defer = proc->deferred_work;
6011 proc->deferred_work = 0;
6012 } else {
6013 proc = NULL;
6014 defer = 0;
6015 }
6016 mutex_unlock(&binder_deferred_lock);
6017
6018 if (defer & BINDER_DEFERRED_FLUSH)
6019 binder_deferred_flush(proc);
6020
6021 if (defer & BINDER_DEFERRED_RELEASE)
6022 binder_deferred_release(proc); /* frees proc */
6023 } while (proc);
6024 }
6025 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6026
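/*
 * Queue deferred work (flush/release) for a proc and kick the shared
 * work item. Flags accumulate if the proc is already queued.
 */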
6027 static void
6028 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6029 {
6030 mutex_lock(&binder_deferred_lock);
6031 proc->deferred_work |= defer;
6032 if (hlist_unhashed(&proc->deferred_work_node)) {
6033 hlist_add_head(&proc->deferred_work_node,
6034 &binder_deferred_list);
6035 schedule_work(&binder_deferred_work);
6036 }
6037 mutex_unlock(&binder_deferred_lock);
6038 }
6039
6040 static void print_binder_transaction_ilocked(struct seq_file *m,
6041 struct binder_proc *proc,
6042 const char *prefix,
6043 struct binder_transaction *t)
6044 {
6045 struct binder_proc *to_proc;
6046 struct binder_buffer *buffer = t->buffer;
6047 ktime_t current_time = ktime_get();
6048
6049 spin_lock(&t->lock);
6050 to_proc = t->to_proc;
6051 seq_printf(m,
6052 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6053 prefix, t->debug_id, t,
6054 t->from_pid,
6055 t->from_tid,
6056 to_proc ? to_proc->pid : 0,
6057 t->to_thread ? t->to_thread->pid : 0,
6058 t->code, t->flags, t->priority, t->need_reply,
6059 ktime_ms_delta(current_time, t->start_time));
6060 spin_unlock(&t->lock);
6061
6062 if (proc != to_proc) {
6063 /*
6064 * Can only safely deref buffer if we are holding the
6065 * correct proc inner lock for this node
6066 */
6067 seq_puts(m, "\n");
6068 return;
6069 }
6070
6071 if (buffer == NULL) {
6072 seq_puts(m, " buffer free\n");
6073 return;
6074 }
6075 if (buffer->target_node)
6076 seq_printf(m, " node %d", buffer->target_node->debug_id);
6077 seq_printf(m, " size %zd:%zd offset %lx\n",
6078 buffer->data_size, buffer->offsets_size,
6079 proc->alloc.buffer - buffer->user_data);
6080 }
6081
6082 static void print_binder_work_ilocked(struct seq_file *m,
6083 struct binder_proc *proc,
6084 const char *prefix,
6085 const char *transaction_prefix,
6086 struct binder_work *w)
6087 {
6088 struct binder_node *node;
6089 struct binder_transaction *t;
6090
6091 switch (w->type) {
6092 case BINDER_WORK_TRANSACTION:
6093 t = container_of(w, struct binder_transaction, work);
6094 print_binder_transaction_ilocked(
6095 m, proc, transaction_prefix, t);
6096 break;
6097 case BINDER_WORK_RETURN_ERROR: {
6098 struct binder_error *e = container_of(
6099 w, struct binder_error, work);
6100
6101 seq_printf(m, "%stransaction error: %u\n",
6102 prefix, e->cmd);
6103 } break;
6104 case BINDER_WORK_TRANSACTION_COMPLETE:
6105 seq_printf(m, "%stransaction complete\n", prefix);
6106 break;
6107 case BINDER_WORK_NODE:
6108 node = container_of(w, struct binder_node, work);
6109 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6110 prefix, node->debug_id,
6111 (u64)node->ptr, (u64)node->cookie);
6112 break;
6113 case BINDER_WORK_DEAD_BINDER:
6114 seq_printf(m, "%shas dead binder\n", prefix);
6115 break;
6116 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6117 seq_printf(m, "%shas cleared dead binder\n", prefix);
6118 break;
6119 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6120 seq_printf(m, "%shas cleared death notification\n", prefix);
6121 break;
6122 default:
6123 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6124 break;
6125 }
6126 }
6127
6128 static void print_binder_thread_ilocked(struct seq_file *m,
6129 struct binder_thread *thread,
6130 int print_always)
6131 {
6132 struct binder_transaction *t;
6133 struct binder_work *w;
6134 size_t start_pos = m->count;
6135 size_t header_pos;
6136
6137 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6138 thread->pid, thread->looper,
6139 thread->looper_need_return,
6140 atomic_read(&thread->tmp_ref));
6141 header_pos = m->count;
6142 t = thread->transaction_stack;
6143 while (t) {
6144 if (t->from == thread) {
6145 print_binder_transaction_ilocked(m, thread->proc,
6146 " outgoing transaction", t);
6147 t = t->from_parent;
6148 } else if (t->to_thread == thread) {
6149 print_binder_transaction_ilocked(m, thread->proc,
6150 " incoming transaction", t);
6151 t = t->to_parent;
6152 } else {
6153 print_binder_transaction_ilocked(m, thread->proc,
6154 " bad transaction", t);
6155 t = NULL;
6156 }
6157 }
6158 list_for_each_entry(w, &thread->todo, entry) {
6159 print_binder_work_ilocked(m, thread->proc, " ",
6160 " pending transaction", w);
6161 }
6162 if (!print_always && m->count == header_pos)
6163 m->count = start_pos;
6164 }
6165
6166 static void print_binder_node_nilocked(struct seq_file *m,
6167 struct binder_node *node)
6168 {
6169 struct binder_ref *ref;
6170 struct binder_work *w;
6171 int count;
6172
6173 count = hlist_count_nodes(&node->refs);
6174
6175 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6176 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6177 node->has_strong_ref, node->has_weak_ref,
6178 node->local_strong_refs, node->local_weak_refs,
6179 node->internal_strong_refs, count, node->tmp_refs);
6180 if (count) {
6181 seq_puts(m, " proc");
6182 hlist_for_each_entry(ref, &node->refs, node_entry)
6183 seq_printf(m, " %d", ref->proc->pid);
6184 }
6185 seq_puts(m, "\n");
6186 if (node->proc) {
6187 list_for_each_entry(w, &node->async_todo, entry)
6188 print_binder_work_ilocked(m, node->proc, " ",
6189 " pending async transaction", w);
6190 }
6191 }
6192
6193 static void print_binder_ref_olocked(struct seq_file *m,
6194 struct binder_ref *ref)
6195 {
6196 binder_node_lock(ref->node);
6197 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6198 ref->data.debug_id, ref->data.desc,
6199 ref->node->proc ? "" : "dead ",
6200 ref->node->debug_id, ref->data.strong,
6201 ref->data.weak, ref->death);
6202 binder_node_unlock(ref->node);
6203 }
6204
6205 static void print_binder_proc(struct seq_file *m,
6206 struct binder_proc *proc, int print_all)
6207 {
6208 struct binder_work *w;
6209 struct rb_node *n;
6210 size_t start_pos = m->count;
6211 size_t header_pos;
6212 struct binder_node *last_node = NULL;
6213
6214 seq_printf(m, "proc %d\n", proc->pid);
6215 seq_printf(m, "context %s\n", proc->context->name);
6216 header_pos = m->count;
6217
6218 binder_inner_proc_lock(proc);
6219 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6220 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6221 rb_node), print_all);
6222
6223 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6224 struct binder_node *node = rb_entry(n, struct binder_node,
6225 rb_node);
6226 if (!print_all && !node->has_async_transaction)
6227 continue;
6228
6229 /*
6230 * take a temporary reference on the node so it
6231 * survives and isn't removed from the tree
6232 * while we print it.
6233 */
6234 binder_inc_node_tmpref_ilocked(node);
6235 /* Need to drop inner lock to take node lock */
6236 binder_inner_proc_unlock(proc);
6237 if (last_node)
6238 binder_put_node(last_node);
6239 binder_node_inner_lock(node);
6240 print_binder_node_nilocked(m, node);
6241 binder_node_inner_unlock(node);
6242 last_node = node;
6243 binder_inner_proc_lock(proc);
6244 }
6245 binder_inner_proc_unlock(proc);
6246 if (last_node)
6247 binder_put_node(last_node);
6248
6249 if (print_all) {
6250 binder_proc_lock(proc);
6251 for (n = rb_first(&proc->refs_by_desc);
6252 n != NULL;
6253 n = rb_next(n))
6254 print_binder_ref_olocked(m, rb_entry(n,
6255 struct binder_ref,
6256 rb_node_desc));
6257 binder_proc_unlock(proc);
6258 }
6259 binder_alloc_print_allocated(m, &proc->alloc);
6260 binder_inner_proc_lock(proc);
6261 list_for_each_entry(w, &proc->todo, entry)
6262 print_binder_work_ilocked(m, proc, " ",
6263 " pending transaction", w);
6264 list_for_each_entry(w, &proc->delivered_death, entry) {
6265 seq_puts(m, " has delivered dead binder\n");
6266 break;
6267 }
6268 binder_inner_proc_unlock(proc);
6269 if (!print_all && m->count == header_pos)
6270 m->count = start_pos;
6271 }
6272
6273 static const char * const binder_return_strings[] = {
6274 "BR_ERROR",
6275 "BR_OK",
6276 "BR_TRANSACTION",
6277 "BR_REPLY",
6278 "BR_ACQUIRE_RESULT",
6279 "BR_DEAD_REPLY",
6280 "BR_TRANSACTION_COMPLETE",
6281 "BR_INCREFS",
6282 "BR_ACQUIRE",
6283 "BR_RELEASE",
6284 "BR_DECREFS",
6285 "BR_ATTEMPT_ACQUIRE",
6286 "BR_NOOP",
6287 "BR_SPAWN_LOOPER",
6288 "BR_FINISHED",
6289 "BR_DEAD_BINDER",
6290 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6291 "BR_FAILED_REPLY",
6292 "BR_FROZEN_REPLY",
6293 "BR_ONEWAY_SPAM_SUSPECT",
6294 "BR_TRANSACTION_PENDING_FROZEN"
6295 };
6296
6297 static const char * const binder_command_strings[] = {
6298 "BC_TRANSACTION",
6299 "BC_REPLY",
6300 "BC_ACQUIRE_RESULT",
6301 "BC_FREE_BUFFER",
6302 "BC_INCREFS",
6303 "BC_ACQUIRE",
6304 "BC_RELEASE",
6305 "BC_DECREFS",
6306 "BC_INCREFS_DONE",
6307 "BC_ACQUIRE_DONE",
6308 "BC_ATTEMPT_ACQUIRE",
6309 "BC_REGISTER_LOOPER",
6310 "BC_ENTER_LOOPER",
6311 "BC_EXIT_LOOPER",
6312 "BC_REQUEST_DEATH_NOTIFICATION",
6313 "BC_CLEAR_DEATH_NOTIFICATION",
6314 "BC_DEAD_BINDER_DONE",
6315 "BC_TRANSACTION_SG",
6316 "BC_REPLY_SG",
6317 };
6318
6319 static const char * const binder_objstat_strings[] = {
6320 "proc",
6321 "thread",
6322 "node",
6323 "ref",
6324 "death",
6325 "transaction",
6326 "transaction_complete"
6327 };
6328
6329 static void print_binder_stats(struct seq_file *m, const char *prefix,
6330 struct binder_stats *stats)
6331 {
6332 int i;
6333
6334 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6335 ARRAY_SIZE(binder_command_strings));
6336 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6337 int temp = atomic_read(&stats->bc[i]);
6338
6339 if (temp)
6340 seq_printf(m, "%s%s: %d\n", prefix,
6341 binder_command_strings[i], temp);
6342 }
6343
6344 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6345 ARRAY_SIZE(binder_return_strings));
6346 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6347 int temp = atomic_read(&stats->br[i]);
6348
6349 if (temp)
6350 seq_printf(m, "%s%s: %d\n", prefix,
6351 binder_return_strings[i], temp);
6352 }
6353
6354 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6355 ARRAY_SIZE(binder_objstat_strings));
6356 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6357 ARRAY_SIZE(stats->obj_deleted));
6358 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6359 int created = atomic_read(&stats->obj_created[i]);
6360 int deleted = atomic_read(&stats->obj_deleted[i]);
6361
6362 if (created || deleted)
6363 seq_printf(m, "%s%s: active %d total %d\n",
6364 prefix,
6365 binder_objstat_strings[i],
6366 created - deleted,
6367 created);
6368 }
6369 }
6370
6371 static void print_binder_proc_stats(struct seq_file *m,
6372 struct binder_proc *proc)
6373 {
6374 struct binder_work *w;
6375 struct binder_thread *thread;
6376 struct rb_node *n;
6377 int count, strong, weak, ready_threads;
6378 size_t free_async_space =
6379 binder_alloc_get_free_async_space(&proc->alloc);
6380
6381 seq_printf(m, "proc %d\n", proc->pid);
6382 seq_printf(m, "context %s\n", proc->context->name);
6383 count = 0;
6384 ready_threads = 0;
6385 binder_inner_proc_lock(proc);
6386 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6387 count++;
6388
6389 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6390 ready_threads++;
6391
6392 seq_printf(m, " threads: %d\n", count);
6393 seq_printf(m, " requested threads: %d+%d/%d\n"
6394 " ready threads %d\n"
6395 " free async space %zd\n", proc->requested_threads,
6396 proc->requested_threads_started, proc->max_threads,
6397 ready_threads,
6398 free_async_space);
6399 count = 0;
6400 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6401 count++;
6402 binder_inner_proc_unlock(proc);
6403 seq_printf(m, " nodes: %d\n", count);
6404 count = 0;
6405 strong = 0;
6406 weak = 0;
6407 binder_proc_lock(proc);
6408 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6409 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6410 rb_node_desc);
6411 count++;
6412 strong += ref->data.strong;
6413 weak += ref->data.weak;
6414 }
6415 binder_proc_unlock(proc);
6416 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6417
6418 count = binder_alloc_get_allocated_count(&proc->alloc);
6419 seq_printf(m, " buffers: %d\n", count);
6420
6421 binder_alloc_print_pages(m, &proc->alloc);
6422
6423 count = 0;
6424 binder_inner_proc_lock(proc);
6425 list_for_each_entry(w, &proc->todo, entry) {
6426 if (w->type == BINDER_WORK_TRANSACTION)
6427 count++;
6428 }
6429 binder_inner_proc_unlock(proc);
6430 seq_printf(m, " pending transactions: %d\n", count);
6431
6432 print_binder_stats(m, " ", &proc->stats);
6433 }
6434
6435 static int state_show(struct seq_file *m, void *unused)
6436 {
6437 struct binder_proc *proc;
6438 struct binder_node *node;
6439 struct binder_node *last_node = NULL;
6440
6441 seq_puts(m, "binder state:\n");
6442
6443 spin_lock(&binder_dead_nodes_lock);
6444 if (!hlist_empty(&binder_dead_nodes))
6445 seq_puts(m, "dead nodes:\n");
6446 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6447 /*
6448 * take a temporary reference on the node so it
6449 * survives and isn't removed from the list
6450 * while we print it.
6451 */
6452 node->tmp_refs++;
6453 spin_unlock(&binder_dead_nodes_lock);
6454 if (last_node)
6455 binder_put_node(last_node);
6456 binder_node_lock(node);
6457 print_binder_node_nilocked(m, node);
6458 binder_node_unlock(node);
6459 last_node = node;
6460 spin_lock(&binder_dead_nodes_lock);
6461 }
6462 spin_unlock(&binder_dead_nodes_lock);
6463 if (last_node)
6464 binder_put_node(last_node);
6465
6466 mutex_lock(&binder_procs_lock);
6467 hlist_for_each_entry(proc, &binder_procs, proc_node)
6468 print_binder_proc(m, proc, 1);
6469 mutex_unlock(&binder_procs_lock);
6470
6471 return 0;
6472 }
6473
6474 static int stats_show(struct seq_file *m, void *unused)
6475 {
6476 struct binder_proc *proc;
6477
6478 seq_puts(m, "binder stats:\n");
6479
6480 print_binder_stats(m, "", &binder_stats);
6481
6482 mutex_lock(&binder_procs_lock);
6483 hlist_for_each_entry(proc, &binder_procs, proc_node)
6484 print_binder_proc_stats(m, proc);
6485 mutex_unlock(&binder_procs_lock);
6486
6487 return 0;
6488 }
6489
6490 static int transactions_show(struct seq_file *m, void *unused)
6491 {
6492 struct binder_proc *proc;
6493
6494 seq_puts(m, "binder transactions:\n");
6495 mutex_lock(&binder_procs_lock);
6496 hlist_for_each_entry(proc, &binder_procs, proc_node)
6497 print_binder_proc(m, proc, 0);
6498 mutex_unlock(&binder_procs_lock);
6499
6500 return 0;
6501 }
6502
6503 static int proc_show(struct seq_file *m, void *unused)
6504 {
6505 struct binder_proc *itr;
6506 int pid = (unsigned long)m->private;
6507
6508 mutex_lock(&binder_procs_lock);
6509 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6510 if (itr->pid == pid) {
6511 seq_puts(m, "binder proc state:\n");
6512 print_binder_proc(m, itr, 1);
6513 }
6514 }
6515 mutex_unlock(&binder_procs_lock);
6516
6517 return 0;
6518 }
6519
6520 static void print_binder_transaction_log_entry(struct seq_file *m,
6521 struct binder_transaction_log_entry *e)
6522 {
6523 int debug_id = READ_ONCE(e->debug_id_done);
6524 /*
6525 * read barrier to guarantee debug_id_done read before
6526 * we print the log values
6527 */
6528 smp_rmb();
6529 seq_printf(m,
6530 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6531 e->debug_id, (e->call_type == 2) ? "reply" :
6532 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6533 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6534 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6535 e->return_error, e->return_error_param,
6536 e->return_error_line);
6537 /*
6538 * read-barrier to guarantee read of debug_id_done after
6539 * done printing the fields of the entry
6540 */
6541 smp_rmb();
6542 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6543 "\n" : " (incomplete)\n");
6544 }
6545
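/*
 * Dump the transaction log ring buffer, oldest entry first. Until the
 * log first fills, entries 0..cur are printed in order; once it has
 * wrapped, printing starts just past the most recently written slot.
 * Entries being rewritten concurrently are marked "(incomplete)".
 */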
6546 static int transaction_log_show(struct seq_file *m, void *unused)
6547 {
6548 struct binder_transaction_log *log = m->private;
6549 unsigned int log_cur = atomic_read(&log->cur);
6550 unsigned int count;
6551 unsigned int cur;
6552 int i;
6553
6554 count = log_cur + 1;
6555 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6556 0 : count % ARRAY_SIZE(log->entry);
6557 if (count > ARRAY_SIZE(log->entry) || log->full)
6558 count = ARRAY_SIZE(log->entry);
6559 for (i = 0; i < count; i++) {
6560 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6561
6562 print_binder_transaction_log_entry(m, &log->entry[index]);
6563 }
6564 return 0;
6565 }
6566
6567 const struct file_operations binder_fops = {
6568 .owner = THIS_MODULE,
6569 .poll = binder_poll,
6570 .unlocked_ioctl = binder_ioctl,
6571 .compat_ioctl = compat_ptr_ioctl,
6572 .mmap = binder_mmap,
6573 .open = binder_open,
6574 .flush = binder_flush,
6575 .release = binder_release,
6576 };
6577
6578 DEFINE_SHOW_ATTRIBUTE(state);
6579 DEFINE_SHOW_ATTRIBUTE(stats);
6580 DEFINE_SHOW_ATTRIBUTE(transactions);
6581 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6582
6583 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6584 {
6585 .name = "state",
6586 .mode = 0444,
6587 .fops = &state_fops,
6588 .data = NULL,
6589 },
6590 {
6591 .name = "stats",
6592 .mode = 0444,
6593 .fops = &stats_fops,
6594 .data = NULL,
6595 },
6596 {
6597 .name = "transactions",
6598 .mode = 0444,
6599 .fops = &transactions_fops,
6600 .data = NULL,
6601 },
6602 {
6603 .name = "transaction_log",
6604 .mode = 0444,
6605 .fops = &transaction_log_fops,
6606 .data = &binder_transaction_log,
6607 },
6608 {
6609 .name = "failed_transaction_log",
6610 .mode = 0444,
6611 .fops = &transaction_log_fops,
6612 .data = &binder_transaction_log_failed,
6613 },
6614 {} /* terminator */
6615 };
6616
6617 static int __init init_binder_device(const char *name)
6618 {
6619 int ret;
6620 struct binder_device *binder_device;
6621
6622 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6623 if (!binder_device)
6624 return -ENOMEM;
6625
6626 binder_device->miscdev.fops = &binder_fops;
6627 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6628 binder_device->miscdev.name = name;
6629
6630 refcount_set(&binder_device->ref, 1);
6631 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6632 binder_device->context.name = name;
6633 mutex_init(&binder_device->context.context_mgr_node_lock);
6634
6635 ret = misc_register(&binder_device->miscdev);
6636 if (ret < 0) {
6637 kfree(binder_device);
6638 return ret;
6639 }
6640
6641 hlist_add_head(&binder_device->hlist, &binder_devices);
6642
6643 return ret;
6644 }
6645
6646 static int __init binder_init(void)
6647 {
6648 int ret;
6649 char *device_name, *device_tmp;
6650 struct binder_device *device;
6651 struct hlist_node *tmp;
6652 char *device_names = NULL;
6653 const struct binder_debugfs_entry *db_entry;
6654
6655 ret = binder_alloc_shrinker_init();
6656 if (ret)
6657 return ret;
6658
6659 atomic_set(&binder_transaction_log.cur, ~0U);
6660 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6661
6662 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6663
6664 binder_for_each_debugfs_entry(db_entry)
6665 debugfs_create_file(db_entry->name,
6666 db_entry->mode,
6667 binder_debugfs_dir_entry_root,
6668 db_entry->data,
6669 db_entry->fops);
6670
6671 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6672 binder_debugfs_dir_entry_root);
6673
6674 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6675 strcmp(binder_devices_param, "") != 0) {
6676 /*
6677 * Copy the module parameter string, because we don't want to
6678 * tokenize it in-place.
6679 */
6680 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6681 if (!device_names) {
6682 ret = -ENOMEM;
6683 goto err_alloc_device_names_failed;
6684 }
6685
6686 device_tmp = device_names;
6687 while ((device_name = strsep(&device_tmp, ","))) {
6688 ret = init_binder_device(device_name);
6689 if (ret)
6690 goto err_init_binder_device_failed;
6691 }
6692 }
6693
6694 ret = init_binderfs();
6695 if (ret)
6696 goto err_init_binder_device_failed;
6697
6698 return ret;
6699
6700 err_init_binder_device_failed:
6701 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6702 misc_deregister(&device->miscdev);
6703 hlist_del(&device->hlist);
6704 kfree(device);
6705 }
6706
6707 kfree(device_names);
6708
6709 err_alloc_device_names_failed:
6710 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6711 binder_alloc_shrinker_exit();
6712
6713 return ret;
6714 }
6715
6716 device_initcall(binder_init);
6717
6718 #define CREATE_TRACE_POINTS
6719 #include "binder_trace.h"
6720
6721 MODULE_LICENSE("GPL v2");
6722