// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
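/*
 * Illustrative sketch of the ordering documented above (not driver
 * code): a hypothetical path that needed all three locks for a node
 * attached to a live proc would nest them strictly 1) -> 2) -> 3):
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */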
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}
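/*
 * Usage example (illustrative): debug_mask is a writable (0644) module
 * parameter, so extra debug classes can be enabled at runtime, e.g.
 * from a root shell (path assumes the driver is named "binder"):
 *
 *	echo 0x240 > /sys/module/binder/parameters/debug_mask
 *
 * which replaces the current mask with BINDER_DEBUG_TRANSACTION
 * (1U << 9 == 0x200) plus BINDER_DEBUG_READ_WRITE (1U << 6 == 0x40).
 */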
#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};
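/*
 * Illustrative arithmetic for the transaction-log ring buffer above:
 * with 32 entries, atomic_inc_return() hands out monotonically
 * increasing slot numbers and cur % ARRAY_SIZE(log->entry) wraps them.
 * E.g. the 40th add returns cur == 40, marks the log full (40 >= 32)
 * and reuses slot 40 % 32 == 8.
 */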
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
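/*
 * Note on the sparse annotations above (illustrative): when node->proc
 * is NULL only node->lock is really taken, but the dummy __acquire()/
 * __release() calls keep sparse's lock-context tracking balanced, so
 * both branches of the pair appear to take and drop the same two locks:
 *
 *	binder_node_inner_lock(node);	// node->lock (+ inner_lock if proc)
 *	...
 *	binder_node_inner_unlock(node);
 */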
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	  struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
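/*
 * Illustrative contrast of the two thread-enqueue flavours above (not
 * driver code; "work" stands in for any struct binder_work):
 *
 *	binder_enqueue_thread_work_ilocked(thread, work);
 *		// queues and sets process_todo: the thread will pick
 *		// the item up on its next (or current) read
 *
 *	binder_enqueue_deferred_thread_work_ilocked(thread, work);
 *		// queues but leaves process_todo alone: an otherwise
 *		// idle thread may go back to sleep until unrelated
 *		// work sets the flag
 */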
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
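/*
 * Usage sketch (illustrative): binder_wakeup_proc_ilocked() above shows
 * the asynchronous form of the documented select-then-wake pattern; a
 * caller that is itself about to block (e.g. waiting for a reply) might
 * instead request a synchronous handoff:
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 */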
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
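/*
 * Lifetime sketch for the lookup helpers above (illustrative): every
 * successful binder_get_node()/binder_new_node() returns with an extra
 * tmp_refs reference, so a typical caller pairs it with binder_put_node():
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...			// node cannot be freed here
 *		binder_put_node(node);
 *	}
 */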
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
					struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
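/*
 * Summary of the counters juggled above (editorial note, derived from
 * the code): a node stays alive while any of internal_strong_refs
 * (roughly, strong refs held via other procs' binder_refs),
 * local_strong_refs, local_weak_refs, tmp_refs (short-term
 * driver-internal pins) or the node->refs list is non-zero/non-empty;
 * binder_dec_node_nilocked() returns true only once all of them reach
 * zero and it is safe to free the node.
 */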
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node_nilocked() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
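/*
 * Worked example for the descriptor scan above (illustrative): desc 0
 * is reserved for the context manager, so with existing descriptors
 * {1, 2, 4} the candidate starts at 1, is bumped past 1 and 2, and the
 * walk stops at 4 (4 > 3): the new ref gets desc 3, the smallest
 * unused value.
 */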
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	 ref to be incremented
 * @strong:	 if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
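/*
 * Illustrative pairing for the helpers above: only 0 <-> 1 transitions
 * of a ref's count are forwarded to the underlying node, e.g.:
 *
 *	binder_inc_ref_olocked(ref, 1, NULL);	// strong 0 -> 1: node gains a ref
 *	binder_inc_ref_olocked(ref, 1, NULL);	// strong 1 -> 2: node unchanged
 *	binder_dec_ref_olocked(ref, 1);		// strong 2 -> 1: node unchanged
 *	binder_dec_ref_olocked(ref, 1);		// strong 1 -> 0: node drops the ref
 */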
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
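/*
 * Usage sketch from the kernel-doc above (illustrative):
 *
 *	from = binder_get_txn_from(t);	// pins t->from via tmp_ref
 *	if (from) {
 *		...			// thread cannot be freed here
 *		binder_thread_dec_tmpref(from);
 *	}
 */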
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
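/*
 * Matching release sequence for binder_get_txn_from_and_acq_inner()
 * (illustrative, per its kernel-doc above):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...					// inner lock held
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */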
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
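/*
 * Worked example for the final size check in binder_get_object() above
 * (illustrative): with buffer->data_size == 64, an object at offset 48
 * whose type implies object_size == 24 fails the
 * "offset <= data_size - object_size" test (48 > 40) and is rejected,
 * since it would extend past the end of the buffer.
 */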
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* No previously verified object: nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:		callback_head for task work
 * @file:		file to fput() from task work
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close file given by task work
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual fput() on the
 * file pinned for it.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
This function is used to schedule 1913 * a file-descriptor to be closed after returning from binder_ioctl(). 1914 */ 1915 static void binder_deferred_fd_close(int fd) 1916 { 1917 struct binder_task_work_cb *twcb; 1918 1919 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); 1920 if (!twcb) 1921 return; 1922 init_task_work(&twcb->twork, binder_do_fd_close); 1923 twcb->file = close_fd_get_file(fd); 1924 if (twcb->file) { 1925 // pin it until binder_do_fd_close(); see comments there 1926 get_file(twcb->file); 1927 filp_close(twcb->file, current->files); 1928 task_work_add(current, &twcb->twork, TWA_RESUME); 1929 } else { 1930 kfree(twcb); 1931 } 1932 } 1933 1934 static void binder_transaction_buffer_release(struct binder_proc *proc, 1935 struct binder_thread *thread, 1936 struct binder_buffer *buffer, 1937 binder_size_t failed_at, 1938 bool is_failure) 1939 { 1940 int debug_id = buffer->debug_id; 1941 binder_size_t off_start_offset, buffer_offset, off_end_offset; 1942 1943 binder_debug(BINDER_DEBUG_TRANSACTION, 1944 "%d buffer release %d, size %zd-%zd, failed at %llx\n", 1945 proc->pid, buffer->debug_id, 1946 buffer->data_size, buffer->offsets_size, 1947 (unsigned long long)failed_at); 1948 1949 if (buffer->target_node) 1950 binder_dec_node(buffer->target_node, 1, 0); 1951 1952 off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); 1953 off_end_offset = is_failure && failed_at ? failed_at : 1954 off_start_offset + buffer->offsets_size; 1955 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 1956 buffer_offset += sizeof(binder_size_t)) { 1957 struct binder_object_header *hdr; 1958 size_t object_size = 0; 1959 struct binder_object object; 1960 binder_size_t object_offset; 1961 1962 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 1963 buffer, buffer_offset, 1964 sizeof(object_offset))) 1965 object_size = binder_get_object(proc, NULL, buffer, 1966 object_offset, &object); 1967 if (object_size == 0) { 1968 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 1969 debug_id, (u64)object_offset, buffer->data_size); 1970 continue; 1971 } 1972 hdr = &object.hdr; 1973 switch (hdr->type) { 1974 case BINDER_TYPE_BINDER: 1975 case BINDER_TYPE_WEAK_BINDER: { 1976 struct flat_binder_object *fp; 1977 struct binder_node *node; 1978 1979 fp = to_flat_binder_object(hdr); 1980 node = binder_get_node(proc, fp->binder); 1981 if (node == NULL) { 1982 pr_err("transaction release %d bad node %016llx\n", 1983 debug_id, (u64)fp->binder); 1984 break; 1985 } 1986 binder_debug(BINDER_DEBUG_TRANSACTION, 1987 " node %d u%016llx\n", 1988 node->debug_id, (u64)node->ptr); 1989 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 1990 0); 1991 binder_put_node(node); 1992 } break; 1993 case BINDER_TYPE_HANDLE: 1994 case BINDER_TYPE_WEAK_HANDLE: { 1995 struct flat_binder_object *fp; 1996 struct binder_ref_data rdata; 1997 int ret; 1998 1999 fp = to_flat_binder_object(hdr); 2000 ret = binder_dec_ref_for_handle(proc, fp->handle, 2001 hdr->type == BINDER_TYPE_HANDLE, &rdata); 2002 2003 if (ret) { 2004 pr_err("transaction release %d bad handle %d, ret = %d\n", 2005 debug_id, fp->handle, ret); 2006 break; 2007 } 2008 binder_debug(BINDER_DEBUG_TRANSACTION, 2009 " ref %d desc %d\n", 2010 rdata.debug_id, rdata.desc); 2011 } break; 2012 2013 case BINDER_TYPE_FD: { 2014 /* 2015 * No need to close the file here since user-space 2016 * closes it for successfully delivered 2017 * transactions. 
For transactions that weren't 2018 * delivered, the new fd was never allocated so 2019 * there is no need to close and the fput on the 2020 * file is done when the transaction is torn 2021 * down. 2022 */ 2023 } break; 2024 case BINDER_TYPE_PTR: 2025 /* 2026 * Nothing to do here, this will get cleaned up when the 2027 * transaction buffer gets freed 2028 */ 2029 break; 2030 case BINDER_TYPE_FDA: { 2031 struct binder_fd_array_object *fda; 2032 struct binder_buffer_object *parent; 2033 struct binder_object ptr_object; 2034 binder_size_t fda_offset; 2035 size_t fd_index; 2036 binder_size_t fd_buf_size; 2037 binder_size_t num_valid; 2038 2039 if (is_failure) { 2040 /* 2041 * The fd fixups have not been applied so no 2042 * fds need to be closed. 2043 */ 2044 continue; 2045 } 2046 2047 num_valid = (buffer_offset - off_start_offset) / 2048 sizeof(binder_size_t); 2049 fda = to_binder_fd_array_object(hdr); 2050 parent = binder_validate_ptr(proc, buffer, &ptr_object, 2051 fda->parent, 2052 off_start_offset, 2053 NULL, 2054 num_valid); 2055 if (!parent) { 2056 pr_err("transaction release %d bad parent offset\n", 2057 debug_id); 2058 continue; 2059 } 2060 fd_buf_size = sizeof(u32) * fda->num_fds; 2061 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2062 pr_err("transaction release %d invalid number of fds (%lld)\n", 2063 debug_id, (u64)fda->num_fds); 2064 continue; 2065 } 2066 if (fd_buf_size > parent->length || 2067 fda->parent_offset > parent->length - fd_buf_size) { 2068 /* No space for all file descriptors here. */ 2069 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2070 debug_id, (u64)fda->num_fds); 2071 continue; 2072 } 2073 /* 2074 * the source data for binder_buffer_object is visible 2075 * to user-space and the @buffer element is the user 2076 * pointer to the buffer_object containing the fd_array. 2077 * Convert the address to an offset relative to 2078 * the base of the transaction buffer. 
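 * For illustration (hypothetical values): if buffer->user_data is
 * 0x7f10000000, parent->buffer is 0x7f10000040 and fda->parent_offset
 * is 16, the fd array starts at offset 0x40 + 16 = 0x50 into the
 * transaction buffer.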
2079 */ 2080 fda_offset = 2081 (parent->buffer - (uintptr_t)buffer->user_data) + 2082 fda->parent_offset; 2083 for (fd_index = 0; fd_index < fda->num_fds; 2084 fd_index++) { 2085 u32 fd; 2086 int err; 2087 binder_size_t offset = fda_offset + 2088 fd_index * sizeof(fd); 2089 2090 err = binder_alloc_copy_from_buffer( 2091 &proc->alloc, &fd, buffer, 2092 offset, sizeof(fd)); 2093 WARN_ON(err); 2094 if (!err) { 2095 binder_deferred_fd_close(fd); 2096 /* 2097 * Need to make sure the thread goes 2098 * back to userspace to complete the 2099 * deferred close 2100 */ 2101 if (thread) 2102 thread->looper_need_return = true; 2103 } 2104 } 2105 } break; 2106 default: 2107 pr_err("transaction release %d bad object type %x\n", 2108 debug_id, hdr->type); 2109 break; 2110 } 2111 } 2112 } 2113 2114 static int binder_translate_binder(struct flat_binder_object *fp, 2115 struct binder_transaction *t, 2116 struct binder_thread *thread) 2117 { 2118 struct binder_node *node; 2119 struct binder_proc *proc = thread->proc; 2120 struct binder_proc *target_proc = t->to_proc; 2121 struct binder_ref_data rdata; 2122 int ret = 0; 2123 2124 node = binder_get_node(proc, fp->binder); 2125 if (!node) { 2126 node = binder_new_node(proc, fp); 2127 if (!node) 2128 return -ENOMEM; 2129 } 2130 if (fp->cookie != node->cookie) { 2131 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2132 proc->pid, thread->pid, (u64)fp->binder, 2133 node->debug_id, (u64)fp->cookie, 2134 (u64)node->cookie); 2135 ret = -EINVAL; 2136 goto done; 2137 } 2138 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { 2139 ret = -EPERM; 2140 goto done; 2141 } 2142 2143 ret = binder_inc_ref_for_node(target_proc, node, 2144 fp->hdr.type == BINDER_TYPE_BINDER, 2145 &thread->todo, &rdata); 2146 if (ret) 2147 goto done; 2148 2149 if (fp->hdr.type == BINDER_TYPE_BINDER) 2150 fp->hdr.type = BINDER_TYPE_HANDLE; 2151 else 2152 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2153 fp->binder = 0; 2154 fp->handle = rdata.desc; 2155 fp->cookie = 0; 2156 2157 trace_binder_transaction_node_to_ref(t, node, &rdata); 2158 binder_debug(BINDER_DEBUG_TRANSACTION, 2159 " node %d u%016llx -> ref %d desc %d\n", 2160 node->debug_id, (u64)node->ptr, 2161 rdata.debug_id, rdata.desc); 2162 done: 2163 binder_put_node(node); 2164 return ret; 2165 } 2166 2167 static int binder_translate_handle(struct flat_binder_object *fp, 2168 struct binder_transaction *t, 2169 struct binder_thread *thread) 2170 { 2171 struct binder_proc *proc = thread->proc; 2172 struct binder_proc *target_proc = t->to_proc; 2173 struct binder_node *node; 2174 struct binder_ref_data src_rdata; 2175 int ret = 0; 2176 2177 node = binder_get_node_from_ref(proc, fp->handle, 2178 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2179 if (!node) { 2180 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2181 proc->pid, thread->pid, fp->handle); 2182 return -EINVAL; 2183 } 2184 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { 2185 ret = -EPERM; 2186 goto done; 2187 } 2188 2189 binder_node_lock(node); 2190 if (node->proc == target_proc) { 2191 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2192 fp->hdr.type = BINDER_TYPE_BINDER; 2193 else 2194 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2195 fp->binder = node->ptr; 2196 fp->cookie = node->cookie; 2197 if (node->proc) 2198 binder_inner_proc_lock(node->proc); 2199 else 2200 __acquire(&node->proc->inner_lock); 2201 binder_inc_node_nilocked(node, 2202 fp->hdr.type == BINDER_TYPE_BINDER, 2203 0, NULL); 2204 
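		/*
		 * This mirrors the conditional lock above: for a dead node
		 * (node->proc == NULL) the __acquire()/__release() pair only
		 * keeps sparse's lock balance; no real lock is taken.
		 */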
if (node->proc)
2205 binder_inner_proc_unlock(node->proc);
2206 else
2207 __release(&node->proc->inner_lock);
2208 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2209 binder_debug(BINDER_DEBUG_TRANSACTION,
2210 " ref %d desc %d -> node %d u%016llx\n",
2211 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2212 (u64)node->ptr);
2213 binder_node_unlock(node);
2214 } else {
2215 struct binder_ref_data dest_rdata;
2216
2217 binder_node_unlock(node);
2218 ret = binder_inc_ref_for_node(target_proc, node,
2219 fp->hdr.type == BINDER_TYPE_HANDLE,
2220 NULL, &dest_rdata);
2221 if (ret)
2222 goto done;
2223
2224 fp->binder = 0;
2225 fp->handle = dest_rdata.desc;
2226 fp->cookie = 0;
2227 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2228 &dest_rdata);
2229 binder_debug(BINDER_DEBUG_TRANSACTION,
2230 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2231 src_rdata.debug_id, src_rdata.desc,
2232 dest_rdata.debug_id, dest_rdata.desc,
2233 node->debug_id);
2234 }
2235 done:
2236 binder_put_node(node);
2237 return ret;
2238 }
2239
2240 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2241 struct binder_transaction *t,
2242 struct binder_thread *thread,
2243 struct binder_transaction *in_reply_to)
2244 {
2245 struct binder_proc *proc = thread->proc;
2246 struct binder_proc *target_proc = t->to_proc;
2247 struct binder_txn_fd_fixup *fixup;
2248 struct file *file;
2249 int ret = 0;
2250 bool target_allows_fd;
2251
2252 if (in_reply_to)
2253 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2254 else
2255 target_allows_fd = t->buffer->target_node->accept_fds;
2256 if (!target_allows_fd) {
2257 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2258 proc->pid, thread->pid,
2259 in_reply_to ? "reply" : "transaction",
2260 fd);
2261 ret = -EPERM;
2262 goto err_fd_not_accepted;
2263 }
2264
2265 file = fget(fd);
2266 if (!file) {
2267 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2268 proc->pid, thread->pid, fd);
2269 ret = -EBADF;
2270 goto err_fget;
2271 }
2272 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2273 if (ret < 0) {
2274 ret = -EPERM;
2275 goto err_security;
2276 }
2277
2278 /*
2279 * Add fixup record for this transaction. The allocation
2280 * of the fd in the target needs to be done from a
2281 * target thread.
2282 */
2283 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2284 if (!fixup) {
2285 ret = -ENOMEM;
2286 goto err_alloc;
2287 }
2288 fixup->file = file;
2289 fixup->offset = fd_offset;
2290 fixup->target_fd = -1;
2291 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2292 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2293
2294 return ret;
2295
2296 err_alloc:
2297 err_security:
2298 fput(file);
2299 err_fget:
2300 err_fd_not_accepted:
2301 return ret;
2302 }
2303
2304 /**
2305 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2306 * @offset: offset in target buffer to fixup
2307 * @skip_size: bytes to skip in copy (fixup will be written later)
2308 * @fixup_data: data to write at fixup offset
2309 * @node: list node
2310 *
2311 * This is used for the pointer fixup list (pf) which is created and consumed
2312 * during binder_transaction() and is only accessed locally. No
2313 * locking is necessary.
2314 *
2315 * The list is ordered by @offset.
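 * For example (hypothetical offsets), fixups queued at 0x28, 0x40 and
 * 0x58 are applied in exactly that order while the scatter-gather data
 * is copied by binder_do_deferred_txn_copies().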
2316 */
2317 struct binder_ptr_fixup {
2318 binder_size_t offset;
2319 size_t skip_size;
2320 binder_uintptr_t fixup_data;
2321 struct list_head node;
2322 };
2323
2324 /**
2325 * struct binder_sg_copy - scatter-gather data to be copied
2326 * @offset: offset in target buffer
2327 * @sender_uaddr: user address in source buffer
2328 * @length: bytes to copy
2329 * @node: list node
2330 *
2331 * This is used for the sg copy list (sgc) which is created and consumed
2332 * during binder_transaction() and is only accessed locally. No
2333 * locking is necessary.
2334 *
2335 * The list is ordered by @offset.
2336 */
2337 struct binder_sg_copy {
2338 binder_size_t offset;
2339 const void __user *sender_uaddr;
2340 size_t length;
2341 struct list_head node;
2342 };
2343
2344 /**
2345 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2346 * @alloc: binder_alloc associated with @buffer
2347 * @buffer: binder buffer in target process
2348 * @sgc_head: list_head of scatter-gather copy list
2349 * @pf_head: list_head of pointer fixup list
2350 *
2351 * Processes all elements of @sgc_head, applying fixups from @pf_head
2352 * and copying the scatter-gather data from the source process' user
2353 * buffer to the target's buffer. It is expected that the list creation
2354 * and processing all occurs during binder_transaction() so these lists
2355 * are only accessed in local context.
2356 *
2357 * Return: 0=success, else -errno
2358 */
2359 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2360 struct binder_buffer *buffer,
2361 struct list_head *sgc_head,
2362 struct list_head *pf_head)
2363 {
2364 int ret = 0;
2365 struct binder_sg_copy *sgc, *tmpsgc;
2366 struct binder_ptr_fixup *tmppf;
2367 struct binder_ptr_fixup *pf =
2368 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2369 node);
2370
2371 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2372 size_t bytes_copied = 0;
2373
2374 while (bytes_copied < sgc->length) {
2375 size_t copy_size;
2376 size_t bytes_left = sgc->length - bytes_copied;
2377 size_t offset = sgc->offset + bytes_copied;
2378
2379 /*
2380 * We copy up to the fixup (pointed to by pf)
2381 */
2382 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2383 : bytes_left;
2384 if (!ret && copy_size)
2385 ret = binder_alloc_copy_user_to_buffer(
2386 alloc, buffer,
2387 offset,
2388 sgc->sender_uaddr + bytes_copied,
2389 copy_size);
2390 bytes_copied += copy_size;
2391 if (copy_size != bytes_left) {
2392 BUG_ON(!pf);
2393 /* we stopped at a fixup offset */
2394 if (pf->skip_size) {
2395 /*
2396 * we are just skipping. This is for
2397 * BINDER_TYPE_FDA where the translated
2398 * fds will be fixed up when we get
2399 * to target context.
2400 */
2401 bytes_copied += pf->skip_size;
2402 } else {
2403 /* apply the fixup indicated by pf */
2404 if (!ret)
2405 ret = binder_alloc_copy_to_buffer(
2406 alloc, buffer,
2407 pf->offset,
2408 &pf->fixup_data,
2409 sizeof(pf->fixup_data));
2410 bytes_copied += sizeof(pf->fixup_data);
2411 }
2412 list_del(&pf->node);
2413 kfree(pf);
2414 pf = list_first_entry_or_null(pf_head,
2415 struct binder_ptr_fixup, node);
2416 }
2417 }
2418 list_del(&sgc->node);
2419 kfree(sgc);
2420 }
2421 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2422 BUG_ON(pf->skip_size == 0);
2423 list_del(&pf->node);
2424 kfree(pf);
2425 }
2426 BUG_ON(!list_empty(sgc_head));
2427
2428 return ret > 0 ?
-EINVAL : ret; 2429 } 2430 2431 /** 2432 * binder_cleanup_deferred_txn_lists() - free specified lists 2433 * @sgc_head: list_head of scatter-gather copy list 2434 * @pf_head: list_head of pointer fixup list 2435 * 2436 * Called to clean up @sgc_head and @pf_head if there is an 2437 * error. 2438 */ 2439 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, 2440 struct list_head *pf_head) 2441 { 2442 struct binder_sg_copy *sgc, *tmpsgc; 2443 struct binder_ptr_fixup *pf, *tmppf; 2444 2445 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { 2446 list_del(&sgc->node); 2447 kfree(sgc); 2448 } 2449 list_for_each_entry_safe(pf, tmppf, pf_head, node) { 2450 list_del(&pf->node); 2451 kfree(pf); 2452 } 2453 } 2454 2455 /** 2456 * binder_defer_copy() - queue a scatter-gather buffer for copy 2457 * @sgc_head: list_head of scatter-gather copy list 2458 * @offset: binder buffer offset in target process 2459 * @sender_uaddr: user address in source process 2460 * @length: bytes to copy 2461 * 2462 * Specify a scatter-gather block to be copied. The actual copy must 2463 * be deferred until all the needed fixups are identified and queued. 2464 * Then the copy and fixups are done together so un-translated values 2465 * from the source are never visible in the target buffer. 2466 * 2467 * We are guaranteed that repeated calls to this function will have 2468 * monotonically increasing @offset values so the list will naturally 2469 * be ordered. 2470 * 2471 * Return: 0=success, else -errno 2472 */ 2473 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, 2474 const void __user *sender_uaddr, size_t length) 2475 { 2476 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); 2477 2478 if (!bc) 2479 return -ENOMEM; 2480 2481 bc->offset = offset; 2482 bc->sender_uaddr = sender_uaddr; 2483 bc->length = length; 2484 INIT_LIST_HEAD(&bc->node); 2485 2486 /* 2487 * We are guaranteed that the deferred copies are in-order 2488 * so just add to the tail. 2489 */ 2490 list_add_tail(&bc->node, sgc_head); 2491 2492 return 0; 2493 } 2494 2495 /** 2496 * binder_add_fixup() - queue a fixup to be applied to sg copy 2497 * @pf_head: list_head of binder ptr fixup list 2498 * @offset: binder buffer offset in target process 2499 * @fixup: bytes to be copied for fixup 2500 * @skip_size: bytes to skip when copying (fixup will be applied later) 2501 * 2502 * Add the specified fixup to a list ordered by @offset. When copying 2503 * the scatter-gather buffers, the fixup will be copied instead of 2504 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup 2505 * will be applied later (in target process context), so we just skip 2506 * the bytes specified by @skip_size. If @skip_size is 0, we copy the 2507 * value in @fixup. 2508 * 2509 * This function is called *mostly* in @offset order, but there are 2510 * exceptions. Since out-of-order inserts are relatively uncommon, 2511 * we insert the new element by searching backward from the tail of 2512 * the list. 
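 * (For instance, with fixups already queued at offsets 0x40 and 0x80,
 * a late fixup at 0x60 is inserted between them by this backward walk;
 * the offsets here are purely illustrative.)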
2513 * 2514 * Return: 0=success, else -errno 2515 */ 2516 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, 2517 binder_uintptr_t fixup, size_t skip_size) 2518 { 2519 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); 2520 struct binder_ptr_fixup *tmppf; 2521 2522 if (!pf) 2523 return -ENOMEM; 2524 2525 pf->offset = offset; 2526 pf->fixup_data = fixup; 2527 pf->skip_size = skip_size; 2528 INIT_LIST_HEAD(&pf->node); 2529 2530 /* Fixups are *mostly* added in-order, but there are some 2531 * exceptions. Look backwards through list for insertion point. 2532 */ 2533 list_for_each_entry_reverse(tmppf, pf_head, node) { 2534 if (tmppf->offset < pf->offset) { 2535 list_add(&pf->node, &tmppf->node); 2536 return 0; 2537 } 2538 } 2539 /* 2540 * if we get here, then the new offset is the lowest so 2541 * insert at the head 2542 */ 2543 list_add(&pf->node, pf_head); 2544 return 0; 2545 } 2546 2547 static int binder_translate_fd_array(struct list_head *pf_head, 2548 struct binder_fd_array_object *fda, 2549 const void __user *sender_ubuffer, 2550 struct binder_buffer_object *parent, 2551 struct binder_buffer_object *sender_uparent, 2552 struct binder_transaction *t, 2553 struct binder_thread *thread, 2554 struct binder_transaction *in_reply_to) 2555 { 2556 binder_size_t fdi, fd_buf_size; 2557 binder_size_t fda_offset; 2558 const void __user *sender_ufda_base; 2559 struct binder_proc *proc = thread->proc; 2560 int ret; 2561 2562 if (fda->num_fds == 0) 2563 return 0; 2564 2565 fd_buf_size = sizeof(u32) * fda->num_fds; 2566 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2567 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2568 proc->pid, thread->pid, (u64)fda->num_fds); 2569 return -EINVAL; 2570 } 2571 if (fd_buf_size > parent->length || 2572 fda->parent_offset > parent->length - fd_buf_size) { 2573 /* No space for all file descriptors here. */ 2574 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2575 proc->pid, thread->pid, (u64)fda->num_fds); 2576 return -EINVAL; 2577 } 2578 /* 2579 * the source data for binder_buffer_object is visible 2580 * to user-space and the @buffer element is the user 2581 * pointer to the buffer_object containing the fd_array. 2582 * Convert the address to an offset relative to 2583 * the base of the transaction buffer. 2584 */ 2585 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + 2586 fda->parent_offset; 2587 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + 2588 fda->parent_offset; 2589 2590 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || 2591 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { 2592 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2593 proc->pid, thread->pid); 2594 return -EINVAL; 2595 } 2596 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); 2597 if (ret) 2598 return ret; 2599 2600 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2601 u32 fd; 2602 binder_size_t offset = fda_offset + fdi * sizeof(fd); 2603 binder_size_t sender_uoffset = fdi * sizeof(fd); 2604 2605 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); 2606 if (!ret) 2607 ret = binder_translate_fd(fd, offset, t, thread, 2608 in_reply_to); 2609 if (ret) 2610 return ret > 0 ? 
-EINVAL : ret;
2611 }
2612 return 0;
2613 }
2614
2615 static int binder_fixup_parent(struct list_head *pf_head,
2616 struct binder_transaction *t,
2617 struct binder_thread *thread,
2618 struct binder_buffer_object *bp,
2619 binder_size_t off_start_offset,
2620 binder_size_t num_valid,
2621 binder_size_t last_fixup_obj_off,
2622 binder_size_t last_fixup_min_off)
2623 {
2624 struct binder_buffer_object *parent;
2625 struct binder_buffer *b = t->buffer;
2626 struct binder_proc *proc = thread->proc;
2627 struct binder_proc *target_proc = t->to_proc;
2628 struct binder_object object;
2629 binder_size_t buffer_offset;
2630 binder_size_t parent_offset;
2631
2632 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2633 return 0;
2634
2635 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2636 off_start_offset, &parent_offset,
2637 num_valid);
2638 if (!parent) {
2639 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2640 proc->pid, thread->pid);
2641 return -EINVAL;
2642 }
2643
2644 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2645 parent_offset, bp->parent_offset,
2646 last_fixup_obj_off,
2647 last_fixup_min_off)) {
2648 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2649 proc->pid, thread->pid);
2650 return -EINVAL;
2651 }
2652
2653 if (parent->length < sizeof(binder_uintptr_t) ||
2654 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2655 /* No space for a pointer here! */
2656 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2657 proc->pid, thread->pid);
2658 return -EINVAL;
2659 }
2660 buffer_offset = bp->parent_offset +
2661 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2662 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2663 }
2664
2665 /**
2666 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2667 * @t1: the pending async txn in the frozen process
2668 * @t2: the new async txn to supersede the outdated pending one
2669 *
2670 * Return: true if t2 can supersede t1
2671 * false if t2 cannot supersede t1
2672 */
2673 static bool binder_can_update_transaction(struct binder_transaction *t1,
2674 struct binder_transaction *t2)
2675 {
2676 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2677 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2678 return false;
2679 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2680 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2681 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2682 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2683 return true;
2684 return false;
2685 }
2686
2687 /**
2688 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2689 * @t: new async transaction
2690 * @target_list: list to find outdated transaction
2691 *
2692 * Return: the outdated transaction if found
2693 * NULL if no outdated transaction can be found
2694 *
2695 * Requires the proc->inner_lock to be held.
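 * (Called from binder_proc_transaction() when a new TF_UPDATE_TXN
 * oneway transaction is queued to a frozen process; see the caller
 * below.)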
2696 */ 2697 static struct binder_transaction * 2698 binder_find_outdated_transaction_ilocked(struct binder_transaction *t, 2699 struct list_head *target_list) 2700 { 2701 struct binder_work *w; 2702 2703 list_for_each_entry(w, target_list, entry) { 2704 struct binder_transaction *t_queued; 2705 2706 if (w->type != BINDER_WORK_TRANSACTION) 2707 continue; 2708 t_queued = container_of(w, struct binder_transaction, work); 2709 if (binder_can_update_transaction(t_queued, t)) 2710 return t_queued; 2711 } 2712 return NULL; 2713 } 2714 2715 /** 2716 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2717 * @t: transaction to send 2718 * @proc: process to send the transaction to 2719 * @thread: thread in @proc to send the transaction to (may be NULL) 2720 * 2721 * This function queues a transaction to the specified process. It will try 2722 * to find a thread in the target process to handle the transaction and 2723 * wake it up. If no thread is found, the work is queued to the proc 2724 * waitqueue. 2725 * 2726 * If the @thread parameter is not NULL, the transaction is always queued 2727 * to the waitlist of that specific thread. 2728 * 2729 * Return: 0 if the transaction was successfully queued 2730 * BR_DEAD_REPLY if the target process or thread is dead 2731 * BR_FROZEN_REPLY if the target process or thread is frozen 2732 */ 2733 static int binder_proc_transaction(struct binder_transaction *t, 2734 struct binder_proc *proc, 2735 struct binder_thread *thread) 2736 { 2737 struct binder_node *node = t->buffer->target_node; 2738 bool oneway = !!(t->flags & TF_ONE_WAY); 2739 bool pending_async = false; 2740 struct binder_transaction *t_outdated = NULL; 2741 2742 BUG_ON(!node); 2743 binder_node_lock(node); 2744 if (oneway) { 2745 BUG_ON(thread); 2746 if (node->has_async_transaction) 2747 pending_async = true; 2748 else 2749 node->has_async_transaction = true; 2750 } 2751 2752 binder_inner_proc_lock(proc); 2753 if (proc->is_frozen) { 2754 proc->sync_recv |= !oneway; 2755 proc->async_recv |= oneway; 2756 } 2757 2758 if ((proc->is_frozen && !oneway) || proc->is_dead || 2759 (thread && thread->is_dead)) { 2760 binder_inner_proc_unlock(proc); 2761 binder_node_unlock(node); 2762 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY; 2763 } 2764 2765 if (!thread && !pending_async) 2766 thread = binder_select_thread_ilocked(proc); 2767 2768 if (thread) { 2769 binder_enqueue_thread_work_ilocked(thread, &t->work); 2770 } else if (!pending_async) { 2771 binder_enqueue_work_ilocked(&t->work, &proc->todo); 2772 } else { 2773 if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) { 2774 t_outdated = binder_find_outdated_transaction_ilocked(t, 2775 &node->async_todo); 2776 if (t_outdated) { 2777 binder_debug(BINDER_DEBUG_TRANSACTION, 2778 "txn %d supersedes %d\n", 2779 t->debug_id, t_outdated->debug_id); 2780 list_del_init(&t_outdated->work.entry); 2781 proc->outstanding_txns--; 2782 } 2783 } 2784 binder_enqueue_work_ilocked(&t->work, &node->async_todo); 2785 } 2786 2787 if (!pending_async) 2788 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); 2789 2790 proc->outstanding_txns++; 2791 binder_inner_proc_unlock(proc); 2792 binder_node_unlock(node); 2793 2794 /* 2795 * To reduce potential contention, free the outdated transaction and 2796 * buffer after releasing the locks. 
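 * (Releasing the buffer re-enters the allocator and takes the target
 * node's lock again via binder_dec_node(), so it cannot be done while
 * the node and inner proc locks above are still held.)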
2797 */
2798 if (t_outdated) {
2799 struct binder_buffer *buffer = t_outdated->buffer;
2800
2801 t_outdated->buffer = NULL;
2802 buffer->transaction = NULL;
2803 trace_binder_transaction_update_buffer_release(buffer);
2804 binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
2805 binder_alloc_free_buf(&proc->alloc, buffer);
2806 kfree(t_outdated);
2807 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2808 }
2809
2810 return 0;
2811 }
2812
2813 /**
2814 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2815 * @node: struct binder_node for which to get refs
2816 * @procp: returns @node->proc if valid
2817 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2818 *
2819 * User-space normally keeps the node alive when creating a transaction
2820 * since it has a reference to the target. The local strong ref keeps it
2821 * alive if the sending process dies before the target process processes
2822 * the transaction. If the source process is malicious or has a reference
2823 * counting bug, relying on the local strong ref can fail.
2824 *
2825 * Since user-space can cause the local strong ref to go away, we also take
2826 * a tmpref on the node to ensure it survives while we are constructing
2827 * the transaction. We also need a tmpref on the proc while we are
2828 * constructing the transaction, so we take that here as well.
2829 *
2830 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2831 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2832 * target proc has died, @error is set to BR_DEAD_REPLY.
2833 */
2834 static struct binder_node *binder_get_node_refs_for_txn(
2835 struct binder_node *node,
2836 struct binder_proc **procp,
2837 uint32_t *error)
2838 {
2839 struct binder_node *target_node = NULL;
2840
2841 binder_node_inner_lock(node);
2842 if (node->proc) {
2843 target_node = node;
2844 binder_inc_node_nilocked(node, 1, 0, NULL);
2845 binder_inc_node_tmpref_ilocked(node);
2846 node->proc->tmp_ref++;
2847 *procp = node->proc;
2848 } else
2849 *error = BR_DEAD_REPLY;
2850 binder_node_inner_unlock(node);
2851
2852 return target_node;
2853 }
2854
2855 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2856 uint32_t command, int32_t param)
2857 {
2858 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2859
2860 if (!from) {
2861 /* annotation for sparse */
2862 __release(&from->proc->inner_lock);
2863 return;
2864 }
2865
2866 /* don't override existing errors */
2867 if (from->ee.command == BR_OK)
2868 binder_set_extended_error(&from->ee, id, command, param);
2869 binder_inner_proc_unlock(from->proc);
2870 binder_thread_dec_tmpref(from);
2871 }
2872
2873 static void binder_transaction(struct binder_proc *proc,
2874 struct binder_thread *thread,
2875 struct binder_transaction_data *tr, int reply,
2876 binder_size_t extra_buffers_size)
2877 {
2878 int ret;
2879 struct binder_transaction *t;
2880 struct binder_work *w;
2881 struct binder_work *tcomplete;
2882 binder_size_t buffer_offset = 0;
2883 binder_size_t off_start_offset, off_end_offset;
2884 binder_size_t off_min;
2885 binder_size_t sg_buf_offset, sg_buf_end_offset;
2886 binder_size_t user_offset = 0;
2887 struct binder_proc *target_proc = NULL;
2888 struct binder_thread *target_thread = NULL;
2889 struct binder_node *target_node = NULL;
2890 struct binder_transaction *in_reply_to = NULL;
2891 struct binder_transaction_log_entry *e;
2892 uint32_t return_error = 0;
2893 uint32_t return_error_param = 0;
2894 uint32_t
return_error_line = 0; 2895 binder_size_t last_fixup_obj_off = 0; 2896 binder_size_t last_fixup_min_off = 0; 2897 struct binder_context *context = proc->context; 2898 int t_debug_id = atomic_inc_return(&binder_last_id); 2899 char *secctx = NULL; 2900 u32 secctx_sz = 0; 2901 struct list_head sgc_head; 2902 struct list_head pf_head; 2903 const void __user *user_buffer = (const void __user *) 2904 (uintptr_t)tr->data.ptr.buffer; 2905 INIT_LIST_HEAD(&sgc_head); 2906 INIT_LIST_HEAD(&pf_head); 2907 2908 e = binder_transaction_log_add(&binder_transaction_log); 2909 e->debug_id = t_debug_id; 2910 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2911 e->from_proc = proc->pid; 2912 e->from_thread = thread->pid; 2913 e->target_handle = tr->target.handle; 2914 e->data_size = tr->data_size; 2915 e->offsets_size = tr->offsets_size; 2916 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); 2917 2918 binder_inner_proc_lock(proc); 2919 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); 2920 binder_inner_proc_unlock(proc); 2921 2922 if (reply) { 2923 binder_inner_proc_lock(proc); 2924 in_reply_to = thread->transaction_stack; 2925 if (in_reply_to == NULL) { 2926 binder_inner_proc_unlock(proc); 2927 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2928 proc->pid, thread->pid); 2929 return_error = BR_FAILED_REPLY; 2930 return_error_param = -EPROTO; 2931 return_error_line = __LINE__; 2932 goto err_empty_call_stack; 2933 } 2934 if (in_reply_to->to_thread != thread) { 2935 spin_lock(&in_reply_to->lock); 2936 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2937 proc->pid, thread->pid, in_reply_to->debug_id, 2938 in_reply_to->to_proc ? 2939 in_reply_to->to_proc->pid : 0, 2940 in_reply_to->to_thread ? 2941 in_reply_to->to_thread->pid : 0); 2942 spin_unlock(&in_reply_to->lock); 2943 binder_inner_proc_unlock(proc); 2944 return_error = BR_FAILED_REPLY; 2945 return_error_param = -EPROTO; 2946 return_error_line = __LINE__; 2947 in_reply_to = NULL; 2948 goto err_bad_call_stack; 2949 } 2950 thread->transaction_stack = in_reply_to->to_parent; 2951 binder_inner_proc_unlock(proc); 2952 binder_set_nice(in_reply_to->saved_priority); 2953 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2954 if (target_thread == NULL) { 2955 /* annotation for sparse */ 2956 __release(&target_thread->proc->inner_lock); 2957 binder_txn_error("%d:%d reply target not found\n", 2958 thread->pid, proc->pid); 2959 return_error = BR_DEAD_REPLY; 2960 return_error_line = __LINE__; 2961 goto err_dead_binder; 2962 } 2963 if (target_thread->transaction_stack != in_reply_to) { 2964 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2965 proc->pid, thread->pid, 2966 target_thread->transaction_stack ? 2967 target_thread->transaction_stack->debug_id : 0, 2968 in_reply_to->debug_id); 2969 binder_inner_proc_unlock(target_thread->proc); 2970 return_error = BR_FAILED_REPLY; 2971 return_error_param = -EPROTO; 2972 return_error_line = __LINE__; 2973 in_reply_to = NULL; 2974 target_thread = NULL; 2975 goto err_dead_binder; 2976 } 2977 target_proc = target_thread->proc; 2978 target_proc->tmp_ref++; 2979 binder_inner_proc_unlock(target_thread->proc); 2980 } else { 2981 if (tr->target.handle) { 2982 struct binder_ref *ref; 2983 2984 /* 2985 * There must already be a strong ref 2986 * on this node. 
If so, do a strong 2987 * increment on the node to ensure it 2988 * stays alive until the transaction is 2989 * done. 2990 */ 2991 binder_proc_lock(proc); 2992 ref = binder_get_ref_olocked(proc, tr->target.handle, 2993 true); 2994 if (ref) { 2995 target_node = binder_get_node_refs_for_txn( 2996 ref->node, &target_proc, 2997 &return_error); 2998 } else { 2999 binder_user_error("%d:%d got transaction to invalid handle, %u\n", 3000 proc->pid, thread->pid, tr->target.handle); 3001 return_error = BR_FAILED_REPLY; 3002 } 3003 binder_proc_unlock(proc); 3004 } else { 3005 mutex_lock(&context->context_mgr_node_lock); 3006 target_node = context->binder_context_mgr_node; 3007 if (target_node) 3008 target_node = binder_get_node_refs_for_txn( 3009 target_node, &target_proc, 3010 &return_error); 3011 else 3012 return_error = BR_DEAD_REPLY; 3013 mutex_unlock(&context->context_mgr_node_lock); 3014 if (target_node && target_proc->pid == proc->pid) { 3015 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 3016 proc->pid, thread->pid); 3017 return_error = BR_FAILED_REPLY; 3018 return_error_param = -EINVAL; 3019 return_error_line = __LINE__; 3020 goto err_invalid_target_handle; 3021 } 3022 } 3023 if (!target_node) { 3024 binder_txn_error("%d:%d cannot find target node\n", 3025 thread->pid, proc->pid); 3026 /* 3027 * return_error is set above 3028 */ 3029 return_error_param = -EINVAL; 3030 return_error_line = __LINE__; 3031 goto err_dead_binder; 3032 } 3033 e->to_node = target_node->debug_id; 3034 if (WARN_ON(proc == target_proc)) { 3035 binder_txn_error("%d:%d self transactions not allowed\n", 3036 thread->pid, proc->pid); 3037 return_error = BR_FAILED_REPLY; 3038 return_error_param = -EINVAL; 3039 return_error_line = __LINE__; 3040 goto err_invalid_target_handle; 3041 } 3042 if (security_binder_transaction(proc->cred, 3043 target_proc->cred) < 0) { 3044 binder_txn_error("%d:%d transaction credentials failed\n", 3045 thread->pid, proc->pid); 3046 return_error = BR_FAILED_REPLY; 3047 return_error_param = -EPERM; 3048 return_error_line = __LINE__; 3049 goto err_invalid_target_handle; 3050 } 3051 binder_inner_proc_lock(proc); 3052 3053 w = list_first_entry_or_null(&thread->todo, 3054 struct binder_work, entry); 3055 if (!(tr->flags & TF_ONE_WAY) && w && 3056 w->type == BINDER_WORK_TRANSACTION) { 3057 /* 3058 * Do not allow new outgoing transaction from a 3059 * thread that has a transaction at the head of 3060 * its todo list. Only need to check the head 3061 * because binder_select_thread_ilocked picks a 3062 * thread from proc->waiting_threads to enqueue 3063 * the transaction, and nothing is queued to the 3064 * todo list while the thread is on waiting_threads. 3065 */ 3066 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 3067 proc->pid, thread->pid); 3068 binder_inner_proc_unlock(proc); 3069 return_error = BR_FAILED_REPLY; 3070 return_error_param = -EPROTO; 3071 return_error_line = __LINE__; 3072 goto err_bad_todo_list; 3073 } 3074 3075 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 3076 struct binder_transaction *tmp; 3077 3078 tmp = thread->transaction_stack; 3079 if (tmp->to_thread != thread) { 3080 spin_lock(&tmp->lock); 3081 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 3082 proc->pid, thread->pid, tmp->debug_id, 3083 tmp->to_proc ? tmp->to_proc->pid : 0, 3084 tmp->to_thread ? 
3085 tmp->to_thread->pid : 0); 3086 spin_unlock(&tmp->lock); 3087 binder_inner_proc_unlock(proc); 3088 return_error = BR_FAILED_REPLY; 3089 return_error_param = -EPROTO; 3090 return_error_line = __LINE__; 3091 goto err_bad_call_stack; 3092 } 3093 while (tmp) { 3094 struct binder_thread *from; 3095 3096 spin_lock(&tmp->lock); 3097 from = tmp->from; 3098 if (from && from->proc == target_proc) { 3099 atomic_inc(&from->tmp_ref); 3100 target_thread = from; 3101 spin_unlock(&tmp->lock); 3102 break; 3103 } 3104 spin_unlock(&tmp->lock); 3105 tmp = tmp->from_parent; 3106 } 3107 } 3108 binder_inner_proc_unlock(proc); 3109 } 3110 if (target_thread) 3111 e->to_thread = target_thread->pid; 3112 e->to_proc = target_proc->pid; 3113 3114 /* TODO: reuse incoming transaction for reply */ 3115 t = kzalloc(sizeof(*t), GFP_KERNEL); 3116 if (t == NULL) { 3117 binder_txn_error("%d:%d cannot allocate transaction\n", 3118 thread->pid, proc->pid); 3119 return_error = BR_FAILED_REPLY; 3120 return_error_param = -ENOMEM; 3121 return_error_line = __LINE__; 3122 goto err_alloc_t_failed; 3123 } 3124 INIT_LIST_HEAD(&t->fd_fixups); 3125 binder_stats_created(BINDER_STAT_TRANSACTION); 3126 spin_lock_init(&t->lock); 3127 3128 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 3129 if (tcomplete == NULL) { 3130 binder_txn_error("%d:%d cannot allocate work for transaction\n", 3131 thread->pid, proc->pid); 3132 return_error = BR_FAILED_REPLY; 3133 return_error_param = -ENOMEM; 3134 return_error_line = __LINE__; 3135 goto err_alloc_tcomplete_failed; 3136 } 3137 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 3138 3139 t->debug_id = t_debug_id; 3140 3141 if (reply) 3142 binder_debug(BINDER_DEBUG_TRANSACTION, 3143 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 3144 proc->pid, thread->pid, t->debug_id, 3145 target_proc->pid, target_thread->pid, 3146 (u64)tr->data.ptr.buffer, 3147 (u64)tr->data.ptr.offsets, 3148 (u64)tr->data_size, (u64)tr->offsets_size, 3149 (u64)extra_buffers_size); 3150 else 3151 binder_debug(BINDER_DEBUG_TRANSACTION, 3152 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 3153 proc->pid, thread->pid, t->debug_id, 3154 target_proc->pid, target_node->debug_id, 3155 (u64)tr->data.ptr.buffer, 3156 (u64)tr->data.ptr.offsets, 3157 (u64)tr->data_size, (u64)tr->offsets_size, 3158 (u64)extra_buffers_size); 3159 3160 if (!reply && !(tr->flags & TF_ONE_WAY)) 3161 t->from = thread; 3162 else 3163 t->from = NULL; 3164 t->sender_euid = task_euid(proc->tsk); 3165 t->to_proc = target_proc; 3166 t->to_thread = target_thread; 3167 t->code = tr->code; 3168 t->flags = tr->flags; 3169 t->priority = task_nice(current); 3170 3171 if (target_node && target_node->txn_security_ctx) { 3172 u32 secid; 3173 size_t added_size; 3174 3175 security_cred_getsecid(proc->cred, &secid); 3176 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); 3177 if (ret) { 3178 binder_txn_error("%d:%d failed to get security context\n", 3179 thread->pid, proc->pid); 3180 return_error = BR_FAILED_REPLY; 3181 return_error_param = ret; 3182 return_error_line = __LINE__; 3183 goto err_get_secctx_failed; 3184 } 3185 added_size = ALIGN(secctx_sz, sizeof(u64)); 3186 extra_buffers_size += added_size; 3187 if (extra_buffers_size < added_size) { 3188 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n", 3189 thread->pid, proc->pid); 3190 return_error = BR_FAILED_REPLY; 3191 return_error_param = -EINVAL; 3192 return_error_line = __LINE__; 3193 goto err_bad_extra_size; 3194 } 3195 } 
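	/*
	 * Layout of the target buffer allocated below: transaction data
	 * first, then the aligned offsets array, then any scatter-gather
	 * buffers, with the security context (if present) at the very end.
	 */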
3196 3197 trace_binder_transaction(reply, t, target_node); 3198 3199 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3200 tr->offsets_size, extra_buffers_size, 3201 !reply && (t->flags & TF_ONE_WAY), current->tgid); 3202 if (IS_ERR(t->buffer)) { 3203 char *s; 3204 3205 ret = PTR_ERR(t->buffer); 3206 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying" 3207 : (ret == -ENOSPC) ? ": no space left" 3208 : (ret == -ENOMEM) ? ": memory allocation failed" 3209 : ""; 3210 binder_txn_error("cannot allocate buffer%s", s); 3211 3212 return_error_param = PTR_ERR(t->buffer); 3213 return_error = return_error_param == -ESRCH ? 3214 BR_DEAD_REPLY : BR_FAILED_REPLY; 3215 return_error_line = __LINE__; 3216 t->buffer = NULL; 3217 goto err_binder_alloc_buf_failed; 3218 } 3219 if (secctx) { 3220 int err; 3221 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + 3222 ALIGN(tr->offsets_size, sizeof(void *)) + 3223 ALIGN(extra_buffers_size, sizeof(void *)) - 3224 ALIGN(secctx_sz, sizeof(u64)); 3225 3226 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; 3227 err = binder_alloc_copy_to_buffer(&target_proc->alloc, 3228 t->buffer, buf_offset, 3229 secctx, secctx_sz); 3230 if (err) { 3231 t->security_ctx = 0; 3232 WARN_ON(1); 3233 } 3234 security_release_secctx(secctx, secctx_sz); 3235 secctx = NULL; 3236 } 3237 t->buffer->debug_id = t->debug_id; 3238 t->buffer->transaction = t; 3239 t->buffer->target_node = target_node; 3240 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); 3241 trace_binder_transaction_alloc_buf(t->buffer); 3242 3243 if (binder_alloc_copy_user_to_buffer( 3244 &target_proc->alloc, 3245 t->buffer, 3246 ALIGN(tr->data_size, sizeof(void *)), 3247 (const void __user *) 3248 (uintptr_t)tr->data.ptr.offsets, 3249 tr->offsets_size)) { 3250 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3251 proc->pid, thread->pid); 3252 return_error = BR_FAILED_REPLY; 3253 return_error_param = -EFAULT; 3254 return_error_line = __LINE__; 3255 goto err_copy_data_failed; 3256 } 3257 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3258 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3259 proc->pid, thread->pid, (u64)tr->offsets_size); 3260 return_error = BR_FAILED_REPLY; 3261 return_error_param = -EINVAL; 3262 return_error_line = __LINE__; 3263 goto err_bad_offset; 3264 } 3265 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3266 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3267 proc->pid, thread->pid, 3268 (u64)extra_buffers_size); 3269 return_error = BR_FAILED_REPLY; 3270 return_error_param = -EINVAL; 3271 return_error_line = __LINE__; 3272 goto err_bad_offset; 3273 } 3274 off_start_offset = ALIGN(tr->data_size, sizeof(void *)); 3275 buffer_offset = off_start_offset; 3276 off_end_offset = off_start_offset + tr->offsets_size; 3277 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3278 sg_buf_end_offset = sg_buf_offset + extra_buffers_size - 3279 ALIGN(secctx_sz, sizeof(u64)); 3280 off_min = 0; 3281 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3282 buffer_offset += sizeof(binder_size_t)) { 3283 struct binder_object_header *hdr; 3284 size_t object_size; 3285 struct binder_object object; 3286 binder_size_t object_offset; 3287 binder_size_t copy_size; 3288 3289 if (binder_alloc_copy_from_buffer(&target_proc->alloc, 3290 &object_offset, 3291 t->buffer, 3292 buffer_offset, 3293 sizeof(object_offset))) { 3294 binder_txn_error("%d:%d copy offset from 
buffer failed\n", 3295 thread->pid, proc->pid); 3296 return_error = BR_FAILED_REPLY; 3297 return_error_param = -EINVAL; 3298 return_error_line = __LINE__; 3299 goto err_bad_offset; 3300 } 3301 3302 /* 3303 * Copy the source user buffer up to the next object 3304 * that will be processed. 3305 */ 3306 copy_size = object_offset - user_offset; 3307 if (copy_size && (user_offset > object_offset || 3308 binder_alloc_copy_user_to_buffer( 3309 &target_proc->alloc, 3310 t->buffer, user_offset, 3311 user_buffer + user_offset, 3312 copy_size))) { 3313 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3314 proc->pid, thread->pid); 3315 return_error = BR_FAILED_REPLY; 3316 return_error_param = -EFAULT; 3317 return_error_line = __LINE__; 3318 goto err_copy_data_failed; 3319 } 3320 object_size = binder_get_object(target_proc, user_buffer, 3321 t->buffer, object_offset, &object); 3322 if (object_size == 0 || object_offset < off_min) { 3323 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3324 proc->pid, thread->pid, 3325 (u64)object_offset, 3326 (u64)off_min, 3327 (u64)t->buffer->data_size); 3328 return_error = BR_FAILED_REPLY; 3329 return_error_param = -EINVAL; 3330 return_error_line = __LINE__; 3331 goto err_bad_offset; 3332 } 3333 /* 3334 * Set offset to the next buffer fragment to be 3335 * copied 3336 */ 3337 user_offset = object_offset + object_size; 3338 3339 hdr = &object.hdr; 3340 off_min = object_offset + object_size; 3341 switch (hdr->type) { 3342 case BINDER_TYPE_BINDER: 3343 case BINDER_TYPE_WEAK_BINDER: { 3344 struct flat_binder_object *fp; 3345 3346 fp = to_flat_binder_object(hdr); 3347 ret = binder_translate_binder(fp, t, thread); 3348 3349 if (ret < 0 || 3350 binder_alloc_copy_to_buffer(&target_proc->alloc, 3351 t->buffer, 3352 object_offset, 3353 fp, sizeof(*fp))) { 3354 binder_txn_error("%d:%d translate binder failed\n", 3355 thread->pid, proc->pid); 3356 return_error = BR_FAILED_REPLY; 3357 return_error_param = ret; 3358 return_error_line = __LINE__; 3359 goto err_translate_failed; 3360 } 3361 } break; 3362 case BINDER_TYPE_HANDLE: 3363 case BINDER_TYPE_WEAK_HANDLE: { 3364 struct flat_binder_object *fp; 3365 3366 fp = to_flat_binder_object(hdr); 3367 ret = binder_translate_handle(fp, t, thread); 3368 if (ret < 0 || 3369 binder_alloc_copy_to_buffer(&target_proc->alloc, 3370 t->buffer, 3371 object_offset, 3372 fp, sizeof(*fp))) { 3373 binder_txn_error("%d:%d translate handle failed\n", 3374 thread->pid, proc->pid); 3375 return_error = BR_FAILED_REPLY; 3376 return_error_param = ret; 3377 return_error_line = __LINE__; 3378 goto err_translate_failed; 3379 } 3380 } break; 3381 3382 case BINDER_TYPE_FD: { 3383 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3384 binder_size_t fd_offset = object_offset + 3385 (uintptr_t)&fp->fd - (uintptr_t)fp; 3386 int ret = binder_translate_fd(fp->fd, fd_offset, t, 3387 thread, in_reply_to); 3388 3389 fp->pad_binder = 0; 3390 if (ret < 0 || 3391 binder_alloc_copy_to_buffer(&target_proc->alloc, 3392 t->buffer, 3393 object_offset, 3394 fp, sizeof(*fp))) { 3395 binder_txn_error("%d:%d translate fd failed\n", 3396 thread->pid, proc->pid); 3397 return_error = BR_FAILED_REPLY; 3398 return_error_param = ret; 3399 return_error_line = __LINE__; 3400 goto err_translate_failed; 3401 } 3402 } break; 3403 case BINDER_TYPE_FDA: { 3404 struct binder_object ptr_object; 3405 binder_size_t parent_offset; 3406 struct binder_object user_object; 3407 size_t user_parent_size; 3408 struct 
binder_fd_array_object *fda = 3409 to_binder_fd_array_object(hdr); 3410 size_t num_valid = (buffer_offset - off_start_offset) / 3411 sizeof(binder_size_t); 3412 struct binder_buffer_object *parent = 3413 binder_validate_ptr(target_proc, t->buffer, 3414 &ptr_object, fda->parent, 3415 off_start_offset, 3416 &parent_offset, 3417 num_valid); 3418 if (!parent) { 3419 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3420 proc->pid, thread->pid); 3421 return_error = BR_FAILED_REPLY; 3422 return_error_param = -EINVAL; 3423 return_error_line = __LINE__; 3424 goto err_bad_parent; 3425 } 3426 if (!binder_validate_fixup(target_proc, t->buffer, 3427 off_start_offset, 3428 parent_offset, 3429 fda->parent_offset, 3430 last_fixup_obj_off, 3431 last_fixup_min_off)) { 3432 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3433 proc->pid, thread->pid); 3434 return_error = BR_FAILED_REPLY; 3435 return_error_param = -EINVAL; 3436 return_error_line = __LINE__; 3437 goto err_bad_parent; 3438 } 3439 /* 3440 * We need to read the user version of the parent 3441 * object to get the original user offset 3442 */ 3443 user_parent_size = 3444 binder_get_object(proc, user_buffer, t->buffer, 3445 parent_offset, &user_object); 3446 if (user_parent_size != sizeof(user_object.bbo)) { 3447 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", 3448 proc->pid, thread->pid, 3449 user_parent_size, 3450 sizeof(user_object.bbo)); 3451 return_error = BR_FAILED_REPLY; 3452 return_error_param = -EINVAL; 3453 return_error_line = __LINE__; 3454 goto err_bad_parent; 3455 } 3456 ret = binder_translate_fd_array(&pf_head, fda, 3457 user_buffer, parent, 3458 &user_object.bbo, t, 3459 thread, in_reply_to); 3460 if (!ret) 3461 ret = binder_alloc_copy_to_buffer(&target_proc->alloc, 3462 t->buffer, 3463 object_offset, 3464 fda, sizeof(*fda)); 3465 if (ret) { 3466 binder_txn_error("%d:%d translate fd array failed\n", 3467 thread->pid, proc->pid); 3468 return_error = BR_FAILED_REPLY; 3469 return_error_param = ret > 0 ? 
-EINVAL : ret; 3470 return_error_line = __LINE__; 3471 goto err_translate_failed; 3472 } 3473 last_fixup_obj_off = parent_offset; 3474 last_fixup_min_off = 3475 fda->parent_offset + sizeof(u32) * fda->num_fds; 3476 } break; 3477 case BINDER_TYPE_PTR: { 3478 struct binder_buffer_object *bp = 3479 to_binder_buffer_object(hdr); 3480 size_t buf_left = sg_buf_end_offset - sg_buf_offset; 3481 size_t num_valid; 3482 3483 if (bp->length > buf_left) { 3484 binder_user_error("%d:%d got transaction with too large buffer\n", 3485 proc->pid, thread->pid); 3486 return_error = BR_FAILED_REPLY; 3487 return_error_param = -EINVAL; 3488 return_error_line = __LINE__; 3489 goto err_bad_offset; 3490 } 3491 ret = binder_defer_copy(&sgc_head, sg_buf_offset, 3492 (const void __user *)(uintptr_t)bp->buffer, 3493 bp->length); 3494 if (ret) { 3495 binder_txn_error("%d:%d deferred copy failed\n", 3496 thread->pid, proc->pid); 3497 return_error = BR_FAILED_REPLY; 3498 return_error_param = ret; 3499 return_error_line = __LINE__; 3500 goto err_translate_failed; 3501 } 3502 /* Fixup buffer pointer to target proc address space */ 3503 bp->buffer = (uintptr_t) 3504 t->buffer->user_data + sg_buf_offset; 3505 sg_buf_offset += ALIGN(bp->length, sizeof(u64)); 3506 3507 num_valid = (buffer_offset - off_start_offset) / 3508 sizeof(binder_size_t); 3509 ret = binder_fixup_parent(&pf_head, t, 3510 thread, bp, 3511 off_start_offset, 3512 num_valid, 3513 last_fixup_obj_off, 3514 last_fixup_min_off); 3515 if (ret < 0 || 3516 binder_alloc_copy_to_buffer(&target_proc->alloc, 3517 t->buffer, 3518 object_offset, 3519 bp, sizeof(*bp))) { 3520 binder_txn_error("%d:%d failed to fixup parent\n", 3521 thread->pid, proc->pid); 3522 return_error = BR_FAILED_REPLY; 3523 return_error_param = ret; 3524 return_error_line = __LINE__; 3525 goto err_translate_failed; 3526 } 3527 last_fixup_obj_off = object_offset; 3528 last_fixup_min_off = 0; 3529 } break; 3530 default: 3531 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3532 proc->pid, thread->pid, hdr->type); 3533 return_error = BR_FAILED_REPLY; 3534 return_error_param = -EINVAL; 3535 return_error_line = __LINE__; 3536 goto err_bad_object_type; 3537 } 3538 } 3539 /* Done processing objects, copy the rest of the buffer */ 3540 if (binder_alloc_copy_user_to_buffer( 3541 &target_proc->alloc, 3542 t->buffer, user_offset, 3543 user_buffer + user_offset, 3544 tr->data_size - user_offset)) { 3545 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3546 proc->pid, thread->pid); 3547 return_error = BR_FAILED_REPLY; 3548 return_error_param = -EFAULT; 3549 return_error_line = __LINE__; 3550 goto err_copy_data_failed; 3551 } 3552 3553 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, 3554 &sgc_head, &pf_head); 3555 if (ret) { 3556 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3557 proc->pid, thread->pid); 3558 return_error = BR_FAILED_REPLY; 3559 return_error_param = ret; 3560 return_error_line = __LINE__; 3561 goto err_copy_data_failed; 3562 } 3563 if (t->buffer->oneway_spam_suspect) 3564 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; 3565 else 3566 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3567 t->work.type = BINDER_WORK_TRANSACTION; 3568 3569 if (reply) { 3570 binder_enqueue_thread_work(thread, tcomplete); 3571 binder_inner_proc_lock(target_proc); 3572 if (target_thread->is_dead) { 3573 return_error = BR_DEAD_REPLY; 3574 binder_inner_proc_unlock(target_proc); 3575 goto err_dead_proc_or_thread; 3576 } 
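		/*
		 * Replies are delivered directly to the thread that issued
		 * the original transaction, which is popped off that
		 * thread's transaction stack below.
		 */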
3577 BUG_ON(t->buffer->async_transaction != 0); 3578 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3579 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3580 target_proc->outstanding_txns++; 3581 binder_inner_proc_unlock(target_proc); 3582 wake_up_interruptible_sync(&target_thread->wait); 3583 binder_free_transaction(in_reply_to); 3584 } else if (!(t->flags & TF_ONE_WAY)) { 3585 BUG_ON(t->buffer->async_transaction != 0); 3586 binder_inner_proc_lock(proc); 3587 /* 3588 * Defer the TRANSACTION_COMPLETE, so we don't return to 3589 * userspace immediately; this allows the target process to 3590 * immediately start processing this transaction, reducing 3591 * latency. We will then return the TRANSACTION_COMPLETE when 3592 * the target replies (or there is an error). 3593 */ 3594 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3595 t->need_reply = 1; 3596 t->from_parent = thread->transaction_stack; 3597 thread->transaction_stack = t; 3598 binder_inner_proc_unlock(proc); 3599 return_error = binder_proc_transaction(t, 3600 target_proc, target_thread); 3601 if (return_error) { 3602 binder_inner_proc_lock(proc); 3603 binder_pop_transaction_ilocked(thread, t); 3604 binder_inner_proc_unlock(proc); 3605 goto err_dead_proc_or_thread; 3606 } 3607 } else { 3608 BUG_ON(target_node == NULL); 3609 BUG_ON(t->buffer->async_transaction != 1); 3610 binder_enqueue_thread_work(thread, tcomplete); 3611 return_error = binder_proc_transaction(t, target_proc, NULL); 3612 if (return_error) 3613 goto err_dead_proc_or_thread; 3614 } 3615 if (target_thread) 3616 binder_thread_dec_tmpref(target_thread); 3617 binder_proc_dec_tmpref(target_proc); 3618 if (target_node) 3619 binder_dec_node_tmpref(target_node); 3620 /* 3621 * write barrier to synchronize with initialization 3622 * of log entry 3623 */ 3624 smp_wmb(); 3625 WRITE_ONCE(e->debug_id_done, t_debug_id); 3626 return; 3627 3628 err_dead_proc_or_thread: 3629 binder_txn_error("%d:%d dead process or thread\n", 3630 thread->pid, proc->pid); 3631 return_error_line = __LINE__; 3632 binder_dequeue_work(proc, tcomplete); 3633 err_translate_failed: 3634 err_bad_object_type: 3635 err_bad_offset: 3636 err_bad_parent: 3637 err_copy_data_failed: 3638 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); 3639 binder_free_txn_fixups(t); 3640 trace_binder_transaction_failed_buffer_release(t->buffer); 3641 binder_transaction_buffer_release(target_proc, NULL, t->buffer, 3642 buffer_offset, true); 3643 if (target_node) 3644 binder_dec_node_tmpref(target_node); 3645 target_node = NULL; 3646 t->buffer->transaction = NULL; 3647 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3648 err_binder_alloc_buf_failed: 3649 err_bad_extra_size: 3650 if (secctx) 3651 security_release_secctx(secctx, secctx_sz); 3652 err_get_secctx_failed: 3653 kfree(tcomplete); 3654 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3655 err_alloc_tcomplete_failed: 3656 if (trace_binder_txn_latency_free_enabled()) 3657 binder_txn_latency_free(t); 3658 kfree(t); 3659 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3660 err_alloc_t_failed: 3661 err_bad_todo_list: 3662 err_bad_call_stack: 3663 err_empty_call_stack: 3664 err_dead_binder: 3665 err_invalid_target_handle: 3666 if (target_node) { 3667 binder_dec_node(target_node, 1, 0); 3668 binder_dec_node_tmpref(target_node); 3669 } 3670 3671 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3672 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n", 3673 proc->pid, thread->pid, reply ? 
"reply" : 3674 (tr->flags & TF_ONE_WAY ? "async" : "call"), 3675 target_proc ? target_proc->pid : 0, 3676 target_thread ? target_thread->pid : 0, 3677 t_debug_id, return_error, return_error_param, 3678 (u64)tr->data_size, (u64)tr->offsets_size, 3679 return_error_line); 3680 3681 if (target_thread) 3682 binder_thread_dec_tmpref(target_thread); 3683 if (target_proc) 3684 binder_proc_dec_tmpref(target_proc); 3685 3686 { 3687 struct binder_transaction_log_entry *fe; 3688 3689 e->return_error = return_error; 3690 e->return_error_param = return_error_param; 3691 e->return_error_line = return_error_line; 3692 fe = binder_transaction_log_add(&binder_transaction_log_failed); 3693 *fe = *e; 3694 /* 3695 * write barrier to synchronize with initialization 3696 * of log entry 3697 */ 3698 smp_wmb(); 3699 WRITE_ONCE(e->debug_id_done, t_debug_id); 3700 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3701 } 3702 3703 BUG_ON(thread->return_error.cmd != BR_OK); 3704 if (in_reply_to) { 3705 binder_set_txn_from_error(in_reply_to, t_debug_id, 3706 return_error, return_error_param); 3707 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3708 binder_enqueue_thread_work(thread, &thread->return_error.work); 3709 binder_send_failed_reply(in_reply_to, return_error); 3710 } else { 3711 binder_inner_proc_lock(proc); 3712 binder_set_extended_error(&thread->ee, t_debug_id, 3713 return_error, return_error_param); 3714 binder_inner_proc_unlock(proc); 3715 thread->return_error.cmd = return_error; 3716 binder_enqueue_thread_work(thread, &thread->return_error.work); 3717 } 3718 } 3719 3720 /** 3721 * binder_free_buf() - free the specified buffer 3722 * @proc: binder proc that owns buffer 3723 * @buffer: buffer to be freed 3724 * @is_failure: failed to send transaction 3725 * 3726 * If buffer for an async transaction, enqueue the next async 3727 * transaction from the node. 3728 * 3729 * Cleanup buffer and free it. 
3730 */ 3731 static void 3732 binder_free_buf(struct binder_proc *proc, 3733 struct binder_thread *thread, 3734 struct binder_buffer *buffer, bool is_failure) 3735 { 3736 binder_inner_proc_lock(proc); 3737 if (buffer->transaction) { 3738 buffer->transaction->buffer = NULL; 3739 buffer->transaction = NULL; 3740 } 3741 binder_inner_proc_unlock(proc); 3742 if (buffer->async_transaction && buffer->target_node) { 3743 struct binder_node *buf_node; 3744 struct binder_work *w; 3745 3746 buf_node = buffer->target_node; 3747 binder_node_inner_lock(buf_node); 3748 BUG_ON(!buf_node->has_async_transaction); 3749 BUG_ON(buf_node->proc != proc); 3750 w = binder_dequeue_work_head_ilocked( 3751 &buf_node->async_todo); 3752 if (!w) { 3753 buf_node->has_async_transaction = false; 3754 } else { 3755 binder_enqueue_work_ilocked( 3756 w, &proc->todo); 3757 binder_wakeup_proc_ilocked(proc); 3758 } 3759 binder_node_inner_unlock(buf_node); 3760 } 3761 trace_binder_transaction_buffer_release(buffer); 3762 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure); 3763 binder_alloc_free_buf(&proc->alloc, buffer); 3764 } 3765 3766 static int binder_thread_write(struct binder_proc *proc, 3767 struct binder_thread *thread, 3768 binder_uintptr_t binder_buffer, size_t size, 3769 binder_size_t *consumed) 3770 { 3771 uint32_t cmd; 3772 struct binder_context *context = proc->context; 3773 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3774 void __user *ptr = buffer + *consumed; 3775 void __user *end = buffer + size; 3776 3777 while (ptr < end && thread->return_error.cmd == BR_OK) { 3778 int ret; 3779 3780 if (get_user(cmd, (uint32_t __user *)ptr)) 3781 return -EFAULT; 3782 ptr += sizeof(uint32_t); 3783 trace_binder_command(cmd); 3784 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3785 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3786 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3787 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3788 } 3789 switch (cmd) { 3790 case BC_INCREFS: 3791 case BC_ACQUIRE: 3792 case BC_RELEASE: 3793 case BC_DECREFS: { 3794 uint32_t target; 3795 const char *debug_string; 3796 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3797 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3798 struct binder_ref_data rdata; 3799 3800 if (get_user(target, (uint32_t __user *)ptr)) 3801 return -EFAULT; 3802 3803 ptr += sizeof(uint32_t); 3804 ret = -1; 3805 if (increment && !target) { 3806 struct binder_node *ctx_mgr_node; 3807 3808 mutex_lock(&context->context_mgr_node_lock); 3809 ctx_mgr_node = context->binder_context_mgr_node; 3810 if (ctx_mgr_node) { 3811 if (ctx_mgr_node->proc == proc) { 3812 binder_user_error("%d:%d context manager tried to acquire desc 0\n", 3813 proc->pid, thread->pid); 3814 mutex_unlock(&context->context_mgr_node_lock); 3815 return -EINVAL; 3816 } 3817 ret = binder_inc_ref_for_node( 3818 proc, ctx_mgr_node, 3819 strong, NULL, &rdata); 3820 } 3821 mutex_unlock(&context->context_mgr_node_lock); 3822 } 3823 if (ret) 3824 ret = binder_update_ref_for_handle( 3825 proc, target, increment, strong, 3826 &rdata); 3827 if (!ret && rdata.desc != target) { 3828 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3829 proc->pid, thread->pid, 3830 target, rdata.desc); 3831 } 3832 switch (cmd) { 3833 case BC_INCREFS: 3834 debug_string = "IncRefs"; 3835 break; 3836 case BC_ACQUIRE: 3837 debug_string = "Acquire"; 3838 break; 3839 case BC_RELEASE: 3840 debug_string = "Release"; 3841 break; 3842 case BC_DECREFS: 3843 default: 3844 
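/* only BC_DECREFS can reach this label given the outer switch; the default case just guarantees debug_string is always set */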
debug_string = "DecRefs"; 3845 break; 3846 } 3847 if (ret) { 3848 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3849 proc->pid, thread->pid, debug_string, 3850 strong, target, ret); 3851 break; 3852 } 3853 binder_debug(BINDER_DEBUG_USER_REFS, 3854 "%d:%d %s ref %d desc %d s %d w %d\n", 3855 proc->pid, thread->pid, debug_string, 3856 rdata.debug_id, rdata.desc, rdata.strong, 3857 rdata.weak); 3858 break; 3859 } 3860 case BC_INCREFS_DONE: 3861 case BC_ACQUIRE_DONE: { 3862 binder_uintptr_t node_ptr; 3863 binder_uintptr_t cookie; 3864 struct binder_node *node; 3865 bool free_node; 3866 3867 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3868 return -EFAULT; 3869 ptr += sizeof(binder_uintptr_t); 3870 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3871 return -EFAULT; 3872 ptr += sizeof(binder_uintptr_t); 3873 node = binder_get_node(proc, node_ptr); 3874 if (node == NULL) { 3875 binder_user_error("%d:%d %s u%016llx no match\n", 3876 proc->pid, thread->pid, 3877 cmd == BC_INCREFS_DONE ? 3878 "BC_INCREFS_DONE" : 3879 "BC_ACQUIRE_DONE", 3880 (u64)node_ptr); 3881 break; 3882 } 3883 if (cookie != node->cookie) { 3884 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3885 proc->pid, thread->pid, 3886 cmd == BC_INCREFS_DONE ? 3887 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3888 (u64)node_ptr, node->debug_id, 3889 (u64)cookie, (u64)node->cookie); 3890 binder_put_node(node); 3891 break; 3892 } 3893 binder_node_inner_lock(node); 3894 if (cmd == BC_ACQUIRE_DONE) { 3895 if (node->pending_strong_ref == 0) { 3896 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3897 proc->pid, thread->pid, 3898 node->debug_id); 3899 binder_node_inner_unlock(node); 3900 binder_put_node(node); 3901 break; 3902 } 3903 node->pending_strong_ref = 0; 3904 } else { 3905 if (node->pending_weak_ref == 0) { 3906 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3907 proc->pid, thread->pid, 3908 node->debug_id); 3909 binder_node_inner_unlock(node); 3910 binder_put_node(node); 3911 break; 3912 } 3913 node->pending_weak_ref = 0; 3914 } 3915 free_node = binder_dec_node_nilocked(node, 3916 cmd == BC_ACQUIRE_DONE, 0); 3917 WARN_ON(free_node); 3918 binder_debug(BINDER_DEBUG_USER_REFS, 3919 "%d:%d %s node %d ls %d lw %d tr %d\n", 3920 proc->pid, thread->pid, 3921 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3922 node->debug_id, node->local_strong_refs, 3923 node->local_weak_refs, node->tmp_refs); 3924 binder_node_inner_unlock(node); 3925 binder_put_node(node); 3926 break; 3927 } 3928 case BC_ATTEMPT_ACQUIRE: 3929 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3930 return -EINVAL; 3931 case BC_ACQUIRE_RESULT: 3932 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3933 return -EINVAL; 3934 3935 case BC_FREE_BUFFER: { 3936 binder_uintptr_t data_ptr; 3937 struct binder_buffer *buffer; 3938 3939 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3940 return -EFAULT; 3941 ptr += sizeof(binder_uintptr_t); 3942 3943 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3944 data_ptr); 3945 if (IS_ERR_OR_NULL(buffer)) { 3946 if (PTR_ERR(buffer) == -EPERM) { 3947 binder_user_error( 3948 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 3949 proc->pid, thread->pid, 3950 (u64)data_ptr); 3951 } else { 3952 binder_user_error( 3953 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 3954 proc->pid, thread->pid, 3955 (u64)data_ptr); 3956 } 3957 break; 3958 } 3959 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3960 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3961 proc->pid, thread->pid, (u64)data_ptr, 3962 buffer->debug_id, 3963 buffer->transaction ? "active" : "finished"); 3964 binder_free_buf(proc, thread, buffer, false); 3965 break; 3966 } 3967 3968 case BC_TRANSACTION_SG: 3969 case BC_REPLY_SG: { 3970 struct binder_transaction_data_sg tr; 3971 3972 if (copy_from_user(&tr, ptr, sizeof(tr))) 3973 return -EFAULT; 3974 ptr += sizeof(tr); 3975 binder_transaction(proc, thread, &tr.transaction_data, 3976 cmd == BC_REPLY_SG, tr.buffers_size); 3977 break; 3978 } 3979 case BC_TRANSACTION: 3980 case BC_REPLY: { 3981 struct binder_transaction_data tr; 3982 3983 if (copy_from_user(&tr, ptr, sizeof(tr))) 3984 return -EFAULT; 3985 ptr += sizeof(tr); 3986 binder_transaction(proc, thread, &tr, 3987 cmd == BC_REPLY, 0); 3988 break; 3989 } 3990 3991 case BC_REGISTER_LOOPER: 3992 binder_debug(BINDER_DEBUG_THREADS, 3993 "%d:%d BC_REGISTER_LOOPER\n", 3994 proc->pid, thread->pid); 3995 binder_inner_proc_lock(proc); 3996 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3997 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3998 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3999 proc->pid, thread->pid); 4000 } else if (proc->requested_threads == 0) { 4001 thread->looper |= BINDER_LOOPER_STATE_INVALID; 4002 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 4003 proc->pid, thread->pid); 4004 } else { 4005 proc->requested_threads--; 4006 proc->requested_threads_started++; 4007 } 4008 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 4009 binder_inner_proc_unlock(proc); 4010 break; 4011 case BC_ENTER_LOOPER: 4012 binder_debug(BINDER_DEBUG_THREADS, 4013 "%d:%d BC_ENTER_LOOPER\n", 4014 proc->pid, thread->pid); 4015 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 4016 thread->looper |= BINDER_LOOPER_STATE_INVALID; 4017 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 4018 proc->pid, thread->pid); 4019 } 4020 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 4021 break; 4022 case BC_EXIT_LOOPER: 4023 binder_debug(BINDER_DEBUG_THREADS, 4024 "%d:%d BC_EXIT_LOOPER\n", 4025 proc->pid, thread->pid); 4026 thread->looper |= BINDER_LOOPER_STATE_EXITED; 4027 break; 4028 4029 case BC_REQUEST_DEATH_NOTIFICATION: 4030 case BC_CLEAR_DEATH_NOTIFICATION: { 4031 uint32_t 
target; 4032 binder_uintptr_t cookie; 4033 struct binder_ref *ref; 4034 struct binder_ref_death *death = NULL; 4035 4036 if (get_user(target, (uint32_t __user *)ptr)) 4037 return -EFAULT; 4038 ptr += sizeof(uint32_t); 4039 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 4040 return -EFAULT; 4041 ptr += sizeof(binder_uintptr_t); 4042 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 4043 /* 4044 * Allocate memory for death notification 4045 * before taking lock 4046 */ 4047 death = kzalloc(sizeof(*death), GFP_KERNEL); 4048 if (death == NULL) { 4049 WARN_ON(thread->return_error.cmd != 4050 BR_OK); 4051 thread->return_error.cmd = BR_ERROR; 4052 binder_enqueue_thread_work( 4053 thread, 4054 &thread->return_error.work); 4055 binder_debug( 4056 BINDER_DEBUG_FAILED_TRANSACTION, 4057 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 4058 proc->pid, thread->pid); 4059 break; 4060 } 4061 } 4062 binder_proc_lock(proc); 4063 ref = binder_get_ref_olocked(proc, target, false); 4064 if (ref == NULL) { 4065 binder_user_error("%d:%d %s invalid ref %d\n", 4066 proc->pid, thread->pid, 4067 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 4068 "BC_REQUEST_DEATH_NOTIFICATION" : 4069 "BC_CLEAR_DEATH_NOTIFICATION", 4070 target); 4071 binder_proc_unlock(proc); 4072 kfree(death); 4073 break; 4074 } 4075 4076 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4077 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 4078 proc->pid, thread->pid, 4079 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 4080 "BC_REQUEST_DEATH_NOTIFICATION" : 4081 "BC_CLEAR_DEATH_NOTIFICATION", 4082 (u64)cookie, ref->data.debug_id, 4083 ref->data.desc, ref->data.strong, 4084 ref->data.weak, ref->node->debug_id); 4085 4086 binder_node_lock(ref->node); 4087 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 4088 if (ref->death) { 4089 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 4090 proc->pid, thread->pid); 4091 binder_node_unlock(ref->node); 4092 binder_proc_unlock(proc); 4093 kfree(death); 4094 break; 4095 } 4096 binder_stats_created(BINDER_STAT_DEATH); 4097 INIT_LIST_HEAD(&death->work.entry); 4098 death->cookie = cookie; 4099 ref->death = death; 4100 if (ref->node->proc == NULL) { 4101 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4102 4103 binder_inner_proc_lock(proc); 4104 binder_enqueue_work_ilocked( 4105 &ref->death->work, &proc->todo); 4106 binder_wakeup_proc_ilocked(proc); 4107 binder_inner_proc_unlock(proc); 4108 } 4109 } else { 4110 if (ref->death == NULL) { 4111 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 4112 proc->pid, thread->pid); 4113 binder_node_unlock(ref->node); 4114 binder_proc_unlock(proc); 4115 break; 4116 } 4117 death = ref->death; 4118 if (death->cookie != cookie) { 4119 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 4120 proc->pid, thread->pid, 4121 (u64)death->cookie, 4122 (u64)cookie); 4123 binder_node_unlock(ref->node); 4124 binder_proc_unlock(proc); 4125 break; 4126 } 4127 ref->death = NULL; 4128 binder_inner_proc_lock(proc); 4129 if (list_empty(&death->work.entry)) { 4130 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 4131 if (thread->looper & 4132 (BINDER_LOOPER_STATE_REGISTERED | 4133 BINDER_LOOPER_STATE_ENTERED)) 4134 binder_enqueue_thread_work_ilocked( 4135 thread, 4136 &death->work); 4137 else { 4138 binder_enqueue_work_ilocked( 4139 &death->work, 4140 &proc->todo); 4141 binder_wakeup_proc_ilocked( 4142 proc); 4143 } 4144 } else { 4145 BUG_ON(death->work.type != 
BINDER_WORK_DEAD_BINDER); 4146 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 4147 } 4148 binder_inner_proc_unlock(proc); 4149 } 4150 binder_node_unlock(ref->node); 4151 binder_proc_unlock(proc); 4152 } break; 4153 case BC_DEAD_BINDER_DONE: { 4154 struct binder_work *w; 4155 binder_uintptr_t cookie; 4156 struct binder_ref_death *death = NULL; 4157 4158 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 4159 return -EFAULT; 4160 4161 ptr += sizeof(cookie); 4162 binder_inner_proc_lock(proc); 4163 list_for_each_entry(w, &proc->delivered_death, 4164 entry) { 4165 struct binder_ref_death *tmp_death = 4166 container_of(w, 4167 struct binder_ref_death, 4168 work); 4169 4170 if (tmp_death->cookie == cookie) { 4171 death = tmp_death; 4172 break; 4173 } 4174 } 4175 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4176 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 4177 proc->pid, thread->pid, (u64)cookie, 4178 death); 4179 if (death == NULL) { 4180 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 4181 proc->pid, thread->pid, (u64)cookie); 4182 binder_inner_proc_unlock(proc); 4183 break; 4184 } 4185 binder_dequeue_work_ilocked(&death->work); 4186 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 4187 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 4188 if (thread->looper & 4189 (BINDER_LOOPER_STATE_REGISTERED | 4190 BINDER_LOOPER_STATE_ENTERED)) 4191 binder_enqueue_thread_work_ilocked( 4192 thread, &death->work); 4193 else { 4194 binder_enqueue_work_ilocked( 4195 &death->work, 4196 &proc->todo); 4197 binder_wakeup_proc_ilocked(proc); 4198 } 4199 } 4200 binder_inner_proc_unlock(proc); 4201 } break; 4202 4203 default: 4204 pr_err("%d:%d unknown command %u\n", 4205 proc->pid, thread->pid, cmd); 4206 return -EINVAL; 4207 } 4208 *consumed = ptr - buffer; 4209 } 4210 return 0; 4211 } 4212 4213 static void binder_stat_br(struct binder_proc *proc, 4214 struct binder_thread *thread, uint32_t cmd) 4215 { 4216 trace_binder_return(cmd); 4217 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 4218 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 4219 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 4220 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 4221 } 4222 } 4223 4224 static int binder_put_node_cmd(struct binder_proc *proc, 4225 struct binder_thread *thread, 4226 void __user **ptrp, 4227 binder_uintptr_t node_ptr, 4228 binder_uintptr_t node_cookie, 4229 int node_debug_id, 4230 uint32_t cmd, const char *cmd_name) 4231 { 4232 void __user *ptr = *ptrp; 4233 4234 if (put_user(cmd, (uint32_t __user *)ptr)) 4235 return -EFAULT; 4236 ptr += sizeof(uint32_t); 4237 4238 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 4239 return -EFAULT; 4240 ptr += sizeof(binder_uintptr_t); 4241 4242 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 4243 return -EFAULT; 4244 ptr += sizeof(binder_uintptr_t); 4245 4246 binder_stat_br(proc, thread, cmd); 4247 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 4248 proc->pid, thread->pid, cmd_name, node_debug_id, 4249 (u64)node_ptr, (u64)node_cookie); 4250 4251 *ptrp = ptr; 4252 return 0; 4253 } 4254 4255 static int binder_wait_for_work(struct binder_thread *thread, 4256 bool do_proc_work) 4257 { 4258 DEFINE_WAIT(wait); 4259 struct binder_proc *proc = thread->proc; 4260 int ret = 0; 4261 4262 binder_inner_proc_lock(proc); 4263 for (;;) { 4264 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE); 4265 if (binder_has_work_ilocked(thread, do_proc_work)) 4266 break; 4267 if (do_proc_work) 4268 
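/* make this thread available for proc-wide work */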
list_add(&thread->waiting_thread_node, 4269 &proc->waiting_threads); 4270 binder_inner_proc_unlock(proc); 4271 schedule(); 4272 binder_inner_proc_lock(proc); 4273 list_del_init(&thread->waiting_thread_node); 4274 if (signal_pending(current)) { 4275 ret = -EINTR; 4276 break; 4277 } 4278 } 4279 finish_wait(&thread->wait, &wait); 4280 binder_inner_proc_unlock(proc); 4281 4282 return ret; 4283 } 4284 4285 /** 4286 * binder_apply_fd_fixups() - finish fd translation 4287 * @proc: binder_proc associated @t->buffer 4288 * @t: binder transaction with list of fd fixups 4289 * 4290 * Now that we are in the context of the transaction target 4291 * process, we can allocate and install fds. Process the 4292 * list of fds to translate and fixup the buffer with the 4293 * new fds first and only then install the files. 4294 * 4295 * If we fail to allocate an fd, skip the install and release 4296 * any fds that have already been allocated. 4297 */ 4298 static int binder_apply_fd_fixups(struct binder_proc *proc, 4299 struct binder_transaction *t) 4300 { 4301 struct binder_txn_fd_fixup *fixup, *tmp; 4302 int ret = 0; 4303 4304 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 4305 int fd = get_unused_fd_flags(O_CLOEXEC); 4306 4307 if (fd < 0) { 4308 binder_debug(BINDER_DEBUG_TRANSACTION, 4309 "failed fd fixup txn %d fd %d\n", 4310 t->debug_id, fd); 4311 ret = -ENOMEM; 4312 goto err; 4313 } 4314 binder_debug(BINDER_DEBUG_TRANSACTION, 4315 "fd fixup txn %d fd %d\n", 4316 t->debug_id, fd); 4317 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 4318 fixup->target_fd = fd; 4319 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, 4320 fixup->offset, &fd, 4321 sizeof(u32))) { 4322 ret = -EINVAL; 4323 goto err; 4324 } 4325 } 4326 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 4327 fd_install(fixup->target_fd, fixup->file); 4328 list_del(&fixup->fixup_entry); 4329 kfree(fixup); 4330 } 4331 4332 return ret; 4333 4334 err: 4335 binder_free_txn_fixups(t); 4336 return ret; 4337 } 4338 4339 static int binder_thread_read(struct binder_proc *proc, 4340 struct binder_thread *thread, 4341 binder_uintptr_t binder_buffer, size_t size, 4342 binder_size_t *consumed, int non_block) 4343 { 4344 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 4345 void __user *ptr = buffer + *consumed; 4346 void __user *end = buffer + size; 4347 4348 int ret = 0; 4349 int wait_for_proc_work; 4350 4351 if (*consumed == 0) { 4352 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 4353 return -EFAULT; 4354 ptr += sizeof(uint32_t); 4355 } 4356 4357 retry: 4358 binder_inner_proc_lock(proc); 4359 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4360 binder_inner_proc_unlock(proc); 4361 4362 thread->looper |= BINDER_LOOPER_STATE_WAITING; 4363 4364 trace_binder_wait_for_work(wait_for_proc_work, 4365 !!thread->transaction_stack, 4366 !binder_worklist_empty(proc, &thread->todo)); 4367 if (wait_for_proc_work) { 4368 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4369 BINDER_LOOPER_STATE_ENTERED))) { 4370 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 4371 proc->pid, thread->pid, thread->looper); 4372 wait_event_interruptible(binder_user_error_wait, 4373 binder_stop_on_user_error < 2); 4374 } 4375 binder_set_nice(proc->default_priority); 4376 } 4377 4378 if (non_block) { 4379 if (!binder_has_work(thread, wait_for_proc_work)) 4380 ret = -EAGAIN; 4381 } else { 4382 ret = binder_wait_for_work(thread, 
wait_for_proc_work); 4383 } 4384 4385 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 4386 4387 if (ret) 4388 return ret; 4389 4390 while (1) { 4391 uint32_t cmd; 4392 struct binder_transaction_data_secctx tr; 4393 struct binder_transaction_data *trd = &tr.transaction_data; 4394 struct binder_work *w = NULL; 4395 struct list_head *list = NULL; 4396 struct binder_transaction *t = NULL; 4397 struct binder_thread *t_from; 4398 size_t trsize = sizeof(*trd); 4399 4400 binder_inner_proc_lock(proc); 4401 if (!binder_worklist_empty_ilocked(&thread->todo)) 4402 list = &thread->todo; 4403 else if (!binder_worklist_empty_ilocked(&proc->todo) && 4404 wait_for_proc_work) 4405 list = &proc->todo; 4406 else { 4407 binder_inner_proc_unlock(proc); 4408 4409 /* no data added */ 4410 if (ptr - buffer == 4 && !thread->looper_need_return) 4411 goto retry; 4412 break; 4413 } 4414 4415 if (end - ptr < sizeof(tr) + 4) { 4416 binder_inner_proc_unlock(proc); 4417 break; 4418 } 4419 w = binder_dequeue_work_head_ilocked(list); 4420 if (binder_worklist_empty_ilocked(&thread->todo)) 4421 thread->process_todo = false; 4422 4423 switch (w->type) { 4424 case BINDER_WORK_TRANSACTION: { 4425 binder_inner_proc_unlock(proc); 4426 t = container_of(w, struct binder_transaction, work); 4427 } break; 4428 case BINDER_WORK_RETURN_ERROR: { 4429 struct binder_error *e = container_of( 4430 w, struct binder_error, work); 4431 4432 WARN_ON(e->cmd == BR_OK); 4433 binder_inner_proc_unlock(proc); 4434 if (put_user(e->cmd, (uint32_t __user *)ptr)) 4435 return -EFAULT; 4436 cmd = e->cmd; 4437 e->cmd = BR_OK; 4438 ptr += sizeof(uint32_t); 4439 4440 binder_stat_br(proc, thread, cmd); 4441 } break; 4442 case BINDER_WORK_TRANSACTION_COMPLETE: 4443 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: { 4444 if (proc->oneway_spam_detection_enabled && 4445 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT) 4446 cmd = BR_ONEWAY_SPAM_SUSPECT; 4447 else 4448 cmd = BR_TRANSACTION_COMPLETE; 4449 binder_inner_proc_unlock(proc); 4450 kfree(w); 4451 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4452 if (put_user(cmd, (uint32_t __user *)ptr)) 4453 return -EFAULT; 4454 ptr += sizeof(uint32_t); 4455 4456 binder_stat_br(proc, thread, cmd); 4457 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 4458 "%d:%d BR_TRANSACTION_COMPLETE\n", 4459 proc->pid, thread->pid); 4460 } break; 4461 case BINDER_WORK_NODE: { 4462 struct binder_node *node = container_of(w, struct binder_node, work); 4463 int strong, weak; 4464 binder_uintptr_t node_ptr = node->ptr; 4465 binder_uintptr_t node_cookie = node->cookie; 4466 int node_debug_id = node->debug_id; 4467 int has_weak_ref; 4468 int has_strong_ref; 4469 void __user *orig_ptr = ptr; 4470 4471 BUG_ON(proc != node->proc); 4472 strong = node->internal_strong_refs || 4473 node->local_strong_refs; 4474 weak = !hlist_empty(&node->refs) || 4475 node->local_weak_refs || 4476 node->tmp_refs || strong; 4477 has_strong_ref = node->has_strong_ref; 4478 has_weak_ref = node->has_weak_ref; 4479 4480 if (weak && !has_weak_ref) { 4481 node->has_weak_ref = 1; 4482 node->pending_weak_ref = 1; 4483 node->local_weak_refs++; 4484 } 4485 if (strong && !has_strong_ref) { 4486 node->has_strong_ref = 1; 4487 node->pending_strong_ref = 1; 4488 node->local_strong_refs++; 4489 } 4490 if (!strong && has_strong_ref) 4491 node->has_strong_ref = 0; 4492 if (!weak && has_weak_ref) 4493 node->has_weak_ref = 0; 4494 if (!weak && !strong) { 4495 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4496 "%d:%d node %d u%016llx c%016llx deleted\n", 4497 proc->pid, 
thread->pid, 4498 node_debug_id, 4499 (u64)node_ptr, 4500 (u64)node_cookie); 4501 rb_erase(&node->rb_node, &proc->nodes); 4502 binder_inner_proc_unlock(proc); 4503 binder_node_lock(node); 4504 /* 4505 * Acquire the node lock before freeing the 4506 * node to serialize with other threads that 4507 * may have been holding the node lock while 4508 * decrementing this node (avoids race where 4509 * this thread frees while the other thread 4510 * is unlocking the node after the final 4511 * decrement) 4512 */ 4513 binder_node_unlock(node); 4514 binder_free_node(node); 4515 } else 4516 binder_inner_proc_unlock(proc); 4517 4518 if (weak && !has_weak_ref) 4519 ret = binder_put_node_cmd( 4520 proc, thread, &ptr, node_ptr, 4521 node_cookie, node_debug_id, 4522 BR_INCREFS, "BR_INCREFS"); 4523 if (!ret && strong && !has_strong_ref) 4524 ret = binder_put_node_cmd( 4525 proc, thread, &ptr, node_ptr, 4526 node_cookie, node_debug_id, 4527 BR_ACQUIRE, "BR_ACQUIRE"); 4528 if (!ret && !strong && has_strong_ref) 4529 ret = binder_put_node_cmd( 4530 proc, thread, &ptr, node_ptr, 4531 node_cookie, node_debug_id, 4532 BR_RELEASE, "BR_RELEASE"); 4533 if (!ret && !weak && has_weak_ref) 4534 ret = binder_put_node_cmd( 4535 proc, thread, &ptr, node_ptr, 4536 node_cookie, node_debug_id, 4537 BR_DECREFS, "BR_DECREFS"); 4538 if (orig_ptr == ptr) 4539 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4540 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4541 proc->pid, thread->pid, 4542 node_debug_id, 4543 (u64)node_ptr, 4544 (u64)node_cookie); 4545 if (ret) 4546 return ret; 4547 } break; 4548 case BINDER_WORK_DEAD_BINDER: 4549 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4550 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4551 struct binder_ref_death *death; 4552 uint32_t cmd; 4553 binder_uintptr_t cookie; 4554 4555 death = container_of(w, struct binder_ref_death, work); 4556 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4557 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4558 else 4559 cmd = BR_DEAD_BINDER; 4560 cookie = death->cookie; 4561 4562 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4563 "%d:%d %s %016llx\n", 4564 proc->pid, thread->pid, 4565 cmd == BR_DEAD_BINDER ? 
4566 "BR_DEAD_BINDER" : 4567 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4568 (u64)cookie); 4569 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4570 binder_inner_proc_unlock(proc); 4571 kfree(death); 4572 binder_stats_deleted(BINDER_STAT_DEATH); 4573 } else { 4574 binder_enqueue_work_ilocked( 4575 w, &proc->delivered_death); 4576 binder_inner_proc_unlock(proc); 4577 } 4578 if (put_user(cmd, (uint32_t __user *)ptr)) 4579 return -EFAULT; 4580 ptr += sizeof(uint32_t); 4581 if (put_user(cookie, 4582 (binder_uintptr_t __user *)ptr)) 4583 return -EFAULT; 4584 ptr += sizeof(binder_uintptr_t); 4585 binder_stat_br(proc, thread, cmd); 4586 if (cmd == BR_DEAD_BINDER) 4587 goto done; /* DEAD_BINDER notifications can cause transactions */ 4588 } break; 4589 default: 4590 binder_inner_proc_unlock(proc); 4591 pr_err("%d:%d: bad work type %d\n", 4592 proc->pid, thread->pid, w->type); 4593 break; 4594 } 4595 4596 if (!t) 4597 continue; 4598 4599 BUG_ON(t->buffer == NULL); 4600 if (t->buffer->target_node) { 4601 struct binder_node *target_node = t->buffer->target_node; 4602 4603 trd->target.ptr = target_node->ptr; 4604 trd->cookie = target_node->cookie; 4605 t->saved_priority = task_nice(current); 4606 if (t->priority < target_node->min_priority && 4607 !(t->flags & TF_ONE_WAY)) 4608 binder_set_nice(t->priority); 4609 else if (!(t->flags & TF_ONE_WAY) || 4610 t->saved_priority > target_node->min_priority) 4611 binder_set_nice(target_node->min_priority); 4612 cmd = BR_TRANSACTION; 4613 } else { 4614 trd->target.ptr = 0; 4615 trd->cookie = 0; 4616 cmd = BR_REPLY; 4617 } 4618 trd->code = t->code; 4619 trd->flags = t->flags; 4620 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4621 4622 t_from = binder_get_txn_from(t); 4623 if (t_from) { 4624 struct task_struct *sender = t_from->proc->tsk; 4625 4626 trd->sender_pid = 4627 task_tgid_nr_ns(sender, 4628 task_active_pid_ns(current)); 4629 } else { 4630 trd->sender_pid = 0; 4631 } 4632 4633 ret = binder_apply_fd_fixups(proc, t); 4634 if (ret) { 4635 struct binder_buffer *buffer = t->buffer; 4636 bool oneway = !!(t->flags & TF_ONE_WAY); 4637 int tid = t->debug_id; 4638 4639 if (t_from) 4640 binder_thread_dec_tmpref(t_from); 4641 buffer->transaction = NULL; 4642 binder_cleanup_transaction(t, "fd fixups failed", 4643 BR_FAILED_REPLY); 4644 binder_free_buf(proc, thread, buffer, true); 4645 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4646 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4647 proc->pid, thread->pid, 4648 oneway ? "async " : 4649 (cmd == BR_REPLY ? 
"reply " : ""), 4650 tid, BR_FAILED_REPLY, ret, __LINE__); 4651 if (cmd == BR_REPLY) { 4652 cmd = BR_FAILED_REPLY; 4653 if (put_user(cmd, (uint32_t __user *)ptr)) 4654 return -EFAULT; 4655 ptr += sizeof(uint32_t); 4656 binder_stat_br(proc, thread, cmd); 4657 break; 4658 } 4659 continue; 4660 } 4661 trd->data_size = t->buffer->data_size; 4662 trd->offsets_size = t->buffer->offsets_size; 4663 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; 4664 trd->data.ptr.offsets = trd->data.ptr.buffer + 4665 ALIGN(t->buffer->data_size, 4666 sizeof(void *)); 4667 4668 tr.secctx = t->security_ctx; 4669 if (t->security_ctx) { 4670 cmd = BR_TRANSACTION_SEC_CTX; 4671 trsize = sizeof(tr); 4672 } 4673 if (put_user(cmd, (uint32_t __user *)ptr)) { 4674 if (t_from) 4675 binder_thread_dec_tmpref(t_from); 4676 4677 binder_cleanup_transaction(t, "put_user failed", 4678 BR_FAILED_REPLY); 4679 4680 return -EFAULT; 4681 } 4682 ptr += sizeof(uint32_t); 4683 if (copy_to_user(ptr, &tr, trsize)) { 4684 if (t_from) 4685 binder_thread_dec_tmpref(t_from); 4686 4687 binder_cleanup_transaction(t, "copy_to_user failed", 4688 BR_FAILED_REPLY); 4689 4690 return -EFAULT; 4691 } 4692 ptr += trsize; 4693 4694 trace_binder_transaction_received(t); 4695 binder_stat_br(proc, thread, cmd); 4696 binder_debug(BINDER_DEBUG_TRANSACTION, 4697 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n", 4698 proc->pid, thread->pid, 4699 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4700 (cmd == BR_TRANSACTION_SEC_CTX) ? 4701 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", 4702 t->debug_id, t_from ? t_from->proc->pid : 0, 4703 t_from ? t_from->pid : 0, cmd, 4704 t->buffer->data_size, t->buffer->offsets_size, 4705 (u64)trd->data.ptr.buffer, 4706 (u64)trd->data.ptr.offsets); 4707 4708 if (t_from) 4709 binder_thread_dec_tmpref(t_from); 4710 t->buffer->allow_user_free = 1; 4711 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { 4712 binder_inner_proc_lock(thread->proc); 4713 t->to_parent = thread->transaction_stack; 4714 t->to_thread = thread; 4715 thread->transaction_stack = t; 4716 binder_inner_proc_unlock(thread->proc); 4717 } else { 4718 binder_free_transaction(t); 4719 } 4720 break; 4721 } 4722 4723 done: 4724 4725 *consumed = ptr - buffer; 4726 binder_inner_proc_lock(proc); 4727 if (proc->requested_threads == 0 && 4728 list_empty(&thread->proc->waiting_threads) && 4729 proc->requested_threads_started < proc->max_threads && 4730 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4731 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 4732 /*spawn a new thread if we leave this out */) { 4733 proc->requested_threads++; 4734 binder_inner_proc_unlock(proc); 4735 binder_debug(BINDER_DEBUG_THREADS, 4736 "%d:%d BR_SPAWN_LOOPER\n", 4737 proc->pid, thread->pid); 4738 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4739 return -EFAULT; 4740 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4741 } else 4742 binder_inner_proc_unlock(proc); 4743 return 0; 4744 } 4745 4746 static void binder_release_work(struct binder_proc *proc, 4747 struct list_head *list) 4748 { 4749 struct binder_work *w; 4750 enum binder_work_type wtype; 4751 4752 while (1) { 4753 binder_inner_proc_lock(proc); 4754 w = binder_dequeue_work_head_ilocked(list); 4755 wtype = w ? 
w->type : 0; 4756 binder_inner_proc_unlock(proc); 4757 if (!w) 4758 return; 4759 4760 switch (wtype) { 4761 case BINDER_WORK_TRANSACTION: { 4762 struct binder_transaction *t; 4763 4764 t = container_of(w, struct binder_transaction, work); 4765 4766 binder_cleanup_transaction(t, "process died.", 4767 BR_DEAD_REPLY); 4768 } break; 4769 case BINDER_WORK_RETURN_ERROR: { 4770 struct binder_error *e = container_of( 4771 w, struct binder_error, work); 4772 4773 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4774 "undelivered TRANSACTION_ERROR: %u\n", 4775 e->cmd); 4776 } break; 4777 case BINDER_WORK_TRANSACTION_COMPLETE: { 4778 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4779 "undelivered TRANSACTION_COMPLETE\n"); 4780 kfree(w); 4781 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4782 } break; 4783 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4784 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4785 struct binder_ref_death *death; 4786 4787 death = container_of(w, struct binder_ref_death, work); 4788 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4789 "undelivered death notification, %016llx\n", 4790 (u64)death->cookie); 4791 kfree(death); 4792 binder_stats_deleted(BINDER_STAT_DEATH); 4793 } break; 4794 case BINDER_WORK_NODE: 4795 break; 4796 default: 4797 pr_err("unexpected work type, %d, not freed\n", 4798 wtype); 4799 break; 4800 } 4801 } 4802 4803 } 4804 4805 static struct binder_thread *binder_get_thread_ilocked( 4806 struct binder_proc *proc, struct binder_thread *new_thread) 4807 { 4808 struct binder_thread *thread = NULL; 4809 struct rb_node *parent = NULL; 4810 struct rb_node **p = &proc->threads.rb_node; 4811 4812 while (*p) { 4813 parent = *p; 4814 thread = rb_entry(parent, struct binder_thread, rb_node); 4815 4816 if (current->pid < thread->pid) 4817 p = &(*p)->rb_left; 4818 else if (current->pid > thread->pid) 4819 p = &(*p)->rb_right; 4820 else 4821 return thread; 4822 } 4823 if (!new_thread) 4824 return NULL; 4825 thread = new_thread; 4826 binder_stats_created(BINDER_STAT_THREAD); 4827 thread->proc = proc; 4828 thread->pid = current->pid; 4829 atomic_set(&thread->tmp_ref, 0); 4830 init_waitqueue_head(&thread->wait); 4831 INIT_LIST_HEAD(&thread->todo); 4832 rb_link_node(&thread->rb_node, parent, p); 4833 rb_insert_color(&thread->rb_node, &proc->threads); 4834 thread->looper_need_return = true; 4835 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4836 thread->return_error.cmd = BR_OK; 4837 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4838 thread->reply_error.cmd = BR_OK; 4839 thread->ee.command = BR_OK; 4840 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4841 return thread; 4842 } 4843 4844 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4845 { 4846 struct binder_thread *thread; 4847 struct binder_thread *new_thread; 4848 4849 binder_inner_proc_lock(proc); 4850 thread = binder_get_thread_ilocked(proc, NULL); 4851 binder_inner_proc_unlock(proc); 4852 if (!thread) { 4853 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4854 if (new_thread == NULL) 4855 return NULL; 4856 binder_inner_proc_lock(proc); 4857 thread = binder_get_thread_ilocked(proc, new_thread); 4858 binder_inner_proc_unlock(proc); 4859 if (thread != new_thread) 4860 kfree(new_thread); 4861 } 4862 return thread; 4863 } 4864 4865 static void binder_free_proc(struct binder_proc *proc) 4866 { 4867 struct binder_device *device; 4868 4869 BUG_ON(!list_empty(&proc->todo)); 4870 BUG_ON(!list_empty(&proc->delivered_death)); 4871 if (proc->outstanding_txns) 4872 pr_warn("%s: 
Unexpected outstanding_txns %d\n", 4873 __func__, proc->outstanding_txns); 4874 device = container_of(proc->context, struct binder_device, context); 4875 if (refcount_dec_and_test(&device->ref)) { 4876 kfree(proc->context->name); 4877 kfree(device); 4878 } 4879 binder_alloc_deferred_release(&proc->alloc); 4880 put_task_struct(proc->tsk); 4881 put_cred(proc->cred); 4882 binder_stats_deleted(BINDER_STAT_PROC); 4883 kfree(proc); 4884 } 4885 4886 static void binder_free_thread(struct binder_thread *thread) 4887 { 4888 BUG_ON(!list_empty(&thread->todo)); 4889 binder_stats_deleted(BINDER_STAT_THREAD); 4890 binder_proc_dec_tmpref(thread->proc); 4891 kfree(thread); 4892 } 4893 4894 static int binder_thread_release(struct binder_proc *proc, 4895 struct binder_thread *thread) 4896 { 4897 struct binder_transaction *t; 4898 struct binder_transaction *send_reply = NULL; 4899 int active_transactions = 0; 4900 struct binder_transaction *last_t = NULL; 4901 4902 binder_inner_proc_lock(thread->proc); 4903 /* 4904 * take a ref on the proc so it survives 4905 * after we remove this thread from proc->threads. 4906 * The corresponding dec is when we actually 4907 * free the thread in binder_free_thread() 4908 */ 4909 proc->tmp_ref++; 4910 /* 4911 * take a ref on this thread to ensure it 4912 * survives while we are releasing it 4913 */ 4914 atomic_inc(&thread->tmp_ref); 4915 rb_erase(&thread->rb_node, &proc->threads); 4916 t = thread->transaction_stack; 4917 if (t) { 4918 spin_lock(&t->lock); 4919 if (t->to_thread == thread) 4920 send_reply = t; 4921 } else { 4922 __acquire(&t->lock); 4923 } 4924 thread->is_dead = true; 4925 4926 while (t) { 4927 last_t = t; 4928 active_transactions++; 4929 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4930 "release %d:%d transaction %d %s, still active\n", 4931 proc->pid, thread->pid, 4932 t->debug_id, 4933 (t->to_thread == thread) ? "in" : "out"); 4934 4935 if (t->to_thread == thread) { 4936 thread->proc->outstanding_txns--; 4937 t->to_proc = NULL; 4938 t->to_thread = NULL; 4939 if (t->buffer) { 4940 t->buffer->transaction = NULL; 4941 t->buffer = NULL; 4942 } 4943 t = t->to_parent; 4944 } else if (t->from == thread) { 4945 t->from = NULL; 4946 t = t->from_parent; 4947 } else 4948 BUG(); 4949 spin_unlock(&last_t->lock); 4950 if (t) 4951 spin_lock(&t->lock); 4952 else 4953 __acquire(&t->lock); 4954 } 4955 /* annotation for sparse, lock not acquired in last iteration above */ 4956 __release(&t->lock); 4957 4958 /* 4959 * If this thread used poll, make sure we remove the waitqueue from any 4960 * poll data structures holding it. 4961 */ 4962 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4963 wake_up_pollfree(&thread->wait); 4964 4965 binder_inner_proc_unlock(thread->proc); 4966 4967 /* 4968 * This is needed to avoid races between wake_up_pollfree() above and 4969 * someone else removing the last entry from the queue for other reasons 4970 * (e.g. ep_remove_wait_queue() being called due to an epoll file 4971 * descriptor being closed). Such other users hold an RCU read lock, so 4972 * we can be sure they're done after we call synchronize_rcu(). 
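*
 * Also note that synchronize_rcu() can sleep, so it must run after
 * binder_inner_proc_unlock(); this is why the POLL check is repeated
 * here rather than folded into the wake_up_pollfree() branch above.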
4973 */ 4974 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4975 synchronize_rcu(); 4976 4977 if (send_reply) 4978 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4979 binder_release_work(proc, &thread->todo); 4980 binder_thread_dec_tmpref(thread); 4981 return active_transactions; 4982 } 4983 4984 static __poll_t binder_poll(struct file *filp, 4985 struct poll_table_struct *wait) 4986 { 4987 struct binder_proc *proc = filp->private_data; 4988 struct binder_thread *thread = NULL; 4989 bool wait_for_proc_work; 4990 4991 thread = binder_get_thread(proc); 4992 if (!thread) 4993 return POLLERR; 4994 4995 binder_inner_proc_lock(thread->proc); 4996 thread->looper |= BINDER_LOOPER_STATE_POLL; 4997 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4998 4999 binder_inner_proc_unlock(thread->proc); 5000 5001 poll_wait(filp, &thread->wait, wait); 5002 5003 if (binder_has_work(thread, wait_for_proc_work)) 5004 return EPOLLIN; 5005 5006 return 0; 5007 } 5008 5009 static int binder_ioctl_write_read(struct file *filp, 5010 unsigned int cmd, unsigned long arg, 5011 struct binder_thread *thread) 5012 { 5013 int ret = 0; 5014 struct binder_proc *proc = filp->private_data; 5015 unsigned int size = _IOC_SIZE(cmd); 5016 void __user *ubuf = (void __user *)arg; 5017 struct binder_write_read bwr; 5018 5019 if (size != sizeof(struct binder_write_read)) { 5020 ret = -EINVAL; 5021 goto out; 5022 } 5023 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 5024 ret = -EFAULT; 5025 goto out; 5026 } 5027 binder_debug(BINDER_DEBUG_READ_WRITE, 5028 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 5029 proc->pid, thread->pid, 5030 (u64)bwr.write_size, (u64)bwr.write_buffer, 5031 (u64)bwr.read_size, (u64)bwr.read_buffer); 5032 5033 if (bwr.write_size > 0) { 5034 ret = binder_thread_write(proc, thread, 5035 bwr.write_buffer, 5036 bwr.write_size, 5037 &bwr.write_consumed); 5038 trace_binder_write_done(ret); 5039 if (ret < 0) { 5040 bwr.read_consumed = 0; 5041 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 5042 ret = -EFAULT; 5043 goto out; 5044 } 5045 } 5046 if (bwr.read_size > 0) { 5047 ret = binder_thread_read(proc, thread, bwr.read_buffer, 5048 bwr.read_size, 5049 &bwr.read_consumed, 5050 filp->f_flags & O_NONBLOCK); 5051 trace_binder_read_done(ret); 5052 binder_inner_proc_lock(proc); 5053 if (!binder_worklist_empty_ilocked(&proc->todo)) 5054 binder_wakeup_proc_ilocked(proc); 5055 binder_inner_proc_unlock(proc); 5056 if (ret < 0) { 5057 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 5058 ret = -EFAULT; 5059 goto out; 5060 } 5061 } 5062 binder_debug(BINDER_DEBUG_READ_WRITE, 5063 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 5064 proc->pid, thread->pid, 5065 (u64)bwr.write_consumed, (u64)bwr.write_size, 5066 (u64)bwr.read_consumed, (u64)bwr.read_size); 5067 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 5068 ret = -EFAULT; 5069 goto out; 5070 } 5071 out: 5072 return ret; 5073 } 5074 5075 static int binder_ioctl_set_ctx_mgr(struct file *filp, 5076 struct flat_binder_object *fbo) 5077 { 5078 int ret = 0; 5079 struct binder_proc *proc = filp->private_data; 5080 struct binder_context *context = proc->context; 5081 struct binder_node *new_node; 5082 kuid_t curr_euid = current_euid(); 5083 5084 mutex_lock(&context->context_mgr_node_lock); 5085 if (context->binder_context_mgr_node) { 5086 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 5087 ret = -EBUSY; 5088 goto out; 5089 } 5090 ret = security_binder_set_context_mgr(proc->cred); 5091 if (ret < 0) 5092 goto out; 5093 if 
(uid_valid(context->binder_context_mgr_uid)) { 5094 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 5095 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 5096 from_kuid(&init_user_ns, curr_euid), 5097 from_kuid(&init_user_ns, 5098 context->binder_context_mgr_uid)); 5099 ret = -EPERM; 5100 goto out; 5101 } 5102 } else { 5103 context->binder_context_mgr_uid = curr_euid; 5104 } 5105 new_node = binder_new_node(proc, fbo); 5106 if (!new_node) { 5107 ret = -ENOMEM; 5108 goto out; 5109 } 5110 binder_node_lock(new_node); 5111 new_node->local_weak_refs++; 5112 new_node->local_strong_refs++; 5113 new_node->has_strong_ref = 1; 5114 new_node->has_weak_ref = 1; 5115 context->binder_context_mgr_node = new_node; 5116 binder_node_unlock(new_node); 5117 binder_put_node(new_node); 5118 out: 5119 mutex_unlock(&context->context_mgr_node_lock); 5120 return ret; 5121 } 5122 5123 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, 5124 struct binder_node_info_for_ref *info) 5125 { 5126 struct binder_node *node; 5127 struct binder_context *context = proc->context; 5128 __u32 handle = info->handle; 5129 5130 if (info->strong_count || info->weak_count || info->reserved1 || 5131 info->reserved2 || info->reserved3) { 5132 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", 5133 proc->pid); 5134 return -EINVAL; 5135 } 5136 5137 /* This ioctl may only be used by the context manager */ 5138 mutex_lock(&context->context_mgr_node_lock); 5139 if (!context->binder_context_mgr_node || 5140 context->binder_context_mgr_node->proc != proc) { 5141 mutex_unlock(&context->context_mgr_node_lock); 5142 return -EPERM; 5143 } 5144 mutex_unlock(&context->context_mgr_node_lock); 5145 5146 node = binder_get_node_from_ref(proc, handle, true, NULL); 5147 if (!node) 5148 return -EINVAL; 5149 5150 info->strong_count = node->local_strong_refs + 5151 node->internal_strong_refs; 5152 info->weak_count = node->local_weak_refs; 5153 5154 binder_put_node(node); 5155 5156 return 0; 5157 } 5158 5159 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 5160 struct binder_node_debug_info *info) 5161 { 5162 struct rb_node *n; 5163 binder_uintptr_t ptr = info->ptr; 5164 5165 memset(info, 0, sizeof(*info)); 5166 5167 binder_inner_proc_lock(proc); 5168 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5169 struct binder_node *node = rb_entry(n, struct binder_node, 5170 rb_node); 5171 if (node->ptr > ptr) { 5172 info->ptr = node->ptr; 5173 info->cookie = node->cookie; 5174 info->has_strong_ref = node->has_strong_ref; 5175 info->has_weak_ref = node->has_weak_ref; 5176 break; 5177 } 5178 } 5179 binder_inner_proc_unlock(proc); 5180 5181 return 0; 5182 } 5183 5184 static bool binder_txns_pending_ilocked(struct binder_proc *proc) 5185 { 5186 struct rb_node *n; 5187 struct binder_thread *thread; 5188 5189 if (proc->outstanding_txns > 0) 5190 return true; 5191 5192 for (n = rb_first(&proc->threads); n; n = rb_next(n)) { 5193 thread = rb_entry(n, struct binder_thread, rb_node); 5194 if (thread->transaction_stack) 5195 return true; 5196 } 5197 return false; 5198 } 5199 5200 static int binder_ioctl_freeze(struct binder_freeze_info *info, 5201 struct binder_proc *target_proc) 5202 { 5203 int ret = 0; 5204 5205 if (!info->enable) { 5206 binder_inner_proc_lock(target_proc); 5207 target_proc->sync_recv = false; 5208 target_proc->async_recv = false; 5209 target_proc->is_frozen = false; 5210 binder_inner_proc_unlock(target_proc); 5211 return 0; 5212 } 5213 5214 /* 5215 * Freezing 
the target. Prevent new transactions by 5216 * setting frozen state. If timeout specified, wait 5217 * for transactions to drain. 5218 */ 5219 binder_inner_proc_lock(target_proc); 5220 target_proc->sync_recv = false; 5221 target_proc->async_recv = false; 5222 target_proc->is_frozen = true; 5223 binder_inner_proc_unlock(target_proc); 5224 5225 if (info->timeout_ms > 0) 5226 ret = wait_event_interruptible_timeout( 5227 target_proc->freeze_wait, 5228 (!target_proc->outstanding_txns), 5229 msecs_to_jiffies(info->timeout_ms)); 5230 5231 /* Check pending transactions that wait for reply */ 5232 if (ret >= 0) { 5233 binder_inner_proc_lock(target_proc); 5234 if (binder_txns_pending_ilocked(target_proc)) 5235 ret = -EAGAIN; 5236 binder_inner_proc_unlock(target_proc); 5237 } 5238 5239 if (ret < 0) { 5240 binder_inner_proc_lock(target_proc); 5241 target_proc->is_frozen = false; 5242 binder_inner_proc_unlock(target_proc); 5243 } 5244 5245 return ret; 5246 } 5247 5248 static int binder_ioctl_get_freezer_info( 5249 struct binder_frozen_status_info *info) 5250 { 5251 struct binder_proc *target_proc; 5252 bool found = false; 5253 __u32 txns_pending; 5254 5255 info->sync_recv = 0; 5256 info->async_recv = 0; 5257 5258 mutex_lock(&binder_procs_lock); 5259 hlist_for_each_entry(target_proc, &binder_procs, proc_node) { 5260 if (target_proc->pid == info->pid) { 5261 found = true; 5262 binder_inner_proc_lock(target_proc); 5263 txns_pending = binder_txns_pending_ilocked(target_proc); 5264 info->sync_recv |= target_proc->sync_recv | 5265 (txns_pending << 1); 5266 info->async_recv |= target_proc->async_recv; 5267 binder_inner_proc_unlock(target_proc); 5268 } 5269 } 5270 mutex_unlock(&binder_procs_lock); 5271 5272 if (!found) 5273 return -EINVAL; 5274 5275 return 0; 5276 } 5277 5278 static int binder_ioctl_get_extended_error(struct binder_thread *thread, 5279 void __user *ubuf) 5280 { 5281 struct binder_extended_error ee; 5282 5283 binder_inner_proc_lock(thread->proc); 5284 ee = thread->ee; 5285 binder_set_extended_error(&thread->ee, 0, BR_OK, 0); 5286 binder_inner_proc_unlock(thread->proc); 5287 5288 if (copy_to_user(ubuf, &ee, sizeof(ee))) 5289 return -EFAULT; 5290 5291 return 0; 5292 } 5293 5294 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 5295 { 5296 int ret; 5297 struct binder_proc *proc = filp->private_data; 5298 struct binder_thread *thread; 5299 unsigned int size = _IOC_SIZE(cmd); 5300 void __user *ubuf = (void __user *)arg; 5301 5302 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 5303 proc->pid, current->pid, cmd, arg);*/ 5304 5305 binder_selftest_alloc(&proc->alloc); 5306 5307 trace_binder_ioctl(cmd, arg); 5308 5309 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5310 if (ret) 5311 goto err_unlocked; 5312 5313 thread = binder_get_thread(proc); 5314 if (thread == NULL) { 5315 ret = -ENOMEM; 5316 goto err; 5317 } 5318 5319 switch (cmd) { 5320 case BINDER_WRITE_READ: 5321 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 5322 if (ret) 5323 goto err; 5324 break; 5325 case BINDER_SET_MAX_THREADS: { 5326 int max_threads; 5327 5328 if (copy_from_user(&max_threads, ubuf, 5329 sizeof(max_threads))) { 5330 ret = -EINVAL; 5331 goto err; 5332 } 5333 binder_inner_proc_lock(proc); 5334 proc->max_threads = max_threads; 5335 binder_inner_proc_unlock(proc); 5336 break; 5337 } 5338 case BINDER_SET_CONTEXT_MGR_EXT: { 5339 struct flat_binder_object fbo; 5340 5341 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { 5342 ret = -EINVAL; 5343 goto err; 5344 } 
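/*
 * The _EXT variant passes a flat_binder_object so the caller can
 * set flags on the context manager node, e.g. request security
 * contexts on transactions via FLAT_BINDER_FLAG_TXN_SECURITY_CTX.
 */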
5345 ret = binder_ioctl_set_ctx_mgr(filp, &fbo); 5346 if (ret) 5347 goto err; 5348 break; 5349 } 5350 case BINDER_SET_CONTEXT_MGR: 5351 ret = binder_ioctl_set_ctx_mgr(filp, NULL); 5352 if (ret) 5353 goto err; 5354 break; 5355 case BINDER_THREAD_EXIT: 5356 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 5357 proc->pid, thread->pid); 5358 binder_thread_release(proc, thread); 5359 thread = NULL; 5360 break; 5361 case BINDER_VERSION: { 5362 struct binder_version __user *ver = ubuf; 5363 5364 if (size != sizeof(struct binder_version)) { 5365 ret = -EINVAL; 5366 goto err; 5367 } 5368 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 5369 &ver->protocol_version)) { 5370 ret = -EINVAL; 5371 goto err; 5372 } 5373 break; 5374 } 5375 case BINDER_GET_NODE_INFO_FOR_REF: { 5376 struct binder_node_info_for_ref info; 5377 5378 if (copy_from_user(&info, ubuf, sizeof(info))) { 5379 ret = -EFAULT; 5380 goto err; 5381 } 5382 5383 ret = binder_ioctl_get_node_info_for_ref(proc, &info); 5384 if (ret < 0) 5385 goto err; 5386 5387 if (copy_to_user(ubuf, &info, sizeof(info))) { 5388 ret = -EFAULT; 5389 goto err; 5390 } 5391 5392 break; 5393 } 5394 case BINDER_GET_NODE_DEBUG_INFO: { 5395 struct binder_node_debug_info info; 5396 5397 if (copy_from_user(&info, ubuf, sizeof(info))) { 5398 ret = -EFAULT; 5399 goto err; 5400 } 5401 5402 ret = binder_ioctl_get_node_debug_info(proc, &info); 5403 if (ret < 0) 5404 goto err; 5405 5406 if (copy_to_user(ubuf, &info, sizeof(info))) { 5407 ret = -EFAULT; 5408 goto err; 5409 } 5410 break; 5411 } 5412 case BINDER_FREEZE: { 5413 struct binder_freeze_info info; 5414 struct binder_proc **target_procs = NULL, *target_proc; 5415 int target_procs_count = 0, i = 0; 5416 5417 ret = 0; 5418 5419 if (copy_from_user(&info, ubuf, sizeof(info))) { 5420 ret = -EFAULT; 5421 goto err; 5422 } 5423 5424 mutex_lock(&binder_procs_lock); 5425 hlist_for_each_entry(target_proc, &binder_procs, proc_node) { 5426 if (target_proc->pid == info.pid) 5427 target_procs_count++; 5428 } 5429 5430 if (target_procs_count == 0) { 5431 mutex_unlock(&binder_procs_lock); 5432 ret = -EINVAL; 5433 goto err; 5434 } 5435 5436 target_procs = kcalloc(target_procs_count, 5437 sizeof(struct binder_proc *), 5438 GFP_KERNEL); 5439 5440 if (!target_procs) { 5441 mutex_unlock(&binder_procs_lock); 5442 ret = -ENOMEM; 5443 goto err; 5444 } 5445 5446 hlist_for_each_entry(target_proc, &binder_procs, proc_node) { 5447 if (target_proc->pid != info.pid) 5448 continue; 5449 5450 binder_inner_proc_lock(target_proc); 5451 target_proc->tmp_ref++; 5452 binder_inner_proc_unlock(target_proc); 5453 5454 target_procs[i++] = target_proc; 5455 } 5456 mutex_unlock(&binder_procs_lock); 5457 5458 for (i = 0; i < target_procs_count; i++) { 5459 if (ret >= 0) 5460 ret = binder_ioctl_freeze(&info, 5461 target_procs[i]); 5462 5463 binder_proc_dec_tmpref(target_procs[i]); 5464 } 5465 5466 kfree(target_procs); 5467 5468 if (ret < 0) 5469 goto err; 5470 break; 5471 } 5472 case BINDER_GET_FROZEN_INFO: { 5473 struct binder_frozen_status_info info; 5474 5475 if (copy_from_user(&info, ubuf, sizeof(info))) { 5476 ret = -EFAULT; 5477 goto err; 5478 } 5479 5480 ret = binder_ioctl_get_freezer_info(&info); 5481 if (ret < 0) 5482 goto err; 5483 5484 if (copy_to_user(ubuf, &info, sizeof(info))) { 5485 ret = -EFAULT; 5486 goto err; 5487 } 5488 break; 5489 } 5490 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: { 5491 uint32_t enable; 5492 5493 if (copy_from_user(&enable, ubuf, sizeof(enable))) { 5494 ret = -EFAULT; 5495 goto err; 5496 } 5497 binder_inner_proc_lock(proc); 5498 
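/* toggled under the inner lock so readers see a stable value */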
proc->oneway_spam_detection_enabled = (bool)enable; 5499 binder_inner_proc_unlock(proc); 5500 break; 5501 } 5502 case BINDER_GET_EXTENDED_ERROR: 5503 ret = binder_ioctl_get_extended_error(thread, ubuf); 5504 if (ret < 0) 5505 goto err; 5506 break; 5507 default: 5508 ret = -EINVAL; 5509 goto err; 5510 } 5511 ret = 0; 5512 err: 5513 if (thread) 5514 thread->looper_need_return = false; 5515 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5516 if (ret && ret != -EINTR) 5517 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 5518 err_unlocked: 5519 trace_binder_ioctl_done(ret); 5520 return ret; 5521 } 5522 5523 static void binder_vma_open(struct vm_area_struct *vma) 5524 { 5525 struct binder_proc *proc = vma->vm_private_data; 5526 5527 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5528 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5529 proc->pid, vma->vm_start, vma->vm_end, 5530 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5531 (unsigned long)pgprot_val(vma->vm_page_prot)); 5532 } 5533 5534 static void binder_vma_close(struct vm_area_struct *vma) 5535 { 5536 struct binder_proc *proc = vma->vm_private_data; 5537 5538 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5539 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5540 proc->pid, vma->vm_start, vma->vm_end, 5541 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5542 (unsigned long)pgprot_val(vma->vm_page_prot)); 5543 binder_alloc_vma_close(&proc->alloc); 5544 } 5545 5546 static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 5547 { 5548 return VM_FAULT_SIGBUS; 5549 } 5550 5551 static const struct vm_operations_struct binder_vm_ops = { 5552 .open = binder_vma_open, 5553 .close = binder_vma_close, 5554 .fault = binder_vm_fault, 5555 }; 5556 5557 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 5558 { 5559 struct binder_proc *proc = filp->private_data; 5560 5561 if (proc->tsk != current->group_leader) 5562 return -EINVAL; 5563 5564 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5565 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 5566 __func__, proc->pid, vma->vm_start, vma->vm_end, 5567 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5568 (unsigned long)pgprot_val(vma->vm_page_prot)); 5569 5570 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 5571 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 5572 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM); 5573 return -EPERM; 5574 } 5575 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; 5576 vma->vm_flags &= ~VM_MAYWRITE; 5577 5578 vma->vm_ops = &binder_vm_ops; 5579 vma->vm_private_data = proc; 5580 5581 return binder_alloc_mmap_handler(&proc->alloc, vma); 5582 } 5583 5584 static int binder_open(struct inode *nodp, struct file *filp) 5585 { 5586 struct binder_proc *proc, *itr; 5587 struct binder_device *binder_dev; 5588 struct binderfs_info *info; 5589 struct dentry *binder_binderfs_dir_entry_proc = NULL; 5590 bool existing_pid = false; 5591 5592 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 5593 current->group_leader->pid, current->pid); 5594 5595 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 5596 if (proc == NULL) 5597 return -ENOMEM; 5598 spin_lock_init(&proc->inner_lock); 5599 spin_lock_init(&proc->outer_lock); 5600 get_task_struct(current->group_leader); 5601 proc->tsk = current->group_leader; 5602 proc->cred = get_cred(filp->f_cred); 5603 INIT_LIST_HEAD(&proc->todo); 5604 init_waitqueue_head(&proc->freeze_wait); 5605 proc->default_priority = task_nice(current); 5606 /* binderfs 
stashes devices in i_private */
5607 if (is_binderfs_device(nodp)) {
5608 binder_dev = nodp->i_private;
5609 info = nodp->i_sb->s_fs_info;
5610 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5611 } else {
5612 binder_dev = container_of(filp->private_data,
5613 struct binder_device, miscdev);
5614 }
5615 refcount_inc(&binder_dev->ref);
5616 proc->context = &binder_dev->context;
5617 binder_alloc_init(&proc->alloc);
5618
5619 binder_stats_created(BINDER_STAT_PROC);
5620 proc->pid = current->group_leader->pid;
5621 INIT_LIST_HEAD(&proc->delivered_death);
5622 INIT_LIST_HEAD(&proc->waiting_threads);
5623 filp->private_data = proc;
5624
5625 mutex_lock(&binder_procs_lock);
5626 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5627 if (itr->pid == proc->pid) {
5628 existing_pid = true;
5629 break;
5630 }
5631 }
5632 hlist_add_head(&proc->proc_node, &binder_procs);
5633 mutex_unlock(&binder_procs_lock);
5634
5635 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5636 char strbuf[11];
5637
5638 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5639 /*
5640 * proc debug entries are shared between contexts.
5641 * Only create one for the first PID, to avoid debugfs log spamming.
5642 * The printing code will anyway print all contexts for a given
5643 * PID, so this is not a problem.
5644 */
5645 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5646 binder_debugfs_dir_entry_proc,
5647 (void *)(unsigned long)proc->pid,
5648 &proc_fops);
5649 }
5650
5651 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5652 char strbuf[11];
5653 struct dentry *binderfs_entry;
5654
5655 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5656 /*
5657 * Similar to debugfs, the process-specific log file is shared
5658 * between contexts. Only create it for the first PID.
5659 * This is OK since, as with debugfs, the log file will contain
5660 * information on all contexts of a given PID.
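* (For example, a PID that opens two contexts backed by the same
* binderfs instance still gets one shared log file.)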
5661 */
5662 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5663 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5664 if (!IS_ERR(binderfs_entry)) {
5665 proc->binderfs_entry = binderfs_entry;
5666 } else {
5667 int error;
5668
5669 error = PTR_ERR(binderfs_entry);
5670 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5671 strbuf, error);
5672 }
5673 }
5674
5675 return 0;
5676 }
5677
5678 static int binder_flush(struct file *filp, fl_owner_t id)
5679 {
5680 struct binder_proc *proc = filp->private_data;
5681
5682 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5683
5684 return 0;
5685 }
5686
5687 static void binder_deferred_flush(struct binder_proc *proc)
5688 {
5689 struct rb_node *n;
5690 int wake_count = 0;
5691
5692 binder_inner_proc_lock(proc);
5693 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5694 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5695
5696 thread->looper_need_return = true;
5697 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5698 wake_up_interruptible(&thread->wait);
5699 wake_count++;
5700 }
5701 }
5702 binder_inner_proc_unlock(proc);
5703
5704 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5705 "binder_flush: %d woke %d threads\n", proc->pid,
5706 wake_count);
5707 }
5708
5709 static int binder_release(struct inode *nodp, struct file *filp)
5710 {
5711 struct binder_proc *proc = filp->private_data;
5712
5713 debugfs_remove(proc->debugfs_entry);
5714
5715 if (proc->binderfs_entry) {
5716 binderfs_remove_file(proc->binderfs_entry);
5717 proc->binderfs_entry = NULL;
5718 }
5719
5720 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5721
5722 return 0;
5723 }
5724
5725 static int binder_node_release(struct binder_node *node, int refs)
5726 {
5727 struct binder_ref *ref;
5728 int death = 0;
5729 struct binder_proc *proc = node->proc;
5730
5731 binder_release_work(proc, &node->async_todo);
5732
5733 binder_node_lock(node);
5734 binder_inner_proc_lock(proc);
5735 binder_dequeue_work_ilocked(&node->work);
5736 /*
5737 * The caller must have taken a temporary ref on the node.
5738 */
5739 BUG_ON(!node->tmp_refs);
5740 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5741 binder_inner_proc_unlock(proc);
5742 binder_node_unlock(node);
5743 binder_free_node(node);
5744
5745 return refs;
5746 }
5747
5748 node->proc = NULL;
5749 node->local_strong_refs = 0;
5750 node->local_weak_refs = 0;
5751 binder_inner_proc_unlock(proc);
5752
5753 spin_lock(&binder_dead_nodes_lock);
5754 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5755 spin_unlock(&binder_dead_nodes_lock);
5756
5757 hlist_for_each_entry(ref, &node->refs, node_entry) {
5758 refs++;
5759 /*
5760 * Need the node lock to synchronize
5761 * with new notification requests and the
5762 * inner lock to synchronize with queued
5763 * death notifications.
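* (The inner lock taken below is ref->proc's, i.e. the ref
* holder's, since &ref->proc->todo is the list the death work is
* queued on.)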
5764 */ 5765 binder_inner_proc_lock(ref->proc); 5766 if (!ref->death) { 5767 binder_inner_proc_unlock(ref->proc); 5768 continue; 5769 } 5770 5771 death++; 5772 5773 BUG_ON(!list_empty(&ref->death->work.entry)); 5774 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 5775 binder_enqueue_work_ilocked(&ref->death->work, 5776 &ref->proc->todo); 5777 binder_wakeup_proc_ilocked(ref->proc); 5778 binder_inner_proc_unlock(ref->proc); 5779 } 5780 5781 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5782 "node %d now dead, refs %d, death %d\n", 5783 node->debug_id, refs, death); 5784 binder_node_unlock(node); 5785 binder_put_node(node); 5786 5787 return refs; 5788 } 5789 5790 static void binder_deferred_release(struct binder_proc *proc) 5791 { 5792 struct binder_context *context = proc->context; 5793 struct rb_node *n; 5794 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5795 5796 mutex_lock(&binder_procs_lock); 5797 hlist_del(&proc->proc_node); 5798 mutex_unlock(&binder_procs_lock); 5799 5800 mutex_lock(&context->context_mgr_node_lock); 5801 if (context->binder_context_mgr_node && 5802 context->binder_context_mgr_node->proc == proc) { 5803 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5804 "%s: %d context_mgr_node gone\n", 5805 __func__, proc->pid); 5806 context->binder_context_mgr_node = NULL; 5807 } 5808 mutex_unlock(&context->context_mgr_node_lock); 5809 binder_inner_proc_lock(proc); 5810 /* 5811 * Make sure proc stays alive after we 5812 * remove all the threads 5813 */ 5814 proc->tmp_ref++; 5815 5816 proc->is_dead = true; 5817 proc->is_frozen = false; 5818 proc->sync_recv = false; 5819 proc->async_recv = false; 5820 threads = 0; 5821 active_transactions = 0; 5822 while ((n = rb_first(&proc->threads))) { 5823 struct binder_thread *thread; 5824 5825 thread = rb_entry(n, struct binder_thread, rb_node); 5826 binder_inner_proc_unlock(proc); 5827 threads++; 5828 active_transactions += binder_thread_release(proc, thread); 5829 binder_inner_proc_lock(proc); 5830 } 5831 5832 nodes = 0; 5833 incoming_refs = 0; 5834 while ((n = rb_first(&proc->nodes))) { 5835 struct binder_node *node; 5836 5837 node = rb_entry(n, struct binder_node, rb_node); 5838 nodes++; 5839 /* 5840 * take a temporary ref on the node before 5841 * calling binder_node_release() which will either 5842 * kfree() the node or call binder_put_node() 5843 */ 5844 binder_inc_node_tmpref_ilocked(node); 5845 rb_erase(&node->rb_node, &proc->nodes); 5846 binder_inner_proc_unlock(proc); 5847 incoming_refs = binder_node_release(node, incoming_refs); 5848 binder_inner_proc_lock(proc); 5849 } 5850 binder_inner_proc_unlock(proc); 5851 5852 outgoing_refs = 0; 5853 binder_proc_lock(proc); 5854 while ((n = rb_first(&proc->refs_by_desc))) { 5855 struct binder_ref *ref; 5856 5857 ref = rb_entry(n, struct binder_ref, rb_node_desc); 5858 outgoing_refs++; 5859 binder_cleanup_ref_olocked(ref); 5860 binder_proc_unlock(proc); 5861 binder_free_ref(ref); 5862 binder_proc_lock(proc); 5863 } 5864 binder_proc_unlock(proc); 5865 5866 binder_release_work(proc, &proc->todo); 5867 binder_release_work(proc, &proc->delivered_death); 5868 5869 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5870 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 5871 __func__, proc->pid, threads, nodes, incoming_refs, 5872 outgoing_refs, active_transactions); 5873 5874 binder_proc_dec_tmpref(proc); 5875 } 5876 5877 static void binder_deferred_func(struct work_struct *work) 5878 { 5879 struct binder_proc *proc; 5880 5881 int defer; 5882 5883 do { 5884 
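/*
 * Drain one binder_proc per iteration: detach it and snapshot its
 * deferred-work bits under binder_deferred_lock, then run the
 * flush and/or release work without the lock held. The loop exits
 * once the list is observed empty (proc == NULL).
 */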
mutex_lock(&binder_deferred_lock); 5885 if (!hlist_empty(&binder_deferred_list)) { 5886 proc = hlist_entry(binder_deferred_list.first, 5887 struct binder_proc, deferred_work_node); 5888 hlist_del_init(&proc->deferred_work_node); 5889 defer = proc->deferred_work; 5890 proc->deferred_work = 0; 5891 } else { 5892 proc = NULL; 5893 defer = 0; 5894 } 5895 mutex_unlock(&binder_deferred_lock); 5896 5897 if (defer & BINDER_DEFERRED_FLUSH) 5898 binder_deferred_flush(proc); 5899 5900 if (defer & BINDER_DEFERRED_RELEASE) 5901 binder_deferred_release(proc); /* frees proc */ 5902 } while (proc); 5903 } 5904 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5905 5906 static void 5907 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5908 { 5909 mutex_lock(&binder_deferred_lock); 5910 proc->deferred_work |= defer; 5911 if (hlist_unhashed(&proc->deferred_work_node)) { 5912 hlist_add_head(&proc->deferred_work_node, 5913 &binder_deferred_list); 5914 schedule_work(&binder_deferred_work); 5915 } 5916 mutex_unlock(&binder_deferred_lock); 5917 } 5918 5919 static void print_binder_transaction_ilocked(struct seq_file *m, 5920 struct binder_proc *proc, 5921 const char *prefix, 5922 struct binder_transaction *t) 5923 { 5924 struct binder_proc *to_proc; 5925 struct binder_buffer *buffer = t->buffer; 5926 5927 spin_lock(&t->lock); 5928 to_proc = t->to_proc; 5929 seq_printf(m, 5930 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5931 prefix, t->debug_id, t, 5932 t->from ? t->from->proc->pid : 0, 5933 t->from ? t->from->pid : 0, 5934 to_proc ? to_proc->pid : 0, 5935 t->to_thread ? t->to_thread->pid : 0, 5936 t->code, t->flags, t->priority, t->need_reply); 5937 spin_unlock(&t->lock); 5938 5939 if (proc != to_proc) { 5940 /* 5941 * Can only safely deref buffer if we are holding the 5942 * correct proc inner lock for this node 5943 */ 5944 seq_puts(m, "\n"); 5945 return; 5946 } 5947 5948 if (buffer == NULL) { 5949 seq_puts(m, " buffer free\n"); 5950 return; 5951 } 5952 if (buffer->target_node) 5953 seq_printf(m, " node %d", buffer->target_node->debug_id); 5954 seq_printf(m, " size %zd:%zd data %pK\n", 5955 buffer->data_size, buffer->offsets_size, 5956 buffer->user_data); 5957 } 5958 5959 static void print_binder_work_ilocked(struct seq_file *m, 5960 struct binder_proc *proc, 5961 const char *prefix, 5962 const char *transaction_prefix, 5963 struct binder_work *w) 5964 { 5965 struct binder_node *node; 5966 struct binder_transaction *t; 5967 5968 switch (w->type) { 5969 case BINDER_WORK_TRANSACTION: 5970 t = container_of(w, struct binder_transaction, work); 5971 print_binder_transaction_ilocked( 5972 m, proc, transaction_prefix, t); 5973 break; 5974 case BINDER_WORK_RETURN_ERROR: { 5975 struct binder_error *e = container_of( 5976 w, struct binder_error, work); 5977 5978 seq_printf(m, "%stransaction error: %u\n", 5979 prefix, e->cmd); 5980 } break; 5981 case BINDER_WORK_TRANSACTION_COMPLETE: 5982 seq_printf(m, "%stransaction complete\n", prefix); 5983 break; 5984 case BINDER_WORK_NODE: 5985 node = container_of(w, struct binder_node, work); 5986 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5987 prefix, node->debug_id, 5988 (u64)node->ptr, (u64)node->cookie); 5989 break; 5990 case BINDER_WORK_DEAD_BINDER: 5991 seq_printf(m, "%shas dead binder\n", prefix); 5992 break; 5993 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5994 seq_printf(m, "%shas cleared dead binder\n", prefix); 5995 break; 5996 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5997 seq_printf(m, "%shas cleared 
death notification\n", prefix); 5998 break; 5999 default: 6000 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 6001 break; 6002 } 6003 } 6004 6005 static void print_binder_thread_ilocked(struct seq_file *m, 6006 struct binder_thread *thread, 6007 int print_always) 6008 { 6009 struct binder_transaction *t; 6010 struct binder_work *w; 6011 size_t start_pos = m->count; 6012 size_t header_pos; 6013 6014 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 6015 thread->pid, thread->looper, 6016 thread->looper_need_return, 6017 atomic_read(&thread->tmp_ref)); 6018 header_pos = m->count; 6019 t = thread->transaction_stack; 6020 while (t) { 6021 if (t->from == thread) { 6022 print_binder_transaction_ilocked(m, thread->proc, 6023 " outgoing transaction", t); 6024 t = t->from_parent; 6025 } else if (t->to_thread == thread) { 6026 print_binder_transaction_ilocked(m, thread->proc, 6027 " incoming transaction", t); 6028 t = t->to_parent; 6029 } else { 6030 print_binder_transaction_ilocked(m, thread->proc, 6031 " bad transaction", t); 6032 t = NULL; 6033 } 6034 } 6035 list_for_each_entry(w, &thread->todo, entry) { 6036 print_binder_work_ilocked(m, thread->proc, " ", 6037 " pending transaction", w); 6038 } 6039 if (!print_always && m->count == header_pos) 6040 m->count = start_pos; 6041 } 6042 6043 static void print_binder_node_nilocked(struct seq_file *m, 6044 struct binder_node *node) 6045 { 6046 struct binder_ref *ref; 6047 struct binder_work *w; 6048 int count; 6049 6050 count = 0; 6051 hlist_for_each_entry(ref, &node->refs, node_entry) 6052 count++; 6053 6054 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", 6055 node->debug_id, (u64)node->ptr, (u64)node->cookie, 6056 node->has_strong_ref, node->has_weak_ref, 6057 node->local_strong_refs, node->local_weak_refs, 6058 node->internal_strong_refs, count, node->tmp_refs); 6059 if (count) { 6060 seq_puts(m, " proc"); 6061 hlist_for_each_entry(ref, &node->refs, node_entry) 6062 seq_printf(m, " %d", ref->proc->pid); 6063 } 6064 seq_puts(m, "\n"); 6065 if (node->proc) { 6066 list_for_each_entry(w, &node->async_todo, entry) 6067 print_binder_work_ilocked(m, node->proc, " ", 6068 " pending async transaction", w); 6069 } 6070 } 6071 6072 static void print_binder_ref_olocked(struct seq_file *m, 6073 struct binder_ref *ref) 6074 { 6075 binder_node_lock(ref->node); 6076 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 6077 ref->data.debug_id, ref->data.desc, 6078 ref->node->proc ? 
"" : "dead ", 6079 ref->node->debug_id, ref->data.strong, 6080 ref->data.weak, ref->death); 6081 binder_node_unlock(ref->node); 6082 } 6083 6084 static void print_binder_proc(struct seq_file *m, 6085 struct binder_proc *proc, int print_all) 6086 { 6087 struct binder_work *w; 6088 struct rb_node *n; 6089 size_t start_pos = m->count; 6090 size_t header_pos; 6091 struct binder_node *last_node = NULL; 6092 6093 seq_printf(m, "proc %d\n", proc->pid); 6094 seq_printf(m, "context %s\n", proc->context->name); 6095 header_pos = m->count; 6096 6097 binder_inner_proc_lock(proc); 6098 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 6099 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 6100 rb_node), print_all); 6101 6102 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 6103 struct binder_node *node = rb_entry(n, struct binder_node, 6104 rb_node); 6105 if (!print_all && !node->has_async_transaction) 6106 continue; 6107 6108 /* 6109 * take a temporary reference on the node so it 6110 * survives and isn't removed from the tree 6111 * while we print it. 6112 */ 6113 binder_inc_node_tmpref_ilocked(node); 6114 /* Need to drop inner lock to take node lock */ 6115 binder_inner_proc_unlock(proc); 6116 if (last_node) 6117 binder_put_node(last_node); 6118 binder_node_inner_lock(node); 6119 print_binder_node_nilocked(m, node); 6120 binder_node_inner_unlock(node); 6121 last_node = node; 6122 binder_inner_proc_lock(proc); 6123 } 6124 binder_inner_proc_unlock(proc); 6125 if (last_node) 6126 binder_put_node(last_node); 6127 6128 if (print_all) { 6129 binder_proc_lock(proc); 6130 for (n = rb_first(&proc->refs_by_desc); 6131 n != NULL; 6132 n = rb_next(n)) 6133 print_binder_ref_olocked(m, rb_entry(n, 6134 struct binder_ref, 6135 rb_node_desc)); 6136 binder_proc_unlock(proc); 6137 } 6138 binder_alloc_print_allocated(m, &proc->alloc); 6139 binder_inner_proc_lock(proc); 6140 list_for_each_entry(w, &proc->todo, entry) 6141 print_binder_work_ilocked(m, proc, " ", 6142 " pending transaction", w); 6143 list_for_each_entry(w, &proc->delivered_death, entry) { 6144 seq_puts(m, " has delivered dead binder\n"); 6145 break; 6146 } 6147 binder_inner_proc_unlock(proc); 6148 if (!print_all && m->count == header_pos) 6149 m->count = start_pos; 6150 } 6151 6152 static const char * const binder_return_strings[] = { 6153 "BR_ERROR", 6154 "BR_OK", 6155 "BR_TRANSACTION", 6156 "BR_REPLY", 6157 "BR_ACQUIRE_RESULT", 6158 "BR_DEAD_REPLY", 6159 "BR_TRANSACTION_COMPLETE", 6160 "BR_INCREFS", 6161 "BR_ACQUIRE", 6162 "BR_RELEASE", 6163 "BR_DECREFS", 6164 "BR_ATTEMPT_ACQUIRE", 6165 "BR_NOOP", 6166 "BR_SPAWN_LOOPER", 6167 "BR_FINISHED", 6168 "BR_DEAD_BINDER", 6169 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 6170 "BR_FAILED_REPLY", 6171 "BR_FROZEN_REPLY", 6172 "BR_ONEWAY_SPAM_SUSPECT", 6173 }; 6174 6175 static const char * const binder_command_strings[] = { 6176 "BC_TRANSACTION", 6177 "BC_REPLY", 6178 "BC_ACQUIRE_RESULT", 6179 "BC_FREE_BUFFER", 6180 "BC_INCREFS", 6181 "BC_ACQUIRE", 6182 "BC_RELEASE", 6183 "BC_DECREFS", 6184 "BC_INCREFS_DONE", 6185 "BC_ACQUIRE_DONE", 6186 "BC_ATTEMPT_ACQUIRE", 6187 "BC_REGISTER_LOOPER", 6188 "BC_ENTER_LOOPER", 6189 "BC_EXIT_LOOPER", 6190 "BC_REQUEST_DEATH_NOTIFICATION", 6191 "BC_CLEAR_DEATH_NOTIFICATION", 6192 "BC_DEAD_BINDER_DONE", 6193 "BC_TRANSACTION_SG", 6194 "BC_REPLY_SG", 6195 }; 6196 6197 static const char * const binder_objstat_strings[] = { 6198 "proc", 6199 "thread", 6200 "node", 6201 "ref", 6202 "death", 6203 "transaction", 6204 "transaction_complete" 6205 }; 6206 6207 
static void print_binder_stats(struct seq_file *m, const char *prefix, 6208 struct binder_stats *stats) 6209 { 6210 int i; 6211 6212 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 6213 ARRAY_SIZE(binder_command_strings)); 6214 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 6215 int temp = atomic_read(&stats->bc[i]); 6216 6217 if (temp) 6218 seq_printf(m, "%s%s: %d\n", prefix, 6219 binder_command_strings[i], temp); 6220 } 6221 6222 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 6223 ARRAY_SIZE(binder_return_strings)); 6224 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 6225 int temp = atomic_read(&stats->br[i]); 6226 6227 if (temp) 6228 seq_printf(m, "%s%s: %d\n", prefix, 6229 binder_return_strings[i], temp); 6230 } 6231 6232 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 6233 ARRAY_SIZE(binder_objstat_strings)); 6234 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 6235 ARRAY_SIZE(stats->obj_deleted)); 6236 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 6237 int created = atomic_read(&stats->obj_created[i]); 6238 int deleted = atomic_read(&stats->obj_deleted[i]); 6239 6240 if (created || deleted) 6241 seq_printf(m, "%s%s: active %d total %d\n", 6242 prefix, 6243 binder_objstat_strings[i], 6244 created - deleted, 6245 created); 6246 } 6247 } 6248 6249 static void print_binder_proc_stats(struct seq_file *m, 6250 struct binder_proc *proc) 6251 { 6252 struct binder_work *w; 6253 struct binder_thread *thread; 6254 struct rb_node *n; 6255 int count, strong, weak, ready_threads; 6256 size_t free_async_space = 6257 binder_alloc_get_free_async_space(&proc->alloc); 6258 6259 seq_printf(m, "proc %d\n", proc->pid); 6260 seq_printf(m, "context %s\n", proc->context->name); 6261 count = 0; 6262 ready_threads = 0; 6263 binder_inner_proc_lock(proc); 6264 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 6265 count++; 6266 6267 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) 6268 ready_threads++; 6269 6270 seq_printf(m, " threads: %d\n", count); 6271 seq_printf(m, " requested threads: %d+%d/%d\n" 6272 " ready threads %d\n" 6273 " free async space %zd\n", proc->requested_threads, 6274 proc->requested_threads_started, proc->max_threads, 6275 ready_threads, 6276 free_async_space); 6277 count = 0; 6278 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 6279 count++; 6280 binder_inner_proc_unlock(proc); 6281 seq_printf(m, " nodes: %d\n", count); 6282 count = 0; 6283 strong = 0; 6284 weak = 0; 6285 binder_proc_lock(proc); 6286 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 6287 struct binder_ref *ref = rb_entry(n, struct binder_ref, 6288 rb_node_desc); 6289 count++; 6290 strong += ref->data.strong; 6291 weak += ref->data.weak; 6292 } 6293 binder_proc_unlock(proc); 6294 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 6295 6296 count = binder_alloc_get_allocated_count(&proc->alloc); 6297 seq_printf(m, " buffers: %d\n", count); 6298 6299 binder_alloc_print_pages(m, &proc->alloc); 6300 6301 count = 0; 6302 binder_inner_proc_lock(proc); 6303 list_for_each_entry(w, &proc->todo, entry) { 6304 if (w->type == BINDER_WORK_TRANSACTION) 6305 count++; 6306 } 6307 binder_inner_proc_unlock(proc); 6308 seq_printf(m, " pending transactions: %d\n", count); 6309 6310 print_binder_stats(m, " ", &proc->stats); 6311 } 6312 6313 static int state_show(struct seq_file *m, void *unused) 6314 { 6315 struct binder_proc *proc; 6316 struct binder_node *node; 6317 struct binder_node *last_node = NULL; 6318 6319 seq_puts(m, "binder state:\n"); 6320 6321 
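/*
 * Walking binder_dead_nodes requires dropping the spinlock to
 * print each node, so a temporary reference (tmp_refs) pins the
 * current node first. The previous node's reference is dropped
 * only after the lock is retaken, which keeps the hlist cursor
 * valid across the unlock/lock window.
 */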
spin_lock(&binder_dead_nodes_lock); 6322 if (!hlist_empty(&binder_dead_nodes)) 6323 seq_puts(m, "dead nodes:\n"); 6324 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { 6325 /* 6326 * take a temporary reference on the node so it 6327 * survives and isn't removed from the list 6328 * while we print it. 6329 */ 6330 node->tmp_refs++; 6331 spin_unlock(&binder_dead_nodes_lock); 6332 if (last_node) 6333 binder_put_node(last_node); 6334 binder_node_lock(node); 6335 print_binder_node_nilocked(m, node); 6336 binder_node_unlock(node); 6337 last_node = node; 6338 spin_lock(&binder_dead_nodes_lock); 6339 } 6340 spin_unlock(&binder_dead_nodes_lock); 6341 if (last_node) 6342 binder_put_node(last_node); 6343 6344 mutex_lock(&binder_procs_lock); 6345 hlist_for_each_entry(proc, &binder_procs, proc_node) 6346 print_binder_proc(m, proc, 1); 6347 mutex_unlock(&binder_procs_lock); 6348 6349 return 0; 6350 } 6351 6352 static int stats_show(struct seq_file *m, void *unused) 6353 { 6354 struct binder_proc *proc; 6355 6356 seq_puts(m, "binder stats:\n"); 6357 6358 print_binder_stats(m, "", &binder_stats); 6359 6360 mutex_lock(&binder_procs_lock); 6361 hlist_for_each_entry(proc, &binder_procs, proc_node) 6362 print_binder_proc_stats(m, proc); 6363 mutex_unlock(&binder_procs_lock); 6364 6365 return 0; 6366 } 6367 6368 static int transactions_show(struct seq_file *m, void *unused) 6369 { 6370 struct binder_proc *proc; 6371 6372 seq_puts(m, "binder transactions:\n"); 6373 mutex_lock(&binder_procs_lock); 6374 hlist_for_each_entry(proc, &binder_procs, proc_node) 6375 print_binder_proc(m, proc, 0); 6376 mutex_unlock(&binder_procs_lock); 6377 6378 return 0; 6379 } 6380 6381 static int proc_show(struct seq_file *m, void *unused) 6382 { 6383 struct binder_proc *itr; 6384 int pid = (unsigned long)m->private; 6385 6386 mutex_lock(&binder_procs_lock); 6387 hlist_for_each_entry(itr, &binder_procs, proc_node) { 6388 if (itr->pid == pid) { 6389 seq_puts(m, "binder proc state:\n"); 6390 print_binder_proc(m, itr, 1); 6391 } 6392 } 6393 mutex_unlock(&binder_procs_lock); 6394 6395 return 0; 6396 } 6397 6398 static void print_binder_transaction_log_entry(struct seq_file *m, 6399 struct binder_transaction_log_entry *e) 6400 { 6401 int debug_id = READ_ONCE(e->debug_id_done); 6402 /* 6403 * read barrier to guarantee debug_id_done read before 6404 * we print the log values 6405 */ 6406 smp_rmb(); 6407 seq_printf(m, 6408 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 6409 e->debug_id, (e->call_type == 2) ? "reply" : 6410 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 6411 e->from_thread, e->to_proc, e->to_thread, e->context_name, 6412 e->to_node, e->target_handle, e->data_size, e->offsets_size, 6413 e->return_error, e->return_error_param, 6414 e->return_error_line); 6415 /* 6416 * read-barrier to guarantee read of debug_id_done after 6417 * done printing the fields of the entry 6418 */ 6419 smp_rmb(); 6420 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 6421 "\n" : " (incomplete)\n"); 6422 } 6423 6424 static int transaction_log_show(struct seq_file *m, void *unused) 6425 { 6426 struct binder_transaction_log *log = m->private; 6427 unsigned int log_cur = atomic_read(&log->cur); 6428 unsigned int count; 6429 unsigned int cur; 6430 int i; 6431 6432 count = log_cur + 1; 6433 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
6434 0 : count % ARRAY_SIZE(log->entry); 6435 if (count > ARRAY_SIZE(log->entry) || log->full) 6436 count = ARRAY_SIZE(log->entry); 6437 for (i = 0; i < count; i++) { 6438 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 6439 6440 print_binder_transaction_log_entry(m, &log->entry[index]); 6441 } 6442 return 0; 6443 } 6444 6445 const struct file_operations binder_fops = { 6446 .owner = THIS_MODULE, 6447 .poll = binder_poll, 6448 .unlocked_ioctl = binder_ioctl, 6449 .compat_ioctl = compat_ptr_ioctl, 6450 .mmap = binder_mmap, 6451 .open = binder_open, 6452 .flush = binder_flush, 6453 .release = binder_release, 6454 }; 6455 6456 DEFINE_SHOW_ATTRIBUTE(state); 6457 DEFINE_SHOW_ATTRIBUTE(stats); 6458 DEFINE_SHOW_ATTRIBUTE(transactions); 6459 DEFINE_SHOW_ATTRIBUTE(transaction_log); 6460 6461 const struct binder_debugfs_entry binder_debugfs_entries[] = { 6462 { 6463 .name = "state", 6464 .mode = 0444, 6465 .fops = &state_fops, 6466 .data = NULL, 6467 }, 6468 { 6469 .name = "stats", 6470 .mode = 0444, 6471 .fops = &stats_fops, 6472 .data = NULL, 6473 }, 6474 { 6475 .name = "transactions", 6476 .mode = 0444, 6477 .fops = &transactions_fops, 6478 .data = NULL, 6479 }, 6480 { 6481 .name = "transaction_log", 6482 .mode = 0444, 6483 .fops = &transaction_log_fops, 6484 .data = &binder_transaction_log, 6485 }, 6486 { 6487 .name = "failed_transaction_log", 6488 .mode = 0444, 6489 .fops = &transaction_log_fops, 6490 .data = &binder_transaction_log_failed, 6491 }, 6492 {} /* terminator */ 6493 }; 6494 6495 static int __init init_binder_device(const char *name) 6496 { 6497 int ret; 6498 struct binder_device *binder_device; 6499 6500 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 6501 if (!binder_device) 6502 return -ENOMEM; 6503 6504 binder_device->miscdev.fops = &binder_fops; 6505 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 6506 binder_device->miscdev.name = name; 6507 6508 refcount_set(&binder_device->ref, 1); 6509 binder_device->context.binder_context_mgr_uid = INVALID_UID; 6510 binder_device->context.name = name; 6511 mutex_init(&binder_device->context.context_mgr_node_lock); 6512 6513 ret = misc_register(&binder_device->miscdev); 6514 if (ret < 0) { 6515 kfree(binder_device); 6516 return ret; 6517 } 6518 6519 hlist_add_head(&binder_device->hlist, &binder_devices); 6520 6521 return ret; 6522 } 6523 6524 static int __init binder_init(void) 6525 { 6526 int ret; 6527 char *device_name, *device_tmp; 6528 struct binder_device *device; 6529 struct hlist_node *tmp; 6530 char *device_names = NULL; 6531 6532 ret = binder_alloc_shrinker_init(); 6533 if (ret) 6534 return ret; 6535 6536 atomic_set(&binder_transaction_log.cur, ~0U); 6537 atomic_set(&binder_transaction_log_failed.cur, ~0U); 6538 6539 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 6540 if (binder_debugfs_dir_entry_root) { 6541 const struct binder_debugfs_entry *db_entry; 6542 6543 binder_for_each_debugfs_entry(db_entry) 6544 debugfs_create_file(db_entry->name, 6545 db_entry->mode, 6546 binder_debugfs_dir_entry_root, 6547 db_entry->data, 6548 db_entry->fops); 6549 6550 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 6551 binder_debugfs_dir_entry_root); 6552 } 6553 6554 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && 6555 strcmp(binder_devices_param, "") != 0) { 6556 /* 6557 * Copy the module_parameter string, because we don't want to 6558 * tokenize it in-place. 
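* (With the usual default of CONFIG_ANDROID_BINDER_DEVICES,
* "binder,hwbinder,vndbinder", the strsep() loop below registers
* three misc devices.)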
6559 */ 6560 device_names = kstrdup(binder_devices_param, GFP_KERNEL); 6561 if (!device_names) { 6562 ret = -ENOMEM; 6563 goto err_alloc_device_names_failed; 6564 } 6565 6566 device_tmp = device_names; 6567 while ((device_name = strsep(&device_tmp, ","))) { 6568 ret = init_binder_device(device_name); 6569 if (ret) 6570 goto err_init_binder_device_failed; 6571 } 6572 } 6573 6574 ret = init_binderfs(); 6575 if (ret) 6576 goto err_init_binder_device_failed; 6577 6578 return ret; 6579 6580 err_init_binder_device_failed: 6581 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 6582 misc_deregister(&device->miscdev); 6583 hlist_del(&device->hlist); 6584 kfree(device); 6585 } 6586 6587 kfree(device_names); 6588 6589 err_alloc_device_names_failed: 6590 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 6591 6592 return ret; 6593 } 6594 6595 device_initcall(binder_init); 6596 6597 #define CREATE_TRACE_POINTS 6598 #include "binder_trace.h" 6599 6600 MODULE_LICENSE("GPL v2"); 6601
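/*
 * Minimal userspace sketch (illustrative only, not part of the
 * driver): the canonical open sequence exercising binder_open(),
 * the BINDER_VERSION ioctl and binder_mmap() above. Error handling
 * is abbreviated, and the 1 MiB mapping size is an arbitrary
 * example rather than a driver requirement.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/android/binder.h>
 *
 *	int binder_client_open(void)
 *	{
 *		struct binder_version vers;
 *		int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *		    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
 *			close(fd);
 *			return -1;
 *		}
 *		// Read-only by design: binder_mmap() rejects VM_WRITE
 *		// (FORBIDDEN_MMAP_FLAGS) and clears VM_MAYWRITE.
 *		if (mmap(NULL, 1 << 20, PROT_READ, MAP_PRIVATE, fd, 0) ==
 *		    MAP_FAILED) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */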