// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
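/*
 * Illustrative note (not part of the original source): binder_debug_mask
 * below is a bitmask of the BINDER_DEBUG_* flags above, exposed as the
 * writable "debug_mask" module parameter, so message classes can be
 * enabled at runtime. Assuming the module is named "binder":
 *
 *	# enable transaction and read/write logging in addition to the
 *	# default error classes (0x7): 0x7 | (1 << 9) | (1 << 6) = 0x247
 *	echo 0x247 > /sys/module/binder/parameters/debug_mask
 */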
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
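/*
 * Illustrative sketch (not part of the original source): a reader of a
 * log entry pairs with the smp_wmb() above using a read barrier, along
 * these lines:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();	// pairs with the smp_wmb() in the writer
 *	...copy or print the entry fields...
 *	// the entry is consistent only if debug_id_done is nonzero
 *	// and unchanged after reading the fields:
 *	complete = debug_id && debug_id == READ_ONCE(e->debug_id_done);
 */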
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
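/*
 * Illustrative sketch (not part of the original source): with the
 * helpers above, the lock ordering documented at the top of this file
 * (outer_lock, then node->lock, then inner_lock) looks like this for a
 * hypothetical update touching a ref, its node, and a todo list:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */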
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such a
	 * thread risks waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
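/*
 * Illustrative sketch (not part of the original source): choosing
 * between the two enqueue helpers for a hypothetical work item @w and
 * target thread @th (inner lock held):
 *
 *	// queued, but th->process_todo is untouched; th may still go
 *	// to sleep on its next read without handling w:
 *	binder_enqueue_deferred_thread_work_ilocked(th, w);
 *
 *	// or: queued and marked for processing, so th's next read
 *	// handles w (a polling thread queuing to itself is woken):
 *	binder_enqueue_thread_work_ilocked(th, w);
 */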
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
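/*
 * Illustrative sketch (not part of the original source): lookups pair
 * with binder_put_node() to drop the implicit tmp_ref taken above:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		...node cannot be freed while the tmp_ref is held...
 *		binder_put_node(node);
 *	}
 */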
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
					struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
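/*
 * Illustrative note (not part of the original source): binder_new_node()
 * above uses the common "allocate outside the lock, resolve races under
 * the lock" pattern, since kzalloc(GFP_KERNEL) may sleep and therefore
 * cannot be called with a spinlock held. In generic form:
 *
 *	new = kzalloc(...);		// may sleep: no locks held
 *	lock();
 *	found = insert_or_find(new);	// cheap, done under the lock
 *	unlock();
 *	if (found != new)
 *		kfree(new);		// lost the race: drop our copy
 */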
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}
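/*
 * Illustrative walk-through (not part of the original source): with
 * descriptors {0, 1, 3} in refs_by_desc and offset == 1, the loop
 * above starts at desc = 1; ref 0 leaves desc at 1, ref 1 advances it
 * to 2, and ref 3 breaks out of the loop since 3 > 2. The function
 * returns 2, the smallest unused descriptor >= offset.
 */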
/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
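/*
 * Illustrative note (not part of the original source): the -EAGAIN and
 * `goto retry` handshake above is the standard way to mix a sleeping
 * allocation with a spinlock: get_ref_desc_olocked() drops
 * proc->outer_lock around bitmap_zalloc(GFP_KERNEL), and since the
 * rb-trees may have changed while the lock was released, the caller
 * restarts the lookup from scratch rather than trusting the stale
 * parent/p insertion point.
 */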
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}

	if (ref->freeze) {
		binder_dequeue_work(ref->proc, &ref->freeze->work);
		binder_stats_deleted(BINDER_STAT_FREEZE);
	}

	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
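/*
 * Illustrative note (not part of the original source): only the 0 -> 1
 * transition of a ref is forwarded to the node. For example, three
 * strong increments on the same ref call binder_inc_node() once:
 *
 *	binder_inc_ref_olocked(ref, 1, list);	// strong 0->1, node inc'd
 *	binder_inc_ref_olocked(ref, 1, list);	// strong 1->2, ref only
 *	binder_inc_ref_olocked(ref, 1, list);	// strong 2->3, ref only
 *
 * binder_dec_ref_olocked() below mirrors this on the 1 -> 0 transition.
 */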
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
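/*
 * Illustrative note (not part of the original source): the userspace
 * refcounting commands map onto the increment/strong arguments of
 * binder_update_ref_for_handle() roughly as:
 *
 *	BC_INCREFS  -> increment = true,  strong = false
 *	BC_ACQUIRE  -> increment = true,  strong = true
 *	BC_RELEASE  -> increment = false, strong = true
 *	BC_DECREFS  -> increment = false, strong = false
 */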
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * The counter is atomic so it can be incremented without the
	 * inner lock (see binder_get_txn_from()); the thread is only
	 * freed once it is dead and the counter has dropped to zero,
	 * both checked here under the inner lock.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
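/*
 * Illustrative sketch (not part of the original source): the expected
 * calling pattern for binder_get_txn_from_and_acq_inner(), mirroring
 * what binder_send_failed_reply() does below:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...operate on target_thread with the inner lock held...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */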
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
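/*
 * Illustrative walk-through (not part of the original source): assume a
 * 64-byte buffer (data_size == 64) and a flat_binder_object at
 * offset == 40, taking sizeof(struct flat_binder_object) == 24 as on a
 * typical 64-bit build. The final check above reads 40 <= 64 - 24, so
 * the object ends exactly at the end of the buffer and is accepted. At
 * offset == 48 the same check (48 <= 40) fails and zero is returned.
 */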
1858 */ 1859 static struct binder_buffer_object *binder_validate_ptr( 1860 struct binder_proc *proc, 1861 struct binder_buffer *b, 1862 struct binder_object *object, 1863 binder_size_t index, 1864 binder_size_t start_offset, 1865 binder_size_t *object_offsetp, 1866 binder_size_t num_valid) 1867 { 1868 size_t object_size; 1869 binder_size_t object_offset; 1870 unsigned long buffer_offset; 1871 1872 if (index >= num_valid) 1873 return NULL; 1874 1875 buffer_offset = start_offset + sizeof(binder_size_t) * index; 1876 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 1877 b, buffer_offset, 1878 sizeof(object_offset))) 1879 return NULL; 1880 object_size = binder_get_object(proc, NULL, b, object_offset, object); 1881 if (!object_size || object->hdr.type != BINDER_TYPE_PTR) 1882 return NULL; 1883 if (object_offsetp) 1884 *object_offsetp = object_offset; 1885 1886 return &object->bbo; 1887 } 1888 1889 /** 1890 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 1891 * @proc: binder_proc owning the buffer 1892 * @b: transaction buffer 1893 * @objects_start_offset: offset to start of objects buffer 1894 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up 1895 * @fixup_offset: start offset in @b to fix up 1896 * @last_obj_offset: offset to last binder_buffer_object that we fixed 1897 * @last_min_offset: minimum fixup offset in object at @last_obj_offset 1898 * 1899 * Return: %true if a fixup in buffer @b at offset @fixup_offset is 1900 * allowed. 1901 * 1902 * For safety reasons, we only allow fixups inside a buffer to happen 1903 * at increasing offsets; additionally, we only allow fixup on the last 1904 * buffer object that was verified, or one of its parents. 1905 * 1906 * Example of what is allowed: 1907 * 1908 * A 1909 * B (parent = A, offset = 0) 1910 * C (parent = A, offset = 16) 1911 * D (parent = C, offset = 0) 1912 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 1913 * 1914 * Examples of what is not allowed: 1915 * 1916 * Decreasing offsets within the same parent: 1917 * A 1918 * C (parent = A, offset = 16) 1919 * B (parent = A, offset = 0) // decreasing offset within A 1920 * 1921 * Referring to a parent that wasn't the last object or any of its parents: 1922 * A 1923 * B (parent = A, offset = 0) 1924 * C (parent = A, offset = 0) 1925 * C (parent = A, offset = 16) 1926 * D (parent = B, offset = 0) // B is not A or any of A's parents 1927 */ 1928 static bool binder_validate_fixup(struct binder_proc *proc, 1929 struct binder_buffer *b, 1930 binder_size_t objects_start_offset, 1931 binder_size_t buffer_obj_offset, 1932 binder_size_t fixup_offset, 1933 binder_size_t last_obj_offset, 1934 binder_size_t last_min_offset) 1935 { 1936 if (!last_obj_offset) { 1937 /* No verified buffer object yet, so nothing to fix up in */ 1938 return false; 1939 } 1940 1941 while (last_obj_offset != buffer_obj_offset) { 1942 unsigned long buffer_offset; 1943 struct binder_object last_object; 1944 struct binder_buffer_object *last_bbo; 1945 size_t object_size = binder_get_object(proc, NULL, b, 1946 last_obj_offset, 1947 &last_object); 1948 if (object_size != sizeof(*last_bbo)) 1949 return false; 1950 1951 last_bbo = &last_object.bbo; 1952 /* 1953 * Safe to retrieve the parent of last_obj, since it 1954 * was already previously verified by the driver.
1955 */ 1956 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 1957 return false; 1958 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); 1959 buffer_offset = objects_start_offset + 1960 sizeof(binder_size_t) * last_bbo->parent; 1961 if (binder_alloc_copy_from_buffer(&proc->alloc, 1962 &last_obj_offset, 1963 b, buffer_offset, 1964 sizeof(last_obj_offset))) 1965 return false; 1966 } 1967 return (fixup_offset >= last_min_offset); 1968 } 1969 1970 /** 1971 * struct binder_task_work_cb - for deferred close 1972 * 1973 * @twork: callback_head for task work 1974 * @file: file to put when the task work runs 1975 * 1976 * Structure to pass task work to be handled after 1977 * returning from binder_ioctl() via task_work_add(). 1978 */ 1979 struct binder_task_work_cb { 1980 struct callback_head twork; 1981 struct file *file; 1982 }; 1983 1984 /** 1985 * binder_do_fd_close() - complete a deferred fd close 1986 * @twork: callback head for task work 1987 * 1988 * It is not safe to call ksys_close() during the binder_ioctl() 1989 * function if there is a chance that binder's own file descriptor 1990 * might be closed. This is to meet the requirements for using 1991 * fdget() (see comments for __fget_light()). Therefore use 1992 * task_work_add() to schedule the close operation once we have 1993 * returned from binder_ioctl(). This function is a callback 1994 * for that mechanism and does the final fput() on the 1995 * given file. 1996 */ 1997 static void binder_do_fd_close(struct callback_head *twork) 1998 { 1999 struct binder_task_work_cb *twcb = container_of(twork, 2000 struct binder_task_work_cb, twork); 2001 2002 fput(twcb->file); 2003 kfree(twcb); 2004 } 2005 2006 /** 2007 * binder_deferred_fd_close() - schedule a close for the given file-descriptor 2008 * @fd: file-descriptor to close 2009 * 2010 * See comments in binder_do_fd_close(). This function is used to schedule 2011 * a file-descriptor to be closed after returning from binder_ioctl().
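 *
 * Typical use, as in the BINDER_TYPE_FDA teardown in
 * binder_transaction_buffer_release() below:
 *
 *	u32 fd;
 *
 *	if (!binder_alloc_copy_from_buffer(&proc->alloc, &fd, buffer,
 *					   offset, sizeof(fd)))
 *		binder_deferred_fd_close(fd);	// final fput after the ioctl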
2012 */ 2013 static void binder_deferred_fd_close(int fd) 2014 { 2015 struct binder_task_work_cb *twcb; 2016 2017 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); 2018 if (!twcb) 2019 return; 2020 init_task_work(&twcb->twork, binder_do_fd_close); 2021 twcb->file = file_close_fd(fd); 2022 if (twcb->file) { 2023 // pin it until binder_do_fd_close(); see comments there 2024 get_file(twcb->file); 2025 filp_close(twcb->file, current->files); 2026 task_work_add(current, &twcb->twork, TWA_RESUME); 2027 } else { 2028 kfree(twcb); 2029 } 2030 } 2031 2032 static void binder_transaction_buffer_release(struct binder_proc *proc, 2033 struct binder_thread *thread, 2034 struct binder_buffer *buffer, 2035 binder_size_t off_end_offset, 2036 bool is_failure) 2037 { 2038 int debug_id = buffer->debug_id; 2039 binder_size_t off_start_offset, buffer_offset; 2040 2041 binder_debug(BINDER_DEBUG_TRANSACTION, 2042 "%d buffer release %d, size %zd-%zd, failed at %llx\n", 2043 proc->pid, buffer->debug_id, 2044 buffer->data_size, buffer->offsets_size, 2045 (unsigned long long)off_end_offset); 2046 2047 if (buffer->target_node) 2048 binder_dec_node(buffer->target_node, 1, 0); 2049 2050 off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); 2051 2052 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 2053 buffer_offset += sizeof(binder_size_t)) { 2054 struct binder_object_header *hdr; 2055 size_t object_size = 0; 2056 struct binder_object object; 2057 binder_size_t object_offset; 2058 2059 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 2060 buffer, buffer_offset, 2061 sizeof(object_offset))) 2062 object_size = binder_get_object(proc, NULL, buffer, 2063 object_offset, &object); 2064 if (object_size == 0) { 2065 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2066 debug_id, (u64)object_offset, buffer->data_size); 2067 continue; 2068 } 2069 hdr = &object.hdr; 2070 switch (hdr->type) { 2071 case BINDER_TYPE_BINDER: 2072 case BINDER_TYPE_WEAK_BINDER: { 2073 struct flat_binder_object *fp; 2074 struct binder_node *node; 2075 2076 fp = to_flat_binder_object(hdr); 2077 node = binder_get_node(proc, fp->binder); 2078 if (node == NULL) { 2079 pr_err("transaction release %d bad node %016llx\n", 2080 debug_id, (u64)fp->binder); 2081 break; 2082 } 2083 binder_debug(BINDER_DEBUG_TRANSACTION, 2084 " node %d u%016llx\n", 2085 node->debug_id, (u64)node->ptr); 2086 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2087 0); 2088 binder_put_node(node); 2089 } break; 2090 case BINDER_TYPE_HANDLE: 2091 case BINDER_TYPE_WEAK_HANDLE: { 2092 struct flat_binder_object *fp; 2093 struct binder_ref_data rdata; 2094 int ret; 2095 2096 fp = to_flat_binder_object(hdr); 2097 ret = binder_dec_ref_for_handle(proc, fp->handle, 2098 hdr->type == BINDER_TYPE_HANDLE, &rdata); 2099 2100 if (ret) { 2101 pr_err("transaction release %d bad handle %d, ret = %d\n", 2102 debug_id, fp->handle, ret); 2103 break; 2104 } 2105 binder_debug(BINDER_DEBUG_TRANSACTION, 2106 " ref %d desc %d\n", 2107 rdata.debug_id, rdata.desc); 2108 } break; 2109 2110 case BINDER_TYPE_FD: { 2111 /* 2112 * No need to close the file here since user-space 2113 * closes it for successfully delivered 2114 * transactions. For transactions that weren't 2115 * delivered, the new fd was never allocated so 2116 * there is no need to close and the fput on the 2117 * file is done when the transaction is torn 2118 * down. 
2119 */ 2120 } break; 2121 case BINDER_TYPE_PTR: 2122 /* 2123 * Nothing to do here, this will get cleaned up when the 2124 * transaction buffer gets freed 2125 */ 2126 break; 2127 case BINDER_TYPE_FDA: { 2128 struct binder_fd_array_object *fda; 2129 struct binder_buffer_object *parent; 2130 struct binder_object ptr_object; 2131 binder_size_t fda_offset; 2132 size_t fd_index; 2133 binder_size_t fd_buf_size; 2134 binder_size_t num_valid; 2135 2136 if (is_failure) { 2137 /* 2138 * The fd fixups have not been applied so no 2139 * fds need to be closed. 2140 */ 2141 continue; 2142 } 2143 2144 num_valid = (buffer_offset - off_start_offset) / 2145 sizeof(binder_size_t); 2146 fda = to_binder_fd_array_object(hdr); 2147 parent = binder_validate_ptr(proc, buffer, &ptr_object, 2148 fda->parent, 2149 off_start_offset, 2150 NULL, 2151 num_valid); 2152 if (!parent) { 2153 pr_err("transaction release %d bad parent offset\n", 2154 debug_id); 2155 continue; 2156 } 2157 fd_buf_size = sizeof(u32) * fda->num_fds; 2158 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2159 pr_err("transaction release %d invalid number of fds (%lld)\n", 2160 debug_id, (u64)fda->num_fds); 2161 continue; 2162 } 2163 if (fd_buf_size > parent->length || 2164 fda->parent_offset > parent->length - fd_buf_size) { 2165 /* No space for all file descriptors here. */ 2166 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2167 debug_id, (u64)fda->num_fds); 2168 continue; 2169 } 2170 /* 2171 * the source data for binder_buffer_object is visible 2172 * to user-space and the @buffer element is the user 2173 * pointer to the buffer_object containing the fd_array. 2174 * Convert the address to an offset relative to 2175 * the base of the transaction buffer. 2176 */ 2177 fda_offset = parent->buffer - buffer->user_data + 2178 fda->parent_offset; 2179 for (fd_index = 0; fd_index < fda->num_fds; 2180 fd_index++) { 2181 u32 fd; 2182 int err; 2183 binder_size_t offset = fda_offset + 2184 fd_index * sizeof(fd); 2185 2186 err = binder_alloc_copy_from_buffer( 2187 &proc->alloc, &fd, buffer, 2188 offset, sizeof(fd)); 2189 WARN_ON(err); 2190 if (!err) { 2191 binder_deferred_fd_close(fd); 2192 /* 2193 * Need to make sure the thread goes 2194 * back to userspace to complete the 2195 * deferred close 2196 */ 2197 if (thread) 2198 thread->looper_need_return = true; 2199 } 2200 } 2201 } break; 2202 default: 2203 pr_err("transaction release %d bad object type %x\n", 2204 debug_id, hdr->type); 2205 break; 2206 } 2207 } 2208 } 2209 2210 /* Clean up all the objects in the buffer */ 2211 static inline void binder_release_entire_buffer(struct binder_proc *proc, 2212 struct binder_thread *thread, 2213 struct binder_buffer *buffer, 2214 bool is_failure) 2215 { 2216 binder_size_t off_end_offset; 2217 2218 off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); 2219 off_end_offset += buffer->offsets_size; 2220 2221 binder_transaction_buffer_release(proc, thread, buffer, 2222 off_end_offset, is_failure); 2223 } 2224 2225 static int binder_translate_binder(struct flat_binder_object *fp, 2226 struct binder_transaction *t, 2227 struct binder_thread *thread) 2228 { 2229 struct binder_node *node; 2230 struct binder_proc *proc = thread->proc; 2231 struct binder_proc *target_proc = t->to_proc; 2232 struct binder_ref_data rdata; 2233 int ret = 0; 2234 2235 node = binder_get_node(proc, fp->binder); 2236 if (!node) { 2237 node = binder_new_node(proc, fp); 2238 if (!node) 2239 return -ENOMEM; 2240 } 2241 if (fp->cookie != node->cookie) { 2242 
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2243 proc->pid, thread->pid, (u64)fp->binder, 2244 node->debug_id, (u64)fp->cookie, 2245 (u64)node->cookie); 2246 ret = -EINVAL; 2247 goto done; 2248 } 2249 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { 2250 ret = -EPERM; 2251 goto done; 2252 } 2253 2254 ret = binder_inc_ref_for_node(target_proc, node, 2255 fp->hdr.type == BINDER_TYPE_BINDER, 2256 &thread->todo, &rdata); 2257 if (ret) 2258 goto done; 2259 2260 if (fp->hdr.type == BINDER_TYPE_BINDER) 2261 fp->hdr.type = BINDER_TYPE_HANDLE; 2262 else 2263 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2264 fp->binder = 0; 2265 fp->handle = rdata.desc; 2266 fp->cookie = 0; 2267 2268 trace_binder_transaction_node_to_ref(t, node, &rdata); 2269 binder_debug(BINDER_DEBUG_TRANSACTION, 2270 " node %d u%016llx -> ref %d desc %d\n", 2271 node->debug_id, (u64)node->ptr, 2272 rdata.debug_id, rdata.desc); 2273 done: 2274 binder_put_node(node); 2275 return ret; 2276 } 2277 2278 static int binder_translate_handle(struct flat_binder_object *fp, 2279 struct binder_transaction *t, 2280 struct binder_thread *thread) 2281 { 2282 struct binder_proc *proc = thread->proc; 2283 struct binder_proc *target_proc = t->to_proc; 2284 struct binder_node *node; 2285 struct binder_ref_data src_rdata; 2286 int ret = 0; 2287 2288 node = binder_get_node_from_ref(proc, fp->handle, 2289 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2290 if (!node) { 2291 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2292 proc->pid, thread->pid, fp->handle); 2293 return -EINVAL; 2294 } 2295 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { 2296 ret = -EPERM; 2297 goto done; 2298 } 2299 2300 binder_node_lock(node); 2301 if (node->proc == target_proc) { 2302 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2303 fp->hdr.type = BINDER_TYPE_BINDER; 2304 else 2305 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2306 fp->binder = node->ptr; 2307 fp->cookie = node->cookie; 2308 if (node->proc) 2309 binder_inner_proc_lock(node->proc); 2310 else 2311 __acquire(&node->proc->inner_lock); 2312 binder_inc_node_nilocked(node, 2313 fp->hdr.type == BINDER_TYPE_BINDER, 2314 0, NULL); 2315 if (node->proc) 2316 binder_inner_proc_unlock(node->proc); 2317 else 2318 __release(&node->proc->inner_lock); 2319 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2320 binder_debug(BINDER_DEBUG_TRANSACTION, 2321 " ref %d desc %d -> node %d u%016llx\n", 2322 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2323 (u64)node->ptr); 2324 binder_node_unlock(node); 2325 } else { 2326 struct binder_ref_data dest_rdata; 2327 2328 binder_node_unlock(node); 2329 ret = binder_inc_ref_for_node(target_proc, node, 2330 fp->hdr.type == BINDER_TYPE_HANDLE, 2331 NULL, &dest_rdata); 2332 if (ret) 2333 goto done; 2334 2335 fp->binder = 0; 2336 fp->handle = dest_rdata.desc; 2337 fp->cookie = 0; 2338 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2339 &dest_rdata); 2340 binder_debug(BINDER_DEBUG_TRANSACTION, 2341 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2342 src_rdata.debug_id, src_rdata.desc, 2343 dest_rdata.debug_id, dest_rdata.desc, 2344 node->debug_id); 2345 } 2346 done: 2347 binder_put_node(node); 2348 return ret; 2349 } 2350 2351 static int binder_translate_fd(u32 fd, binder_size_t fd_offset, 2352 struct binder_transaction *t, 2353 struct binder_thread *thread, 2354 struct binder_transaction *in_reply_to) 2355 { 2356 struct binder_proc *proc = thread->proc; 2357 struct binder_proc 
*target_proc = t->to_proc; 2358 struct binder_txn_fd_fixup *fixup; 2359 struct file *file; 2360 int ret = 0; 2361 bool target_allows_fd; 2362 2363 if (in_reply_to) 2364 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2365 else 2366 target_allows_fd = t->buffer->target_node->accept_fds; 2367 if (!target_allows_fd) { 2368 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2369 proc->pid, thread->pid, 2370 in_reply_to ? "reply" : "transaction", 2371 fd); 2372 ret = -EPERM; 2373 goto err_fd_not_accepted; 2374 } 2375 2376 file = fget(fd); 2377 if (!file) { 2378 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2379 proc->pid, thread->pid, fd); 2380 ret = -EBADF; 2381 goto err_fget; 2382 } 2383 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); 2384 if (ret < 0) { 2385 ret = -EPERM; 2386 goto err_security; 2387 } 2388 2389 /* 2390 * Add fixup record for this transaction. The allocation 2391 * of the fd in the target needs to be done from a 2392 * target thread. 2393 */ 2394 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); 2395 if (!fixup) { 2396 ret = -ENOMEM; 2397 goto err_alloc; 2398 } 2399 fixup->file = file; 2400 fixup->offset = fd_offset; 2401 fixup->target_fd = -1; 2402 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2403 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2404 2405 return ret; 2406 2407 err_alloc: 2408 err_security: 2409 fput(file); 2410 err_fget: 2411 err_fd_not_accepted: 2412 return ret; 2413 } 2414 2415 /** 2416 * struct binder_ptr_fixup - data to be fixed-up in target buffer 2417 * @offset offset in target buffer to fixup 2418 * @skip_size bytes to skip in copy (fixup will be written later) 2419 * @fixup_data data to write at fixup offset 2420 * @node list node 2421 * 2422 * This is used for the pointer fixup list (pf) which is created and consumed 2423 * during binder_transaction() and is only accessed locally. No 2424 * locking is necessary. 2425 * 2426 * The list is ordered by @offset. 2427 */ 2428 struct binder_ptr_fixup { 2429 binder_size_t offset; 2430 size_t skip_size; 2431 binder_uintptr_t fixup_data; 2432 struct list_head node; 2433 }; 2434 2435 /** 2436 * struct binder_sg_copy - scatter-gather data to be copied 2437 * @offset offset in target buffer 2438 * @sender_uaddr user address in source buffer 2439 * @length bytes to copy 2440 * @node list node 2441 * 2442 * This is used for the sg copy list (sgc) which is created and consumed 2443 * during binder_transaction() and is only accessed locally. No 2444 * locking is necessary. 2445 * 2446 * The list is ordered by @offset. 2447 */ 2448 struct binder_sg_copy { 2449 binder_size_t offset; 2450 const void __user *sender_uaddr; 2451 size_t length; 2452 struct list_head node; 2453 }; 2454 2455 /** 2456 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data 2457 * @alloc: binder_alloc associated with @buffer 2458 * @buffer: binder buffer in target process 2459 * @sgc_head: list_head of scatter-gather copy list 2460 * @pf_head: list_head of pointer fixup list 2461 * 2462 * Processes all elements of @sgc_head, applying fixups from @pf_head 2463 * and copying the scatter-gather data from the source process' user 2464 * buffer to the target's buffer. It is expected that the list creation 2465 * and processing all occurs during binder_transaction() so these lists 2466 * are only accessed in local context. 
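 *
 * Worked example with made-up numbers: for one sg block of 32 bytes at
 * target offset 0 and a single 8-byte pointer fixup at offset 8, the
 * loop copies bytes 0-7 from the sender, writes @fixup_data at offset
 * 8 in place of the sender's bytes, then resumes copying at offset 16.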
2467 * 2468 * Return: 0=success, else -errno 2469 */ 2470 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, 2471 struct binder_buffer *buffer, 2472 struct list_head *sgc_head, 2473 struct list_head *pf_head) 2474 { 2475 int ret = 0; 2476 struct binder_sg_copy *sgc, *tmpsgc; 2477 struct binder_ptr_fixup *tmppf; 2478 struct binder_ptr_fixup *pf = 2479 list_first_entry_or_null(pf_head, struct binder_ptr_fixup, 2480 node); 2481 2482 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { 2483 size_t bytes_copied = 0; 2484 2485 while (bytes_copied < sgc->length) { 2486 size_t copy_size; 2487 size_t bytes_left = sgc->length - bytes_copied; 2488 size_t offset = sgc->offset + bytes_copied; 2489 2490 /* 2491 * We copy up to the fixup (pointed to by pf) 2492 */ 2493 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) 2494 : bytes_left; 2495 if (!ret && copy_size) 2496 ret = binder_alloc_copy_user_to_buffer( 2497 alloc, buffer, 2498 offset, 2499 sgc->sender_uaddr + bytes_copied, 2500 copy_size); 2501 bytes_copied += copy_size; 2502 if (copy_size != bytes_left) { 2503 BUG_ON(!pf); 2504 /* we stopped at a fixup offset */ 2505 if (pf->skip_size) { 2506 /* 2507 * we are just skipping. This is for 2508 * BINDER_TYPE_FDA where the translated 2509 * fds will be fixed up when we get 2510 * to target context. 2511 */ 2512 bytes_copied += pf->skip_size; 2513 } else { 2514 /* apply the fixup indicated by pf */ 2515 if (!ret) 2516 ret = binder_alloc_copy_to_buffer( 2517 alloc, buffer, 2518 pf->offset, 2519 &pf->fixup_data, 2520 sizeof(pf->fixup_data)); 2521 bytes_copied += sizeof(pf->fixup_data); 2522 } 2523 list_del(&pf->node); 2524 kfree(pf); 2525 pf = list_first_entry_or_null(pf_head, 2526 struct binder_ptr_fixup, node); 2527 } 2528 } 2529 list_del(&sgc->node); 2530 kfree(sgc); 2531 } 2532 list_for_each_entry_safe(pf, tmppf, pf_head, node) { 2533 BUG_ON(pf->skip_size == 0); 2534 list_del(&pf->node); 2535 kfree(pf); 2536 } 2537 BUG_ON(!list_empty(sgc_head)); 2538 2539 return ret > 0 ? -EINVAL : ret; 2540 } 2541 2542 /** 2543 * binder_cleanup_deferred_txn_lists() - free specified lists 2544 * @sgc_head: list_head of scatter-gather copy list 2545 * @pf_head: list_head of pointer fixup list 2546 * 2547 * Called to clean up @sgc_head and @pf_head if there is an 2548 * error. 2549 */ 2550 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, 2551 struct list_head *pf_head) 2552 { 2553 struct binder_sg_copy *sgc, *tmpsgc; 2554 struct binder_ptr_fixup *pf, *tmppf; 2555 2556 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { 2557 list_del(&sgc->node); 2558 kfree(sgc); 2559 } 2560 list_for_each_entry_safe(pf, tmppf, pf_head, node) { 2561 list_del(&pf->node); 2562 kfree(pf); 2563 } 2564 } 2565 2566 /** 2567 * binder_defer_copy() - queue a scatter-gather buffer for copy 2568 * @sgc_head: list_head of scatter-gather copy list 2569 * @offset: binder buffer offset in target process 2570 * @sender_uaddr: user address in source process 2571 * @length: bytes to copy 2572 * 2573 * Specify a scatter-gather block to be copied. The actual copy must 2574 * be deferred until all the needed fixups are identified and queued. 2575 * Then the copy and fixups are done together so un-translated values 2576 * from the source are never visible in the target buffer. 2577 * 2578 * We are guaranteed that repeated calls to this function will have 2579 * monotonically increasing @offset values so the list will naturally 2580 * be ordered. 
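 *
 * As used for BINDER_TYPE_PTR objects in binder_transaction() below:
 *
 *	ret = binder_defer_copy(&sgc_head, sg_buf_offset,
 *				(const void __user *)(uintptr_t)bp->buffer,
 *				bp->length);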
2581 * 2582 * Return: 0=success, else -errno 2583 */ 2584 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, 2585 const void __user *sender_uaddr, size_t length) 2586 { 2587 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); 2588 2589 if (!bc) 2590 return -ENOMEM; 2591 2592 bc->offset = offset; 2593 bc->sender_uaddr = sender_uaddr; 2594 bc->length = length; 2595 INIT_LIST_HEAD(&bc->node); 2596 2597 /* 2598 * We are guaranteed that the deferred copies are in-order 2599 * so just add to the tail. 2600 */ 2601 list_add_tail(&bc->node, sgc_head); 2602 2603 return 0; 2604 } 2605 2606 /** 2607 * binder_add_fixup() - queue a fixup to be applied to sg copy 2608 * @pf_head: list_head of binder ptr fixup list 2609 * @offset: binder buffer offset in target process 2610 * @fixup: bytes to be copied for fixup 2611 * @skip_size: bytes to skip when copying (fixup will be applied later) 2612 * 2613 * Add the specified fixup to a list ordered by @offset. When copying 2614 * the scatter-gather buffers, the fixup will be copied instead of 2615 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup 2616 * will be applied later (in target process context), so we just skip 2617 * the bytes specified by @skip_size. If @skip_size is 0, we copy the 2618 * value in @fixup. 2619 * 2620 * This function is called *mostly* in @offset order, but there are 2621 * exceptions. Since out-of-order inserts are relatively uncommon, 2622 * we insert the new element by searching backward from the tail of 2623 * the list. 2624 * 2625 * Return: 0=success, else -errno 2626 */ 2627 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, 2628 binder_uintptr_t fixup, size_t skip_size) 2629 { 2630 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); 2631 struct binder_ptr_fixup *tmppf; 2632 2633 if (!pf) 2634 return -ENOMEM; 2635 2636 pf->offset = offset; 2637 pf->fixup_data = fixup; 2638 pf->skip_size = skip_size; 2639 INIT_LIST_HEAD(&pf->node); 2640 2641 /* Fixups are *mostly* added in-order, but there are some 2642 * exceptions. Look backwards through list for insertion point. 2643 */ 2644 list_for_each_entry_reverse(tmppf, pf_head, node) { 2645 if (tmppf->offset < pf->offset) { 2646 list_add(&pf->node, &tmppf->node); 2647 return 0; 2648 } 2649 } 2650 /* 2651 * if we get here, then the new offset is the lowest so 2652 * insert at the head 2653 */ 2654 list_add(&pf->node, pf_head); 2655 return 0; 2656 } 2657 2658 static int binder_translate_fd_array(struct list_head *pf_head, 2659 struct binder_fd_array_object *fda, 2660 const void __user *sender_ubuffer, 2661 struct binder_buffer_object *parent, 2662 struct binder_buffer_object *sender_uparent, 2663 struct binder_transaction *t, 2664 struct binder_thread *thread, 2665 struct binder_transaction *in_reply_to) 2666 { 2667 binder_size_t fdi, fd_buf_size; 2668 binder_size_t fda_offset; 2669 const void __user *sender_ufda_base; 2670 struct binder_proc *proc = thread->proc; 2671 int ret; 2672 2673 if (fda->num_fds == 0) 2674 return 0; 2675 2676 fd_buf_size = sizeof(u32) * fda->num_fds; 2677 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2678 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2679 proc->pid, thread->pid, (u64)fda->num_fds); 2680 return -EINVAL; 2681 } 2682 if (fd_buf_size > parent->length || 2683 fda->parent_offset > parent->length - fd_buf_size) { 2684 /* No space for all file descriptors here. 
*/ 2685 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2686 proc->pid, thread->pid, (u64)fda->num_fds); 2687 return -EINVAL; 2688 } 2689 /* 2690 * the source data for binder_buffer_object is visible 2691 * to user-space and the @buffer element is the user 2692 * pointer to the buffer_object containing the fd_array. 2693 * Convert the address to an offset relative to 2694 * the base of the transaction buffer. 2695 */ 2696 fda_offset = parent->buffer - t->buffer->user_data + 2697 fda->parent_offset; 2698 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + 2699 fda->parent_offset; 2700 2701 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || 2702 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { 2703 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2704 proc->pid, thread->pid); 2705 return -EINVAL; 2706 } 2707 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); 2708 if (ret) 2709 return ret; 2710 2711 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2712 u32 fd; 2713 binder_size_t offset = fda_offset + fdi * sizeof(fd); 2714 binder_size_t sender_uoffset = fdi * sizeof(fd); 2715 2716 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); 2717 if (!ret) 2718 ret = binder_translate_fd(fd, offset, t, thread, 2719 in_reply_to); 2720 if (ret) 2721 return ret > 0 ? -EINVAL : ret; 2722 } 2723 return 0; 2724 } 2725 2726 static int binder_fixup_parent(struct list_head *pf_head, 2727 struct binder_transaction *t, 2728 struct binder_thread *thread, 2729 struct binder_buffer_object *bp, 2730 binder_size_t off_start_offset, 2731 binder_size_t num_valid, 2732 binder_size_t last_fixup_obj_off, 2733 binder_size_t last_fixup_min_off) 2734 { 2735 struct binder_buffer_object *parent; 2736 struct binder_buffer *b = t->buffer; 2737 struct binder_proc *proc = thread->proc; 2738 struct binder_proc *target_proc = t->to_proc; 2739 struct binder_object object; 2740 binder_size_t buffer_offset; 2741 binder_size_t parent_offset; 2742 2743 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2744 return 0; 2745 2746 parent = binder_validate_ptr(target_proc, b, &object, bp->parent, 2747 off_start_offset, &parent_offset, 2748 num_valid); 2749 if (!parent) { 2750 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2751 proc->pid, thread->pid); 2752 return -EINVAL; 2753 } 2754 2755 if (!binder_validate_fixup(target_proc, b, off_start_offset, 2756 parent_offset, bp->parent_offset, 2757 last_fixup_obj_off, 2758 last_fixup_min_off)) { 2759 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2760 proc->pid, thread->pid); 2761 return -EINVAL; 2762 } 2763 2764 if (parent->length < sizeof(binder_uintptr_t) || 2765 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2766 /* No space for a pointer here! */ 2767 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2768 proc->pid, thread->pid); 2769 return -EINVAL; 2770 } 2771 2772 buffer_offset = bp->parent_offset + parent->buffer - b->user_data; 2773 2774 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); 2775 } 2776 2777 /** 2778 * binder_can_update_transaction() - Can a txn be superseded by an updated one? 
2779 * @t1: the pending async txn in the frozen process 2780 * @t2: the new async txn to supersede the outdated pending one 2781 * 2782 * Return: true if t2 can supersede t1 2783 * false if t2 cannot supersede t1 2784 */ 2785 static bool binder_can_update_transaction(struct binder_transaction *t1, 2786 struct binder_transaction *t2) 2787 { 2788 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) != 2789 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc) 2790 return false; 2791 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && 2792 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid && 2793 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr && 2794 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie) 2795 return true; 2796 return false; 2797 } 2798 2799 /** 2800 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction 2801 * @t: new async transaction 2802 * @target_list: list to find outdated transaction 2803 * 2804 * Return: the outdated transaction if found 2805 * NULL if no outdated transaction can be found 2806 * 2807 * Requires the proc->inner_lock to be held. 2808 */ 2809 static struct binder_transaction * 2810 binder_find_outdated_transaction_ilocked(struct binder_transaction *t, 2811 struct list_head *target_list) 2812 { 2813 struct binder_work *w; 2814 2815 list_for_each_entry(w, target_list, entry) { 2816 struct binder_transaction *t_queued; 2817 2818 if (w->type != BINDER_WORK_TRANSACTION) 2819 continue; 2820 t_queued = container_of(w, struct binder_transaction, work); 2821 if (binder_can_update_transaction(t_queued, t)) 2822 return t_queued; 2823 } 2824 return NULL; 2825 } 2826 2827 /** 2828 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2829 * @t: transaction to send 2830 * @proc: process to send the transaction to 2831 * @thread: thread in @proc to send the transaction to (may be NULL) 2832 * 2833 * This function queues a transaction to the specified process. It will try 2834 * to find a thread in the target process to handle the transaction and 2835 * wake it up. If no thread is found, the work is queued to the proc 2836 * waitqueue. 2837 * 2838 * If the @thread parameter is not NULL, the transaction is always queued 2839 * to the waitlist of that specific thread.
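 *
 * Caller-side handling, condensed from the oneway path of
 * binder_transaction() below (a sketch of existing code, not a new API):
 *
 *	return_error = binder_proc_transaction(t, target_proc, NULL);
 *	if (return_error == BR_TRANSACTION_PENDING_FROZEN)
 *		tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
 *	if (return_error && return_error != BR_TRANSACTION_PENDING_FROZEN)
 *		goto err_dead_proc_or_thread;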
2840 * 2841 * Return: 0 if the transaction was successfully queued 2842 * BR_DEAD_REPLY if the target process or thread is dead 2843 * BR_FROZEN_REPLY if the target process or thread is frozen and 2844 * the sync transaction was rejected 2845 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen 2846 * and the async transaction was successfully queued 2847 */ 2848 static int binder_proc_transaction(struct binder_transaction *t, 2849 struct binder_proc *proc, 2850 struct binder_thread *thread) 2851 { 2852 struct binder_node *node = t->buffer->target_node; 2853 bool oneway = !!(t->flags & TF_ONE_WAY); 2854 bool pending_async = false; 2855 struct binder_transaction *t_outdated = NULL; 2856 bool frozen = false; 2857 2858 BUG_ON(!node); 2859 binder_node_lock(node); 2860 if (oneway) { 2861 BUG_ON(thread); 2862 if (node->has_async_transaction) 2863 pending_async = true; 2864 else 2865 node->has_async_transaction = true; 2866 } 2867 2868 binder_inner_proc_lock(proc); 2869 if (proc->is_frozen) { 2870 frozen = true; 2871 proc->sync_recv |= !oneway; 2872 proc->async_recv |= oneway; 2873 } 2874 2875 if ((frozen && !oneway) || proc->is_dead || 2876 (thread && thread->is_dead)) { 2877 binder_inner_proc_unlock(proc); 2878 binder_node_unlock(node); 2879 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY; 2880 } 2881 2882 if (!thread && !pending_async) 2883 thread = binder_select_thread_ilocked(proc); 2884 2885 if (thread) { 2886 binder_enqueue_thread_work_ilocked(thread, &t->work); 2887 } else if (!pending_async) { 2888 binder_enqueue_work_ilocked(&t->work, &proc->todo); 2889 } else { 2890 if ((t->flags & TF_UPDATE_TXN) && frozen) { 2891 t_outdated = binder_find_outdated_transaction_ilocked(t, 2892 &node->async_todo); 2893 if (t_outdated) { 2894 binder_debug(BINDER_DEBUG_TRANSACTION, 2895 "txn %d supersedes %d\n", 2896 t->debug_id, t_outdated->debug_id); 2897 list_del_init(&t_outdated->work.entry); 2898 proc->outstanding_txns--; 2899 } 2900 } 2901 binder_enqueue_work_ilocked(&t->work, &node->async_todo); 2902 } 2903 2904 if (!pending_async) 2905 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); 2906 2907 proc->outstanding_txns++; 2908 binder_inner_proc_unlock(proc); 2909 binder_node_unlock(node); 2910 2911 /* 2912 * To reduce potential contention, free the outdated transaction and 2913 * buffer after releasing the locks. 2914 */ 2915 if (t_outdated) { 2916 struct binder_buffer *buffer = t_outdated->buffer; 2917 2918 t_outdated->buffer = NULL; 2919 buffer->transaction = NULL; 2920 trace_binder_transaction_update_buffer_release(buffer); 2921 binder_release_entire_buffer(proc, NULL, buffer, false); 2922 binder_alloc_free_buf(&proc->alloc, buffer); 2923 kfree(t_outdated); 2924 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2925 } 2926 2927 if (oneway && frozen) 2928 return BR_TRANSACTION_PENDING_FROZEN; 2929 2930 return 0; 2931 } 2932 2933 /** 2934 * binder_get_node_refs_for_txn() - Get required refs on node for txn 2935 * @node: struct binder_node for which to get refs 2936 * @procp: returns @node->proc if valid 2937 * @error: if no @procp then returns BR_DEAD_REPLY 2938 * 2939 * User-space normally keeps the node alive when creating a transaction 2940 * since it has a reference to the target. The local strong ref keeps it 2941 * alive if the sending process dies before the target process processes 2942 * the transaction. If the source process is malicious or has a reference 2943 * counting bug, relying on the local strong ref can fail. 
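 *
 * Usage at the handle-target lookup in binder_transaction() below:
 *
 *	target_node = binder_get_node_refs_for_txn(ref->node, &target_proc,
 *						   &return_error);
 *	if (!target_node)
 *		// target died; return_error now holds BR_DEAD_REPLY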
2944 * 2945 * Since user-space can cause the local strong ref to go away, we also take 2946 * a tmpref on the node to ensure it survives while we are constructing 2947 * the transaction. We also need a tmpref on the proc while we are 2948 * constructing the transaction, so we take that here as well. 2949 * 2950 * Return: The target_node with refs taken or NULL if @node->proc is NULL. 2951 * Also sets @procp if valid. If @node->proc is NULL, indicating that the 2952 * target proc has died, @error is set to BR_DEAD_REPLY. 2953 */ 2954 static struct binder_node *binder_get_node_refs_for_txn( 2955 struct binder_node *node, 2956 struct binder_proc **procp, 2957 uint32_t *error) 2958 { 2959 struct binder_node *target_node = NULL; 2960 2961 binder_node_inner_lock(node); 2962 if (node->proc) { 2963 target_node = node; 2964 binder_inc_node_nilocked(node, 1, 0, NULL); 2965 binder_inc_node_tmpref_ilocked(node); 2966 node->proc->tmp_ref++; 2967 *procp = node->proc; 2968 } else 2969 *error = BR_DEAD_REPLY; 2970 binder_node_inner_unlock(node); 2971 2972 return target_node; 2973 } 2974 2975 static void binder_set_txn_from_error(struct binder_transaction *t, int id, 2976 uint32_t command, int32_t param) 2977 { 2978 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t); 2979 2980 if (!from) { 2981 /* annotation for sparse */ 2982 __release(&from->proc->inner_lock); 2983 return; 2984 } 2985 2986 /* don't override existing errors */ 2987 if (from->ee.command == BR_OK) 2988 binder_set_extended_error(&from->ee, id, command, param); 2989 binder_inner_proc_unlock(from->proc); 2990 binder_thread_dec_tmpref(from); 2991 } 2992 2993 static void binder_transaction(struct binder_proc *proc, 2994 struct binder_thread *thread, 2995 struct binder_transaction_data *tr, int reply, 2996 binder_size_t extra_buffers_size) 2997 { 2998 int ret; 2999 struct binder_transaction *t; 3000 struct binder_work *w; 3001 struct binder_work *tcomplete; 3002 binder_size_t buffer_offset = 0; 3003 binder_size_t off_start_offset, off_end_offset; 3004 binder_size_t off_min; 3005 binder_size_t sg_buf_offset, sg_buf_end_offset; 3006 binder_size_t user_offset = 0; 3007 struct binder_proc *target_proc = NULL; 3008 struct binder_thread *target_thread = NULL; 3009 struct binder_node *target_node = NULL; 3010 struct binder_transaction *in_reply_to = NULL; 3011 struct binder_transaction_log_entry *e; 3012 uint32_t return_error = 0; 3013 uint32_t return_error_param = 0; 3014 uint32_t return_error_line = 0; 3015 binder_size_t last_fixup_obj_off = 0; 3016 binder_size_t last_fixup_min_off = 0; 3017 struct binder_context *context = proc->context; 3018 int t_debug_id = atomic_inc_return(&binder_last_id); 3019 ktime_t t_start_time = ktime_get(); 3020 char *secctx = NULL; 3021 u32 secctx_sz = 0; 3022 struct list_head sgc_head; 3023 struct list_head pf_head; 3024 const void __user *user_buffer = (const void __user *) 3025 (uintptr_t)tr->data.ptr.buffer; 3026 INIT_LIST_HEAD(&sgc_head); 3027 INIT_LIST_HEAD(&pf_head); 3028 3029 e = binder_transaction_log_add(&binder_transaction_log); 3030 e->debug_id = t_debug_id; 3031 e->call_type = reply ?
2 : !!(tr->flags & TF_ONE_WAY); 3032 e->from_proc = proc->pid; 3033 e->from_thread = thread->pid; 3034 e->target_handle = tr->target.handle; 3035 e->data_size = tr->data_size; 3036 e->offsets_size = tr->offsets_size; 3037 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); 3038 3039 binder_inner_proc_lock(proc); 3040 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); 3041 binder_inner_proc_unlock(proc); 3042 3043 if (reply) { 3044 binder_inner_proc_lock(proc); 3045 in_reply_to = thread->transaction_stack; 3046 if (in_reply_to == NULL) { 3047 binder_inner_proc_unlock(proc); 3048 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 3049 proc->pid, thread->pid); 3050 return_error = BR_FAILED_REPLY; 3051 return_error_param = -EPROTO; 3052 return_error_line = __LINE__; 3053 goto err_empty_call_stack; 3054 } 3055 if (in_reply_to->to_thread != thread) { 3056 spin_lock(&in_reply_to->lock); 3057 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 3058 proc->pid, thread->pid, in_reply_to->debug_id, 3059 in_reply_to->to_proc ? 3060 in_reply_to->to_proc->pid : 0, 3061 in_reply_to->to_thread ? 3062 in_reply_to->to_thread->pid : 0); 3063 spin_unlock(&in_reply_to->lock); 3064 binder_inner_proc_unlock(proc); 3065 return_error = BR_FAILED_REPLY; 3066 return_error_param = -EPROTO; 3067 return_error_line = __LINE__; 3068 in_reply_to = NULL; 3069 goto err_bad_call_stack; 3070 } 3071 thread->transaction_stack = in_reply_to->to_parent; 3072 binder_inner_proc_unlock(proc); 3073 binder_set_nice(in_reply_to->saved_priority); 3074 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 3075 if (target_thread == NULL) { 3076 /* annotation for sparse */ 3077 __release(&target_thread->proc->inner_lock); 3078 binder_txn_error("%d:%d reply target not found\n", 3079 thread->pid, proc->pid); 3080 return_error = BR_DEAD_REPLY; 3081 return_error_line = __LINE__; 3082 goto err_dead_binder; 3083 } 3084 if (target_thread->transaction_stack != in_reply_to) { 3085 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 3086 proc->pid, thread->pid, 3087 target_thread->transaction_stack ? 3088 target_thread->transaction_stack->debug_id : 0, 3089 in_reply_to->debug_id); 3090 binder_inner_proc_unlock(target_thread->proc); 3091 return_error = BR_FAILED_REPLY; 3092 return_error_param = -EPROTO; 3093 return_error_line = __LINE__; 3094 in_reply_to = NULL; 3095 target_thread = NULL; 3096 goto err_dead_binder; 3097 } 3098 target_proc = target_thread->proc; 3099 target_proc->tmp_ref++; 3100 binder_inner_proc_unlock(target_thread->proc); 3101 } else { 3102 if (tr->target.handle) { 3103 struct binder_ref *ref; 3104 3105 /* 3106 * There must already be a strong ref 3107 * on this node. If so, do a strong 3108 * increment on the node to ensure it 3109 * stays alive until the transaction is 3110 * done. 
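 * The strong increment taken here via binder_get_node_refs_for_txn()
 * is dropped again in binder_transaction_buffer_release() when the
 * buffer's target_node reference is released.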
3111 */ 3112 binder_proc_lock(proc); 3113 ref = binder_get_ref_olocked(proc, tr->target.handle, 3114 true); 3115 if (ref) { 3116 target_node = binder_get_node_refs_for_txn( 3117 ref->node, &target_proc, 3118 &return_error); 3119 } else { 3120 binder_user_error("%d:%d got transaction to invalid handle, %u\n", 3121 proc->pid, thread->pid, tr->target.handle); 3122 return_error = BR_FAILED_REPLY; 3123 } 3124 binder_proc_unlock(proc); 3125 } else { 3126 mutex_lock(&context->context_mgr_node_lock); 3127 target_node = context->binder_context_mgr_node; 3128 if (target_node) 3129 target_node = binder_get_node_refs_for_txn( 3130 target_node, &target_proc, 3131 &return_error); 3132 else 3133 return_error = BR_DEAD_REPLY; 3134 mutex_unlock(&context->context_mgr_node_lock); 3135 if (target_node && target_proc->pid == proc->pid) { 3136 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 3137 proc->pid, thread->pid); 3138 return_error = BR_FAILED_REPLY; 3139 return_error_param = -EINVAL; 3140 return_error_line = __LINE__; 3141 goto err_invalid_target_handle; 3142 } 3143 } 3144 if (!target_node) { 3145 binder_txn_error("%d:%d cannot find target node\n", 3146 thread->pid, proc->pid); 3147 /* 3148 * return_error is set above 3149 */ 3150 return_error_param = -EINVAL; 3151 return_error_line = __LINE__; 3152 goto err_dead_binder; 3153 } 3154 e->to_node = target_node->debug_id; 3155 if (WARN_ON(proc == target_proc)) { 3156 binder_txn_error("%d:%d self transactions not allowed\n", 3157 thread->pid, proc->pid); 3158 return_error = BR_FAILED_REPLY; 3159 return_error_param = -EINVAL; 3160 return_error_line = __LINE__; 3161 goto err_invalid_target_handle; 3162 } 3163 if (security_binder_transaction(proc->cred, 3164 target_proc->cred) < 0) { 3165 binder_txn_error("%d:%d transaction credentials failed\n", 3166 thread->pid, proc->pid); 3167 return_error = BR_FAILED_REPLY; 3168 return_error_param = -EPERM; 3169 return_error_line = __LINE__; 3170 goto err_invalid_target_handle; 3171 } 3172 binder_inner_proc_lock(proc); 3173 3174 w = list_first_entry_or_null(&thread->todo, 3175 struct binder_work, entry); 3176 if (!(tr->flags & TF_ONE_WAY) && w && 3177 w->type == BINDER_WORK_TRANSACTION) { 3178 /* 3179 * Do not allow new outgoing transaction from a 3180 * thread that has a transaction at the head of 3181 * its todo list. Only need to check the head 3182 * because binder_select_thread_ilocked picks a 3183 * thread from proc->waiting_threads to enqueue 3184 * the transaction, and nothing is queued to the 3185 * todo list while the thread is on waiting_threads. 3186 */ 3187 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 3188 proc->pid, thread->pid); 3189 binder_inner_proc_unlock(proc); 3190 return_error = BR_FAILED_REPLY; 3191 return_error_param = -EPROTO; 3192 return_error_line = __LINE__; 3193 goto err_bad_todo_list; 3194 } 3195 3196 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 3197 struct binder_transaction *tmp; 3198 3199 tmp = thread->transaction_stack; 3200 if (tmp->to_thread != thread) { 3201 spin_lock(&tmp->lock); 3202 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 3203 proc->pid, thread->pid, tmp->debug_id, 3204 tmp->to_proc ? tmp->to_proc->pid : 0, 3205 tmp->to_thread ? 
3206 tmp->to_thread->pid : 0); 3207 spin_unlock(&tmp->lock); 3208 binder_inner_proc_unlock(proc); 3209 return_error = BR_FAILED_REPLY; 3210 return_error_param = -EPROTO; 3211 return_error_line = __LINE__; 3212 goto err_bad_call_stack; 3213 } 3214 while (tmp) { 3215 struct binder_thread *from; 3216 3217 spin_lock(&tmp->lock); 3218 from = tmp->from; 3219 if (from && from->proc == target_proc) { 3220 atomic_inc(&from->tmp_ref); 3221 target_thread = from; 3222 spin_unlock(&tmp->lock); 3223 break; 3224 } 3225 spin_unlock(&tmp->lock); 3226 tmp = tmp->from_parent; 3227 } 3228 } 3229 binder_inner_proc_unlock(proc); 3230 } 3231 if (target_thread) 3232 e->to_thread = target_thread->pid; 3233 e->to_proc = target_proc->pid; 3234 3235 /* TODO: reuse incoming transaction for reply */ 3236 t = kzalloc(sizeof(*t), GFP_KERNEL); 3237 if (t == NULL) { 3238 binder_txn_error("%d:%d cannot allocate transaction\n", 3239 thread->pid, proc->pid); 3240 return_error = BR_FAILED_REPLY; 3241 return_error_param = -ENOMEM; 3242 return_error_line = __LINE__; 3243 goto err_alloc_t_failed; 3244 } 3245 INIT_LIST_HEAD(&t->fd_fixups); 3246 binder_stats_created(BINDER_STAT_TRANSACTION); 3247 spin_lock_init(&t->lock); 3248 3249 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 3250 if (tcomplete == NULL) { 3251 binder_txn_error("%d:%d cannot allocate work for transaction\n", 3252 thread->pid, proc->pid); 3253 return_error = BR_FAILED_REPLY; 3254 return_error_param = -ENOMEM; 3255 return_error_line = __LINE__; 3256 goto err_alloc_tcomplete_failed; 3257 } 3258 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 3259 3260 t->debug_id = t_debug_id; 3261 t->start_time = t_start_time; 3262 3263 if (reply) 3264 binder_debug(BINDER_DEBUG_TRANSACTION, 3265 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 3266 proc->pid, thread->pid, t->debug_id, 3267 target_proc->pid, target_thread->pid, 3268 (u64)tr->data.ptr.buffer, 3269 (u64)tr->data.ptr.offsets, 3270 (u64)tr->data_size, (u64)tr->offsets_size, 3271 (u64)extra_buffers_size); 3272 else 3273 binder_debug(BINDER_DEBUG_TRANSACTION, 3274 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 3275 proc->pid, thread->pid, t->debug_id, 3276 target_proc->pid, target_node->debug_id, 3277 (u64)tr->data.ptr.buffer, 3278 (u64)tr->data.ptr.offsets, 3279 (u64)tr->data_size, (u64)tr->offsets_size, 3280 (u64)extra_buffers_size); 3281 3282 if (!reply && !(tr->flags & TF_ONE_WAY)) 3283 t->from = thread; 3284 else 3285 t->from = NULL; 3286 t->from_pid = proc->pid; 3287 t->from_tid = thread->pid; 3288 t->sender_euid = task_euid(proc->tsk); 3289 t->to_proc = target_proc; 3290 t->to_thread = target_thread; 3291 t->code = tr->code; 3292 t->flags = tr->flags; 3293 t->priority = task_nice(current); 3294 3295 if (target_node && target_node->txn_security_ctx) { 3296 u32 secid; 3297 size_t added_size; 3298 3299 security_cred_getsecid(proc->cred, &secid); 3300 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); 3301 if (ret) { 3302 binder_txn_error("%d:%d failed to get security context\n", 3303 thread->pid, proc->pid); 3304 return_error = BR_FAILED_REPLY; 3305 return_error_param = ret; 3306 return_error_line = __LINE__; 3307 goto err_get_secctx_failed; 3308 } 3309 added_size = ALIGN(secctx_sz, sizeof(u64)); 3310 extra_buffers_size += added_size; 3311 if (extra_buffers_size < added_size) { 3312 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n", 3313 thread->pid, proc->pid); 3314 return_error = BR_FAILED_REPLY; 3315 
return_error_param = -EINVAL; 3316 return_error_line = __LINE__; 3317 goto err_bad_extra_size; 3318 } 3319 } 3320 3321 trace_binder_transaction(reply, t, target_node); 3322 3323 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3324 tr->offsets_size, extra_buffers_size, 3325 !reply && (t->flags & TF_ONE_WAY)); 3326 if (IS_ERR(t->buffer)) { 3327 char *s; 3328 3329 ret = PTR_ERR(t->buffer); 3330 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying" 3331 : (ret == -ENOSPC) ? ": no space left" 3332 : (ret == -ENOMEM) ? ": memory allocation failed" 3333 : ""; 3334 binder_txn_error("cannot allocate buffer%s", s); 3335 3336 return_error_param = PTR_ERR(t->buffer); 3337 return_error = return_error_param == -ESRCH ? 3338 BR_DEAD_REPLY : BR_FAILED_REPLY; 3339 return_error_line = __LINE__; 3340 t->buffer = NULL; 3341 goto err_binder_alloc_buf_failed; 3342 } 3343 if (secctx) { 3344 int err; 3345 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + 3346 ALIGN(tr->offsets_size, sizeof(void *)) + 3347 ALIGN(extra_buffers_size, sizeof(void *)) - 3348 ALIGN(secctx_sz, sizeof(u64)); 3349 3350 t->security_ctx = t->buffer->user_data + buf_offset; 3351 err = binder_alloc_copy_to_buffer(&target_proc->alloc, 3352 t->buffer, buf_offset, 3353 secctx, secctx_sz); 3354 if (err) { 3355 t->security_ctx = 0; 3356 WARN_ON(1); 3357 } 3358 security_release_secctx(secctx, secctx_sz); 3359 secctx = NULL; 3360 } 3361 t->buffer->debug_id = t->debug_id; 3362 t->buffer->transaction = t; 3363 t->buffer->target_node = target_node; 3364 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); 3365 trace_binder_transaction_alloc_buf(t->buffer); 3366 3367 if (binder_alloc_copy_user_to_buffer( 3368 &target_proc->alloc, 3369 t->buffer, 3370 ALIGN(tr->data_size, sizeof(void *)), 3371 (const void __user *) 3372 (uintptr_t)tr->data.ptr.offsets, 3373 tr->offsets_size)) { 3374 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3375 proc->pid, thread->pid); 3376 return_error = BR_FAILED_REPLY; 3377 return_error_param = -EFAULT; 3378 return_error_line = __LINE__; 3379 goto err_copy_data_failed; 3380 } 3381 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3382 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3383 proc->pid, thread->pid, (u64)tr->offsets_size); 3384 return_error = BR_FAILED_REPLY; 3385 return_error_param = -EINVAL; 3386 return_error_line = __LINE__; 3387 goto err_bad_offset; 3388 } 3389 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3390 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3391 proc->pid, thread->pid, 3392 (u64)extra_buffers_size); 3393 return_error = BR_FAILED_REPLY; 3394 return_error_param = -EINVAL; 3395 return_error_line = __LINE__; 3396 goto err_bad_offset; 3397 } 3398 off_start_offset = ALIGN(tr->data_size, sizeof(void *)); 3399 buffer_offset = off_start_offset; 3400 off_end_offset = off_start_offset + tr->offsets_size; 3401 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3402 sg_buf_end_offset = sg_buf_offset + extra_buffers_size - 3403 ALIGN(secctx_sz, sizeof(u64)); 3404 off_min = 0; 3405 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3406 buffer_offset += sizeof(binder_size_t)) { 3407 struct binder_object_header *hdr; 3408 size_t object_size; 3409 struct binder_object object; 3410 binder_size_t object_offset; 3411 binder_size_t copy_size; 3412 3413 if (binder_alloc_copy_from_buffer(&target_proc->alloc, 3414 &object_offset, 3415 t->buffer, 3416 
buffer_offset, 3417 sizeof(object_offset))) { 3418 binder_txn_error("%d:%d copy offset from buffer failed\n", 3419 thread->pid, proc->pid); 3420 return_error = BR_FAILED_REPLY; 3421 return_error_param = -EINVAL; 3422 return_error_line = __LINE__; 3423 goto err_bad_offset; 3424 } 3425 3426 /* 3427 * Copy the source user buffer up to the next object 3428 * that will be processed. 3429 */ 3430 copy_size = object_offset - user_offset; 3431 if (copy_size && (user_offset > object_offset || 3432 object_offset > tr->data_size || 3433 binder_alloc_copy_user_to_buffer( 3434 &target_proc->alloc, 3435 t->buffer, user_offset, 3436 user_buffer + user_offset, 3437 copy_size))) { 3438 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3439 proc->pid, thread->pid); 3440 return_error = BR_FAILED_REPLY; 3441 return_error_param = -EFAULT; 3442 return_error_line = __LINE__; 3443 goto err_copy_data_failed; 3444 } 3445 object_size = binder_get_object(target_proc, user_buffer, 3446 t->buffer, object_offset, &object); 3447 if (object_size == 0 || object_offset < off_min) { 3448 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3449 proc->pid, thread->pid, 3450 (u64)object_offset, 3451 (u64)off_min, 3452 (u64)t->buffer->data_size); 3453 return_error = BR_FAILED_REPLY; 3454 return_error_param = -EINVAL; 3455 return_error_line = __LINE__; 3456 goto err_bad_offset; 3457 } 3458 /* 3459 * Set offset to the next buffer fragment to be 3460 * copied 3461 */ 3462 user_offset = object_offset + object_size; 3463 3464 hdr = &object.hdr; 3465 off_min = object_offset + object_size; 3466 switch (hdr->type) { 3467 case BINDER_TYPE_BINDER: 3468 case BINDER_TYPE_WEAK_BINDER: { 3469 struct flat_binder_object *fp; 3470 3471 fp = to_flat_binder_object(hdr); 3472 ret = binder_translate_binder(fp, t, thread); 3473 3474 if (ret < 0 || 3475 binder_alloc_copy_to_buffer(&target_proc->alloc, 3476 t->buffer, 3477 object_offset, 3478 fp, sizeof(*fp))) { 3479 binder_txn_error("%d:%d translate binder failed\n", 3480 thread->pid, proc->pid); 3481 return_error = BR_FAILED_REPLY; 3482 return_error_param = ret; 3483 return_error_line = __LINE__; 3484 goto err_translate_failed; 3485 } 3486 } break; 3487 case BINDER_TYPE_HANDLE: 3488 case BINDER_TYPE_WEAK_HANDLE: { 3489 struct flat_binder_object *fp; 3490 3491 fp = to_flat_binder_object(hdr); 3492 ret = binder_translate_handle(fp, t, thread); 3493 if (ret < 0 || 3494 binder_alloc_copy_to_buffer(&target_proc->alloc, 3495 t->buffer, 3496 object_offset, 3497 fp, sizeof(*fp))) { 3498 binder_txn_error("%d:%d translate handle failed\n", 3499 thread->pid, proc->pid); 3500 return_error = BR_FAILED_REPLY; 3501 return_error_param = ret; 3502 return_error_line = __LINE__; 3503 goto err_translate_failed; 3504 } 3505 } break; 3506 3507 case BINDER_TYPE_FD: { 3508 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3509 binder_size_t fd_offset = object_offset + 3510 (uintptr_t)&fp->fd - (uintptr_t)fp; 3511 int ret = binder_translate_fd(fp->fd, fd_offset, t, 3512 thread, in_reply_to); 3513 3514 fp->pad_binder = 0; 3515 if (ret < 0 || 3516 binder_alloc_copy_to_buffer(&target_proc->alloc, 3517 t->buffer, 3518 object_offset, 3519 fp, sizeof(*fp))) { 3520 binder_txn_error("%d:%d translate fd failed\n", 3521 thread->pid, proc->pid); 3522 return_error = BR_FAILED_REPLY; 3523 return_error_param = ret; 3524 return_error_line = __LINE__; 3525 goto err_translate_failed; 3526 } 3527 } break; 3528 case BINDER_TYPE_FDA: { 3529 struct binder_object ptr_object; 
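/*
 * ptr_object receives the kernel-validated parent object below, while
 * user_object is re-read from the sender so the original user-space
 * offsets remain available for the fd fixups.
 */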
3530 binder_size_t parent_offset; 3531 struct binder_object user_object; 3532 size_t user_parent_size; 3533 struct binder_fd_array_object *fda = 3534 to_binder_fd_array_object(hdr); 3535 size_t num_valid = (buffer_offset - off_start_offset) / 3536 sizeof(binder_size_t); 3537 struct binder_buffer_object *parent = 3538 binder_validate_ptr(target_proc, t->buffer, 3539 &ptr_object, fda->parent, 3540 off_start_offset, 3541 &parent_offset, 3542 num_valid); 3543 if (!parent) { 3544 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3545 proc->pid, thread->pid); 3546 return_error = BR_FAILED_REPLY; 3547 return_error_param = -EINVAL; 3548 return_error_line = __LINE__; 3549 goto err_bad_parent; 3550 } 3551 if (!binder_validate_fixup(target_proc, t->buffer, 3552 off_start_offset, 3553 parent_offset, 3554 fda->parent_offset, 3555 last_fixup_obj_off, 3556 last_fixup_min_off)) { 3557 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3558 proc->pid, thread->pid); 3559 return_error = BR_FAILED_REPLY; 3560 return_error_param = -EINVAL; 3561 return_error_line = __LINE__; 3562 goto err_bad_parent; 3563 } 3564 /* 3565 * We need to read the user version of the parent 3566 * object to get the original user offset 3567 */ 3568 user_parent_size = 3569 binder_get_object(proc, user_buffer, t->buffer, 3570 parent_offset, &user_object); 3571 if (user_parent_size != sizeof(user_object.bbo)) { 3572 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", 3573 proc->pid, thread->pid, 3574 user_parent_size, 3575 sizeof(user_object.bbo)); 3576 return_error = BR_FAILED_REPLY; 3577 return_error_param = -EINVAL; 3578 return_error_line = __LINE__; 3579 goto err_bad_parent; 3580 } 3581 ret = binder_translate_fd_array(&pf_head, fda, 3582 user_buffer, parent, 3583 &user_object.bbo, t, 3584 thread, in_reply_to); 3585 if (!ret) 3586 ret = binder_alloc_copy_to_buffer(&target_proc->alloc, 3587 t->buffer, 3588 object_offset, 3589 fda, sizeof(*fda)); 3590 if (ret) { 3591 binder_txn_error("%d:%d translate fd array failed\n", 3592 thread->pid, proc->pid); 3593 return_error = BR_FAILED_REPLY; 3594 return_error_param = ret > 0 ? 
-EINVAL : ret; 3595 return_error_line = __LINE__; 3596 goto err_translate_failed; 3597 } 3598 last_fixup_obj_off = parent_offset; 3599 last_fixup_min_off = 3600 fda->parent_offset + sizeof(u32) * fda->num_fds; 3601 } break; 3602 case BINDER_TYPE_PTR: { 3603 struct binder_buffer_object *bp = 3604 to_binder_buffer_object(hdr); 3605 size_t buf_left = sg_buf_end_offset - sg_buf_offset; 3606 size_t num_valid; 3607 3608 if (bp->length > buf_left) { 3609 binder_user_error("%d:%d got transaction with too large buffer\n", 3610 proc->pid, thread->pid); 3611 return_error = BR_FAILED_REPLY; 3612 return_error_param = -EINVAL; 3613 return_error_line = __LINE__; 3614 goto err_bad_offset; 3615 } 3616 ret = binder_defer_copy(&sgc_head, sg_buf_offset, 3617 (const void __user *)(uintptr_t)bp->buffer, 3618 bp->length); 3619 if (ret) { 3620 binder_txn_error("%d:%d deferred copy failed\n", 3621 thread->pid, proc->pid); 3622 return_error = BR_FAILED_REPLY; 3623 return_error_param = ret; 3624 return_error_line = __LINE__; 3625 goto err_translate_failed; 3626 } 3627 /* Fixup buffer pointer to target proc address space */ 3628 bp->buffer = t->buffer->user_data + sg_buf_offset; 3629 sg_buf_offset += ALIGN(bp->length, sizeof(u64)); 3630 3631 num_valid = (buffer_offset - off_start_offset) / 3632 sizeof(binder_size_t); 3633 ret = binder_fixup_parent(&pf_head, t, 3634 thread, bp, 3635 off_start_offset, 3636 num_valid, 3637 last_fixup_obj_off, 3638 last_fixup_min_off); 3639 if (ret < 0 || 3640 binder_alloc_copy_to_buffer(&target_proc->alloc, 3641 t->buffer, 3642 object_offset, 3643 bp, sizeof(*bp))) { 3644 binder_txn_error("%d:%d failed to fixup parent\n", 3645 thread->pid, proc->pid); 3646 return_error = BR_FAILED_REPLY; 3647 return_error_param = ret; 3648 return_error_line = __LINE__; 3649 goto err_translate_failed; 3650 } 3651 last_fixup_obj_off = object_offset; 3652 last_fixup_min_off = 0; 3653 } break; 3654 default: 3655 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3656 proc->pid, thread->pid, hdr->type); 3657 return_error = BR_FAILED_REPLY; 3658 return_error_param = -EINVAL; 3659 return_error_line = __LINE__; 3660 goto err_bad_object_type; 3661 } 3662 } 3663 /* Done processing objects, copy the rest of the buffer */ 3664 if (binder_alloc_copy_user_to_buffer( 3665 &target_proc->alloc, 3666 t->buffer, user_offset, 3667 user_buffer + user_offset, 3668 tr->data_size - user_offset)) { 3669 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3670 proc->pid, thread->pid); 3671 return_error = BR_FAILED_REPLY; 3672 return_error_param = -EFAULT; 3673 return_error_line = __LINE__; 3674 goto err_copy_data_failed; 3675 } 3676 3677 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, 3678 &sgc_head, &pf_head); 3679 if (ret) { 3680 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3681 proc->pid, thread->pid); 3682 return_error = BR_FAILED_REPLY; 3683 return_error_param = ret; 3684 return_error_line = __LINE__; 3685 goto err_copy_data_failed; 3686 } 3687 if (t->buffer->oneway_spam_suspect) 3688 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; 3689 else 3690 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3691 t->work.type = BINDER_WORK_TRANSACTION; 3692 3693 if (reply) { 3694 binder_enqueue_thread_work(thread, tcomplete); 3695 binder_inner_proc_lock(target_proc); 3696 if (target_thread->is_dead) { 3697 return_error = BR_DEAD_REPLY; 3698 binder_inner_proc_unlock(target_proc); 3699 goto err_dead_proc_or_thread; 3700 } 3701 
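		/*
		 * Reply path: pop in_reply_to off the target thread's
		 * transaction stack, queue the reply on that thread and wake
		 * it synchronously; in_reply_to is freed once the reply has
		 * been handed over.
		 */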
BUG_ON(t->buffer->async_transaction != 0); 3702 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3703 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3704 target_proc->outstanding_txns++; 3705 binder_inner_proc_unlock(target_proc); 3706 wake_up_interruptible_sync(&target_thread->wait); 3707 binder_free_transaction(in_reply_to); 3708 } else if (!(t->flags & TF_ONE_WAY)) { 3709 BUG_ON(t->buffer->async_transaction != 0); 3710 binder_inner_proc_lock(proc); 3711 /* 3712 * Defer the TRANSACTION_COMPLETE, so we don't return to 3713 * userspace immediately; this allows the target process to 3714 * immediately start processing this transaction, reducing 3715 * latency. We will then return the TRANSACTION_COMPLETE when 3716 * the target replies (or there is an error). 3717 */ 3718 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3719 t->need_reply = 1; 3720 t->from_parent = thread->transaction_stack; 3721 thread->transaction_stack = t; 3722 binder_inner_proc_unlock(proc); 3723 return_error = binder_proc_transaction(t, 3724 target_proc, target_thread); 3725 if (return_error) { 3726 binder_inner_proc_lock(proc); 3727 binder_pop_transaction_ilocked(thread, t); 3728 binder_inner_proc_unlock(proc); 3729 goto err_dead_proc_or_thread; 3730 } 3731 } else { 3732 BUG_ON(target_node == NULL); 3733 BUG_ON(t->buffer->async_transaction != 1); 3734 return_error = binder_proc_transaction(t, target_proc, NULL); 3735 /* 3736 * Let the caller know when async transaction reaches a frozen 3737 * process and is put in a pending queue, waiting for the target 3738 * process to be unfrozen. 3739 */ 3740 if (return_error == BR_TRANSACTION_PENDING_FROZEN) 3741 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING; 3742 binder_enqueue_thread_work(thread, tcomplete); 3743 if (return_error && 3744 return_error != BR_TRANSACTION_PENDING_FROZEN) 3745 goto err_dead_proc_or_thread; 3746 } 3747 if (target_thread) 3748 binder_thread_dec_tmpref(target_thread); 3749 binder_proc_dec_tmpref(target_proc); 3750 if (target_node) 3751 binder_dec_node_tmpref(target_node); 3752 /* 3753 * write barrier to synchronize with initialization 3754 * of log entry 3755 */ 3756 smp_wmb(); 3757 WRITE_ONCE(e->debug_id_done, t_debug_id); 3758 return; 3759 3760 err_dead_proc_or_thread: 3761 binder_txn_error("%d:%d dead process or thread\n", 3762 thread->pid, proc->pid); 3763 return_error_line = __LINE__; 3764 binder_dequeue_work(proc, tcomplete); 3765 err_translate_failed: 3766 err_bad_object_type: 3767 err_bad_offset: 3768 err_bad_parent: 3769 err_copy_data_failed: 3770 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); 3771 binder_free_txn_fixups(t); 3772 trace_binder_transaction_failed_buffer_release(t->buffer); 3773 binder_transaction_buffer_release(target_proc, NULL, t->buffer, 3774 buffer_offset, true); 3775 if (target_node) 3776 binder_dec_node_tmpref(target_node); 3777 target_node = NULL; 3778 t->buffer->transaction = NULL; 3779 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3780 err_binder_alloc_buf_failed: 3781 err_bad_extra_size: 3782 if (secctx) 3783 security_release_secctx(secctx, secctx_sz); 3784 err_get_secctx_failed: 3785 kfree(tcomplete); 3786 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3787 err_alloc_tcomplete_failed: 3788 if (trace_binder_txn_latency_free_enabled()) 3789 binder_txn_latency_free(t); 3790 kfree(t); 3791 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3792 err_alloc_t_failed: 3793 err_bad_todo_list: 3794 err_bad_call_stack: 3795 err_empty_call_stack: 3796 
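	/*
	 * Nothing transaction-related is left to free past this point: the
	 * labels below only drop the remaining references, record the
	 * failure in the transaction log and deliver the error back to the
	 * sender.
	 */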
err_dead_binder: 3797 err_invalid_target_handle: 3798 if (target_node) { 3799 binder_dec_node(target_node, 1, 0); 3800 binder_dec_node_tmpref(target_node); 3801 } 3802 3803 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3804 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n", 3805 proc->pid, thread->pid, reply ? "reply" : 3806 (tr->flags & TF_ONE_WAY ? "async" : "call"), 3807 target_proc ? target_proc->pid : 0, 3808 target_thread ? target_thread->pid : 0, 3809 t_debug_id, return_error, return_error_param, 3810 (u64)tr->data_size, (u64)tr->offsets_size, 3811 return_error_line); 3812 3813 if (target_thread) 3814 binder_thread_dec_tmpref(target_thread); 3815 if (target_proc) 3816 binder_proc_dec_tmpref(target_proc); 3817 3818 { 3819 struct binder_transaction_log_entry *fe; 3820 3821 e->return_error = return_error; 3822 e->return_error_param = return_error_param; 3823 e->return_error_line = return_error_line; 3824 fe = binder_transaction_log_add(&binder_transaction_log_failed); 3825 *fe = *e; 3826 /* 3827 * write barrier to synchronize with initialization 3828 * of log entry 3829 */ 3830 smp_wmb(); 3831 WRITE_ONCE(e->debug_id_done, t_debug_id); 3832 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3833 } 3834 3835 BUG_ON(thread->return_error.cmd != BR_OK); 3836 if (in_reply_to) { 3837 binder_set_txn_from_error(in_reply_to, t_debug_id, 3838 return_error, return_error_param); 3839 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3840 binder_enqueue_thread_work(thread, &thread->return_error.work); 3841 binder_send_failed_reply(in_reply_to, return_error); 3842 } else { 3843 binder_inner_proc_lock(proc); 3844 binder_set_extended_error(&thread->ee, t_debug_id, 3845 return_error, return_error_param); 3846 binder_inner_proc_unlock(proc); 3847 thread->return_error.cmd = return_error; 3848 binder_enqueue_thread_work(thread, &thread->return_error.work); 3849 } 3850 } 3851 3852 static int 3853 binder_request_freeze_notification(struct binder_proc *proc, 3854 struct binder_thread *thread, 3855 struct binder_handle_cookie *handle_cookie) 3856 { 3857 struct binder_ref_freeze *freeze; 3858 struct binder_ref *ref; 3859 3860 freeze = kzalloc(sizeof(*freeze), GFP_KERNEL); 3861 if (!freeze) 3862 return -ENOMEM; 3863 binder_proc_lock(proc); 3864 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false); 3865 if (!ref) { 3866 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n", 3867 proc->pid, thread->pid, handle_cookie->handle); 3868 binder_proc_unlock(proc); 3869 kfree(freeze); 3870 return -EINVAL; 3871 } 3872 3873 binder_node_lock(ref->node); 3874 if (ref->freeze) { 3875 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n", 3876 proc->pid, thread->pid); 3877 binder_node_unlock(ref->node); 3878 binder_proc_unlock(proc); 3879 kfree(freeze); 3880 return -EINVAL; 3881 } 3882 3883 binder_stats_created(BINDER_STAT_FREEZE); 3884 INIT_LIST_HEAD(&freeze->work.entry); 3885 freeze->cookie = handle_cookie->cookie; 3886 freeze->work.type = BINDER_WORK_FROZEN_BINDER; 3887 ref->freeze = freeze; 3888 3889 if (ref->node->proc) { 3890 binder_inner_proc_lock(ref->node->proc); 3891 freeze->is_frozen = ref->node->proc->is_frozen; 3892 binder_inner_proc_unlock(ref->node->proc); 3893 3894 binder_inner_proc_lock(proc); 3895 binder_enqueue_work_ilocked(&freeze->work, &proc->todo); 3896 binder_wakeup_proc_ilocked(proc); 3897 binder_inner_proc_unlock(proc); 3898 } 3899 3900 binder_node_unlock(ref->node); 3901 binder_proc_unlock(proc); 3902 return 0; 3903 } 3904 3905 static 
int 3906 binder_clear_freeze_notification(struct binder_proc *proc, 3907 struct binder_thread *thread, 3908 struct binder_handle_cookie *handle_cookie) 3909 { 3910 struct binder_ref_freeze *freeze; 3911 struct binder_ref *ref; 3912 3913 binder_proc_lock(proc); 3914 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false); 3915 if (!ref) { 3916 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n", 3917 proc->pid, thread->pid, handle_cookie->handle); 3918 binder_proc_unlock(proc); 3919 return -EINVAL; 3920 } 3921 3922 binder_node_lock(ref->node); 3923 3924 if (!ref->freeze) { 3925 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n", 3926 proc->pid, thread->pid); 3927 binder_node_unlock(ref->node); 3928 binder_proc_unlock(proc); 3929 return -EINVAL; 3930 } 3931 freeze = ref->freeze; 3932 binder_inner_proc_lock(proc); 3933 if (freeze->cookie != handle_cookie->cookie) { 3934 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n", 3935 proc->pid, thread->pid, (u64)freeze->cookie, 3936 (u64)handle_cookie->cookie); 3937 binder_inner_proc_unlock(proc); 3938 binder_node_unlock(ref->node); 3939 binder_proc_unlock(proc); 3940 return -EINVAL; 3941 } 3942 ref->freeze = NULL; 3943 /* 3944 * Take the existing freeze object and overwrite its work type. There are three cases here: 3945 * 1. No pending notification. In this case just add the work to the queue. 3946 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we 3947 * should resend with the new work type. 3948 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing 3949 * needs to be done here. 3950 */ 3951 freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION; 3952 if (list_empty(&freeze->work.entry)) { 3953 binder_enqueue_work_ilocked(&freeze->work, &proc->todo); 3954 binder_wakeup_proc_ilocked(proc); 3955 } else if (freeze->sent) { 3956 freeze->resend = true; 3957 } 3958 binder_inner_proc_unlock(proc); 3959 binder_node_unlock(ref->node); 3960 binder_proc_unlock(proc); 3961 return 0; 3962 } 3963 3964 static int 3965 binder_freeze_notification_done(struct binder_proc *proc, 3966 struct binder_thread *thread, 3967 binder_uintptr_t cookie) 3968 { 3969 struct binder_ref_freeze *freeze = NULL; 3970 struct binder_work *w; 3971 3972 binder_inner_proc_lock(proc); 3973 list_for_each_entry(w, &proc->delivered_freeze, entry) { 3974 struct binder_ref_freeze *tmp_freeze = 3975 container_of(w, struct binder_ref_freeze, work); 3976 3977 if (tmp_freeze->cookie == cookie) { 3978 freeze = tmp_freeze; 3979 break; 3980 } 3981 } 3982 if (!freeze) { 3983 binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n", 3984 proc->pid, thread->pid, (u64)cookie); 3985 binder_inner_proc_unlock(proc); 3986 return -EINVAL; 3987 } 3988 binder_dequeue_work_ilocked(&freeze->work); 3989 freeze->sent = false; 3990 if (freeze->resend) { 3991 freeze->resend = false; 3992 binder_enqueue_work_ilocked(&freeze->work, &proc->todo); 3993 binder_wakeup_proc_ilocked(proc); 3994 } 3995 binder_inner_proc_unlock(proc); 3996 return 0; 3997 } 3998 3999 /** 4000 * binder_free_buf() - free the specified buffer 4001 * @proc: binder proc that owns buffer 4002 * @buffer: buffer to be freed 4003 * @is_failure: failed to send transaction 4004 * 4005 * If buffer for an async transaction, enqueue the next async 4006 * transaction from the node. 4007 * 4008 * Cleanup buffer and free it. 
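 *
 * @thread is the binder thread performing the release; it is passed
 * through to binder_release_entire_buffer(). This path is reached from
 * the BC_FREE_BUFFER handler as well as from binder_thread_read() when
 * fd fixups fail. Illustrative userspace usage (an assumption about the
 * typical client, not code from this file) after consuming a
 * BR_TRANSACTION:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t ptr;
 *	} __attribute__((packed)) wr = { BC_FREE_BUFFER, tr.data.ptr.buffer };
 *	// submitted via binder_write_read.write_buffer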
4009 */ 4010 static void 4011 binder_free_buf(struct binder_proc *proc, 4012 struct binder_thread *thread, 4013 struct binder_buffer *buffer, bool is_failure) 4014 { 4015 binder_inner_proc_lock(proc); 4016 if (buffer->transaction) { 4017 buffer->transaction->buffer = NULL; 4018 buffer->transaction = NULL; 4019 } 4020 binder_inner_proc_unlock(proc); 4021 if (buffer->async_transaction && buffer->target_node) { 4022 struct binder_node *buf_node; 4023 struct binder_work *w; 4024 4025 buf_node = buffer->target_node; 4026 binder_node_inner_lock(buf_node); 4027 BUG_ON(!buf_node->has_async_transaction); 4028 BUG_ON(buf_node->proc != proc); 4029 w = binder_dequeue_work_head_ilocked( 4030 &buf_node->async_todo); 4031 if (!w) { 4032 buf_node->has_async_transaction = false; 4033 } else { 4034 binder_enqueue_work_ilocked( 4035 w, &proc->todo); 4036 binder_wakeup_proc_ilocked(proc); 4037 } 4038 binder_node_inner_unlock(buf_node); 4039 } 4040 trace_binder_transaction_buffer_release(buffer); 4041 binder_release_entire_buffer(proc, thread, buffer, is_failure); 4042 binder_alloc_free_buf(&proc->alloc, buffer); 4043 } 4044 4045 static int binder_thread_write(struct binder_proc *proc, 4046 struct binder_thread *thread, 4047 binder_uintptr_t binder_buffer, size_t size, 4048 binder_size_t *consumed) 4049 { 4050 uint32_t cmd; 4051 struct binder_context *context = proc->context; 4052 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 4053 void __user *ptr = buffer + *consumed; 4054 void __user *end = buffer + size; 4055 4056 while (ptr < end && thread->return_error.cmd == BR_OK) { 4057 int ret; 4058 4059 if (get_user(cmd, (uint32_t __user *)ptr)) 4060 return -EFAULT; 4061 ptr += sizeof(uint32_t); 4062 trace_binder_command(cmd); 4063 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 4064 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 4065 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 4066 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 4067 } 4068 switch (cmd) { 4069 case BC_INCREFS: 4070 case BC_ACQUIRE: 4071 case BC_RELEASE: 4072 case BC_DECREFS: { 4073 uint32_t target; 4074 const char *debug_string; 4075 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 4076 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 4077 struct binder_ref_data rdata; 4078 4079 if (get_user(target, (uint32_t __user *)ptr)) 4080 return -EFAULT; 4081 4082 ptr += sizeof(uint32_t); 4083 ret = -1; 4084 if (increment && !target) { 4085 struct binder_node *ctx_mgr_node; 4086 4087 mutex_lock(&context->context_mgr_node_lock); 4088 ctx_mgr_node = context->binder_context_mgr_node; 4089 if (ctx_mgr_node) { 4090 if (ctx_mgr_node->proc == proc) { 4091 binder_user_error("%d:%d context manager tried to acquire desc 0\n", 4092 proc->pid, thread->pid); 4093 mutex_unlock(&context->context_mgr_node_lock); 4094 return -EINVAL; 4095 } 4096 ret = binder_inc_ref_for_node( 4097 proc, ctx_mgr_node, 4098 strong, NULL, &rdata); 4099 } 4100 mutex_unlock(&context->context_mgr_node_lock); 4101 } 4102 if (ret) 4103 ret = binder_update_ref_for_handle( 4104 proc, target, increment, strong, 4105 &rdata); 4106 if (!ret && rdata.desc != target) { 4107 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 4108 proc->pid, thread->pid, 4109 target, rdata.desc); 4110 } 4111 switch (cmd) { 4112 case BC_INCREFS: 4113 debug_string = "IncRefs"; 4114 break; 4115 case BC_ACQUIRE: 4116 debug_string = "Acquire"; 4117 break; 4118 case BC_RELEASE: 4119 debug_string = "Release"; 4120 break; 4121 case BC_DECREFS: 4122 default: 4123 debug_string = 
"DecRefs"; 4124 break; 4125 } 4126 if (ret) { 4127 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 4128 proc->pid, thread->pid, debug_string, 4129 strong, target, ret); 4130 break; 4131 } 4132 binder_debug(BINDER_DEBUG_USER_REFS, 4133 "%d:%d %s ref %d desc %d s %d w %d\n", 4134 proc->pid, thread->pid, debug_string, 4135 rdata.debug_id, rdata.desc, rdata.strong, 4136 rdata.weak); 4137 break; 4138 } 4139 case BC_INCREFS_DONE: 4140 case BC_ACQUIRE_DONE: { 4141 binder_uintptr_t node_ptr; 4142 binder_uintptr_t cookie; 4143 struct binder_node *node; 4144 bool free_node; 4145 4146 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 4147 return -EFAULT; 4148 ptr += sizeof(binder_uintptr_t); 4149 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 4150 return -EFAULT; 4151 ptr += sizeof(binder_uintptr_t); 4152 node = binder_get_node(proc, node_ptr); 4153 if (node == NULL) { 4154 binder_user_error("%d:%d %s u%016llx no match\n", 4155 proc->pid, thread->pid, 4156 cmd == BC_INCREFS_DONE ? 4157 "BC_INCREFS_DONE" : 4158 "BC_ACQUIRE_DONE", 4159 (u64)node_ptr); 4160 break; 4161 } 4162 if (cookie != node->cookie) { 4163 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 4164 proc->pid, thread->pid, 4165 cmd == BC_INCREFS_DONE ? 4166 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 4167 (u64)node_ptr, node->debug_id, 4168 (u64)cookie, (u64)node->cookie); 4169 binder_put_node(node); 4170 break; 4171 } 4172 binder_node_inner_lock(node); 4173 if (cmd == BC_ACQUIRE_DONE) { 4174 if (node->pending_strong_ref == 0) { 4175 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 4176 proc->pid, thread->pid, 4177 node->debug_id); 4178 binder_node_inner_unlock(node); 4179 binder_put_node(node); 4180 break; 4181 } 4182 node->pending_strong_ref = 0; 4183 } else { 4184 if (node->pending_weak_ref == 0) { 4185 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 4186 proc->pid, thread->pid, 4187 node->debug_id); 4188 binder_node_inner_unlock(node); 4189 binder_put_node(node); 4190 break; 4191 } 4192 node->pending_weak_ref = 0; 4193 } 4194 free_node = binder_dec_node_nilocked(node, 4195 cmd == BC_ACQUIRE_DONE, 0); 4196 WARN_ON(free_node); 4197 binder_debug(BINDER_DEBUG_USER_REFS, 4198 "%d:%d %s node %d ls %d lw %d tr %d\n", 4199 proc->pid, thread->pid, 4200 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 4201 node->debug_id, node->local_strong_refs, 4202 node->local_weak_refs, node->tmp_refs); 4203 binder_node_inner_unlock(node); 4204 binder_put_node(node); 4205 break; 4206 } 4207 case BC_ATTEMPT_ACQUIRE: 4208 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 4209 return -EINVAL; 4210 case BC_ACQUIRE_RESULT: 4211 pr_err("BC_ACQUIRE_RESULT not supported\n"); 4212 return -EINVAL; 4213 4214 case BC_FREE_BUFFER: { 4215 binder_uintptr_t data_ptr; 4216 struct binder_buffer *buffer; 4217 4218 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 4219 return -EFAULT; 4220 ptr += sizeof(binder_uintptr_t); 4221 4222 buffer = binder_alloc_prepare_to_free(&proc->alloc, 4223 data_ptr); 4224 if (IS_ERR_OR_NULL(buffer)) { 4225 if (PTR_ERR(buffer) == -EPERM) { 4226 binder_user_error( 4227 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 4228 proc->pid, thread->pid, 4229 (u64)data_ptr); 4230 } else { 4231 binder_user_error( 4232 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 4233 proc->pid, thread->pid, 4234 (u64)data_ptr); 4235 } 4236 break; 4237 } 4238 binder_debug(BINDER_DEBUG_FREE_BUFFER, 4239 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 4240 proc->pid, thread->pid, (u64)data_ptr, 4241 buffer->debug_id, 4242 buffer->transaction ? "active" : "finished"); 4243 binder_free_buf(proc, thread, buffer, false); 4244 break; 4245 } 4246 4247 case BC_TRANSACTION_SG: 4248 case BC_REPLY_SG: { 4249 struct binder_transaction_data_sg tr; 4250 4251 if (copy_from_user(&tr, ptr, sizeof(tr))) 4252 return -EFAULT; 4253 ptr += sizeof(tr); 4254 binder_transaction(proc, thread, &tr.transaction_data, 4255 cmd == BC_REPLY_SG, tr.buffers_size); 4256 break; 4257 } 4258 case BC_TRANSACTION: 4259 case BC_REPLY: { 4260 struct binder_transaction_data tr; 4261 4262 if (copy_from_user(&tr, ptr, sizeof(tr))) 4263 return -EFAULT; 4264 ptr += sizeof(tr); 4265 binder_transaction(proc, thread, &tr, 4266 cmd == BC_REPLY, 0); 4267 break; 4268 } 4269 4270 case BC_REGISTER_LOOPER: 4271 binder_debug(BINDER_DEBUG_THREADS, 4272 "%d:%d BC_REGISTER_LOOPER\n", 4273 proc->pid, thread->pid); 4274 binder_inner_proc_lock(proc); 4275 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 4276 thread->looper |= BINDER_LOOPER_STATE_INVALID; 4277 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 4278 proc->pid, thread->pid); 4279 } else if (proc->requested_threads == 0) { 4280 thread->looper |= BINDER_LOOPER_STATE_INVALID; 4281 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 4282 proc->pid, thread->pid); 4283 } else { 4284 proc->requested_threads--; 4285 proc->requested_threads_started++; 4286 } 4287 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 4288 binder_inner_proc_unlock(proc); 4289 break; 4290 case BC_ENTER_LOOPER: 4291 binder_debug(BINDER_DEBUG_THREADS, 4292 "%d:%d BC_ENTER_LOOPER\n", 4293 proc->pid, thread->pid); 4294 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 4295 thread->looper |= BINDER_LOOPER_STATE_INVALID; 4296 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 4297 proc->pid, thread->pid); 4298 } 4299 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 4300 break; 4301 case BC_EXIT_LOOPER: 4302 binder_debug(BINDER_DEBUG_THREADS, 4303 "%d:%d BC_EXIT_LOOPER\n", 4304 proc->pid, thread->pid); 4305 thread->looper |= BINDER_LOOPER_STATE_EXITED; 4306 break; 4307 4308 case BC_REQUEST_DEATH_NOTIFICATION: 4309 case BC_CLEAR_DEATH_NOTIFICATION: { 4310 uint32_t 
target; 4311 binder_uintptr_t cookie; 4312 struct binder_ref *ref; 4313 struct binder_ref_death *death = NULL; 4314 4315 if (get_user(target, (uint32_t __user *)ptr)) 4316 return -EFAULT; 4317 ptr += sizeof(uint32_t); 4318 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 4319 return -EFAULT; 4320 ptr += sizeof(binder_uintptr_t); 4321 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 4322 /* 4323 * Allocate memory for death notification 4324 * before taking lock 4325 */ 4326 death = kzalloc(sizeof(*death), GFP_KERNEL); 4327 if (death == NULL) { 4328 WARN_ON(thread->return_error.cmd != 4329 BR_OK); 4330 thread->return_error.cmd = BR_ERROR; 4331 binder_enqueue_thread_work( 4332 thread, 4333 &thread->return_error.work); 4334 binder_debug( 4335 BINDER_DEBUG_FAILED_TRANSACTION, 4336 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 4337 proc->pid, thread->pid); 4338 break; 4339 } 4340 } 4341 binder_proc_lock(proc); 4342 ref = binder_get_ref_olocked(proc, target, false); 4343 if (ref == NULL) { 4344 binder_user_error("%d:%d %s invalid ref %d\n", 4345 proc->pid, thread->pid, 4346 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 4347 "BC_REQUEST_DEATH_NOTIFICATION" : 4348 "BC_CLEAR_DEATH_NOTIFICATION", 4349 target); 4350 binder_proc_unlock(proc); 4351 kfree(death); 4352 break; 4353 } 4354 4355 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4356 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 4357 proc->pid, thread->pid, 4358 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 4359 "BC_REQUEST_DEATH_NOTIFICATION" : 4360 "BC_CLEAR_DEATH_NOTIFICATION", 4361 (u64)cookie, ref->data.debug_id, 4362 ref->data.desc, ref->data.strong, 4363 ref->data.weak, ref->node->debug_id); 4364 4365 binder_node_lock(ref->node); 4366 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 4367 if (ref->death) { 4368 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 4369 proc->pid, thread->pid); 4370 binder_node_unlock(ref->node); 4371 binder_proc_unlock(proc); 4372 kfree(death); 4373 break; 4374 } 4375 binder_stats_created(BINDER_STAT_DEATH); 4376 INIT_LIST_HEAD(&death->work.entry); 4377 death->cookie = cookie; 4378 ref->death = death; 4379 if (ref->node->proc == NULL) { 4380 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4381 4382 binder_inner_proc_lock(proc); 4383 binder_enqueue_work_ilocked( 4384 &ref->death->work, &proc->todo); 4385 binder_wakeup_proc_ilocked(proc); 4386 binder_inner_proc_unlock(proc); 4387 } 4388 } else { 4389 if (ref->death == NULL) { 4390 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 4391 proc->pid, thread->pid); 4392 binder_node_unlock(ref->node); 4393 binder_proc_unlock(proc); 4394 break; 4395 } 4396 death = ref->death; 4397 if (death->cookie != cookie) { 4398 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 4399 proc->pid, thread->pid, 4400 (u64)death->cookie, 4401 (u64)cookie); 4402 binder_node_unlock(ref->node); 4403 binder_proc_unlock(proc); 4404 break; 4405 } 4406 ref->death = NULL; 4407 binder_inner_proc_lock(proc); 4408 if (list_empty(&death->work.entry)) { 4409 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 4410 if (thread->looper & 4411 (BINDER_LOOPER_STATE_REGISTERED | 4412 BINDER_LOOPER_STATE_ENTERED)) 4413 binder_enqueue_thread_work_ilocked( 4414 thread, 4415 &death->work); 4416 else { 4417 binder_enqueue_work_ilocked( 4418 &death->work, 4419 &proc->todo); 4420 binder_wakeup_proc_ilocked( 4421 proc); 4422 } 4423 } else { 4424 BUG_ON(death->work.type != 
BINDER_WORK_DEAD_BINDER); 4425 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 4426 } 4427 binder_inner_proc_unlock(proc); 4428 } 4429 binder_node_unlock(ref->node); 4430 binder_proc_unlock(proc); 4431 } break; 4432 case BC_DEAD_BINDER_DONE: { 4433 struct binder_work *w; 4434 binder_uintptr_t cookie; 4435 struct binder_ref_death *death = NULL; 4436 4437 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 4438 return -EFAULT; 4439 4440 ptr += sizeof(cookie); 4441 binder_inner_proc_lock(proc); 4442 list_for_each_entry(w, &proc->delivered_death, 4443 entry) { 4444 struct binder_ref_death *tmp_death = 4445 container_of(w, 4446 struct binder_ref_death, 4447 work); 4448 4449 if (tmp_death->cookie == cookie) { 4450 death = tmp_death; 4451 break; 4452 } 4453 } 4454 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4455 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 4456 proc->pid, thread->pid, (u64)cookie, 4457 death); 4458 if (death == NULL) { 4459 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 4460 proc->pid, thread->pid, (u64)cookie); 4461 binder_inner_proc_unlock(proc); 4462 break; 4463 } 4464 binder_dequeue_work_ilocked(&death->work); 4465 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 4466 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 4467 if (thread->looper & 4468 (BINDER_LOOPER_STATE_REGISTERED | 4469 BINDER_LOOPER_STATE_ENTERED)) 4470 binder_enqueue_thread_work_ilocked( 4471 thread, &death->work); 4472 else { 4473 binder_enqueue_work_ilocked( 4474 &death->work, 4475 &proc->todo); 4476 binder_wakeup_proc_ilocked(proc); 4477 } 4478 } 4479 binder_inner_proc_unlock(proc); 4480 } break; 4481 4482 case BC_REQUEST_FREEZE_NOTIFICATION: { 4483 struct binder_handle_cookie handle_cookie; 4484 int error; 4485 4486 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie))) 4487 return -EFAULT; 4488 ptr += sizeof(handle_cookie); 4489 error = binder_request_freeze_notification(proc, thread, 4490 &handle_cookie); 4491 if (error) 4492 return error; 4493 } break; 4494 4495 case BC_CLEAR_FREEZE_NOTIFICATION: { 4496 struct binder_handle_cookie handle_cookie; 4497 int error; 4498 4499 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie))) 4500 return -EFAULT; 4501 ptr += sizeof(handle_cookie); 4502 error = binder_clear_freeze_notification(proc, thread, &handle_cookie); 4503 if (error) 4504 return error; 4505 } break; 4506 4507 case BC_FREEZE_NOTIFICATION_DONE: { 4508 binder_uintptr_t cookie; 4509 int error; 4510 4511 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 4512 return -EFAULT; 4513 4514 ptr += sizeof(cookie); 4515 error = binder_freeze_notification_done(proc, thread, cookie); 4516 if (error) 4517 return error; 4518 } break; 4519 4520 default: 4521 pr_err("%d:%d unknown command %u\n", 4522 proc->pid, thread->pid, cmd); 4523 return -EINVAL; 4524 } 4525 *consumed = ptr - buffer; 4526 } 4527 return 0; 4528 } 4529 4530 static void binder_stat_br(struct binder_proc *proc, 4531 struct binder_thread *thread, uint32_t cmd) 4532 { 4533 trace_binder_return(cmd); 4534 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 4535 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 4536 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 4537 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 4538 } 4539 } 4540 4541 static int binder_put_node_cmd(struct binder_proc *proc, 4542 struct binder_thread *thread, 4543 void __user **ptrp, 4544 binder_uintptr_t node_ptr, 4545 binder_uintptr_t node_cookie, 4546 int node_debug_id, 4547 uint32_t cmd, const char *cmd_name) 4548 { 4549 void 
__user *ptr = *ptrp; 4550 4551 if (put_user(cmd, (uint32_t __user *)ptr)) 4552 return -EFAULT; 4553 ptr += sizeof(uint32_t); 4554 4555 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 4556 return -EFAULT; 4557 ptr += sizeof(binder_uintptr_t); 4558 4559 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 4560 return -EFAULT; 4561 ptr += sizeof(binder_uintptr_t); 4562 4563 binder_stat_br(proc, thread, cmd); 4564 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 4565 proc->pid, thread->pid, cmd_name, node_debug_id, 4566 (u64)node_ptr, (u64)node_cookie); 4567 4568 *ptrp = ptr; 4569 return 0; 4570 } 4571 4572 static int binder_wait_for_work(struct binder_thread *thread, 4573 bool do_proc_work) 4574 { 4575 DEFINE_WAIT(wait); 4576 struct binder_proc *proc = thread->proc; 4577 int ret = 0; 4578 4579 binder_inner_proc_lock(proc); 4580 for (;;) { 4581 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE); 4582 if (binder_has_work_ilocked(thread, do_proc_work)) 4583 break; 4584 if (do_proc_work) 4585 list_add(&thread->waiting_thread_node, 4586 &proc->waiting_threads); 4587 binder_inner_proc_unlock(proc); 4588 schedule(); 4589 binder_inner_proc_lock(proc); 4590 list_del_init(&thread->waiting_thread_node); 4591 if (signal_pending(current)) { 4592 ret = -EINTR; 4593 break; 4594 } 4595 } 4596 finish_wait(&thread->wait, &wait); 4597 binder_inner_proc_unlock(proc); 4598 4599 return ret; 4600 } 4601 4602 /** 4603 * binder_apply_fd_fixups() - finish fd translation 4604 * @proc: binder_proc associated @t->buffer 4605 * @t: binder transaction with list of fd fixups 4606 * 4607 * Now that we are in the context of the transaction target 4608 * process, we can allocate and install fds. Process the 4609 * list of fds to translate and fixup the buffer with the 4610 * new fds first and only then install the files. 4611 * 4612 * If we fail to allocate an fd, skip the install and release 4613 * any fds that have already been allocated. 
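 *
 * The two-pass structure is deliberate: an fd that has only been
 * allocated can still be released (see binder_free_txn_fixups() in the
 * error path), but fd_install() cannot be undone, so no file is
 * installed until every fixup in the list has an allocated fd and a
 * patched buffer slot.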
4614 */ 4615 static int binder_apply_fd_fixups(struct binder_proc *proc, 4616 struct binder_transaction *t) 4617 { 4618 struct binder_txn_fd_fixup *fixup, *tmp; 4619 int ret = 0; 4620 4621 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 4622 int fd = get_unused_fd_flags(O_CLOEXEC); 4623 4624 if (fd < 0) { 4625 binder_debug(BINDER_DEBUG_TRANSACTION, 4626 "failed fd fixup txn %d fd %d\n", 4627 t->debug_id, fd); 4628 ret = -ENOMEM; 4629 goto err; 4630 } 4631 binder_debug(BINDER_DEBUG_TRANSACTION, 4632 "fd fixup txn %d fd %d\n", 4633 t->debug_id, fd); 4634 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 4635 fixup->target_fd = fd; 4636 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, 4637 fixup->offset, &fd, 4638 sizeof(u32))) { 4639 ret = -EINVAL; 4640 goto err; 4641 } 4642 } 4643 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 4644 fd_install(fixup->target_fd, fixup->file); 4645 list_del(&fixup->fixup_entry); 4646 kfree(fixup); 4647 } 4648 4649 return ret; 4650 4651 err: 4652 binder_free_txn_fixups(t); 4653 return ret; 4654 } 4655 4656 static int binder_thread_read(struct binder_proc *proc, 4657 struct binder_thread *thread, 4658 binder_uintptr_t binder_buffer, size_t size, 4659 binder_size_t *consumed, int non_block) 4660 { 4661 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 4662 void __user *ptr = buffer + *consumed; 4663 void __user *end = buffer + size; 4664 4665 int ret = 0; 4666 int wait_for_proc_work; 4667 4668 if (*consumed == 0) { 4669 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 4670 return -EFAULT; 4671 ptr += sizeof(uint32_t); 4672 } 4673 4674 retry: 4675 binder_inner_proc_lock(proc); 4676 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4677 binder_inner_proc_unlock(proc); 4678 4679 thread->looper |= BINDER_LOOPER_STATE_WAITING; 4680 4681 trace_binder_wait_for_work(wait_for_proc_work, 4682 !!thread->transaction_stack, 4683 !binder_worklist_empty(proc, &thread->todo)); 4684 if (wait_for_proc_work) { 4685 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4686 BINDER_LOOPER_STATE_ENTERED))) { 4687 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 4688 proc->pid, thread->pid, thread->looper); 4689 wait_event_interruptible(binder_user_error_wait, 4690 binder_stop_on_user_error < 2); 4691 } 4692 binder_set_nice(proc->default_priority); 4693 } 4694 4695 if (non_block) { 4696 if (!binder_has_work(thread, wait_for_proc_work)) 4697 ret = -EAGAIN; 4698 } else { 4699 ret = binder_wait_for_work(thread, wait_for_proc_work); 4700 } 4701 4702 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 4703 4704 if (ret) 4705 return ret; 4706 4707 while (1) { 4708 uint32_t cmd; 4709 struct binder_transaction_data_secctx tr; 4710 struct binder_transaction_data *trd = &tr.transaction_data; 4711 struct binder_work *w = NULL; 4712 struct list_head *list = NULL; 4713 struct binder_transaction *t = NULL; 4714 struct binder_thread *t_from; 4715 size_t trsize = sizeof(*trd); 4716 4717 binder_inner_proc_lock(proc); 4718 if (!binder_worklist_empty_ilocked(&thread->todo)) 4719 list = &thread->todo; 4720 else if (!binder_worklist_empty_ilocked(&proc->todo) && 4721 wait_for_proc_work) 4722 list = &proc->todo; 4723 else { 4724 binder_inner_proc_unlock(proc); 4725 4726 /* no data added */ 4727 if (ptr - buffer == 4 && !thread->looper_need_return) 4728 goto retry; 4729 break; 4730 } 4731 4732 if (end - ptr < sizeof(tr) + 4) { 4733 
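			/*
			 * Not enough room left in the read buffer for a
			 * 32-bit command plus the largest transaction
			 * payload: leave the work item queued and return
			 * what has been written so far.
			 */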
binder_inner_proc_unlock(proc); 4734 break; 4735 } 4736 w = binder_dequeue_work_head_ilocked(list); 4737 if (binder_worklist_empty_ilocked(&thread->todo)) 4738 thread->process_todo = false; 4739 4740 switch (w->type) { 4741 case BINDER_WORK_TRANSACTION: { 4742 binder_inner_proc_unlock(proc); 4743 t = container_of(w, struct binder_transaction, work); 4744 } break; 4745 case BINDER_WORK_RETURN_ERROR: { 4746 struct binder_error *e = container_of( 4747 w, struct binder_error, work); 4748 4749 WARN_ON(e->cmd == BR_OK); 4750 binder_inner_proc_unlock(proc); 4751 if (put_user(e->cmd, (uint32_t __user *)ptr)) 4752 return -EFAULT; 4753 cmd = e->cmd; 4754 e->cmd = BR_OK; 4755 ptr += sizeof(uint32_t); 4756 4757 binder_stat_br(proc, thread, cmd); 4758 } break; 4759 case BINDER_WORK_TRANSACTION_COMPLETE: 4760 case BINDER_WORK_TRANSACTION_PENDING: 4761 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: { 4762 if (proc->oneway_spam_detection_enabled && 4763 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT) 4764 cmd = BR_ONEWAY_SPAM_SUSPECT; 4765 else if (w->type == BINDER_WORK_TRANSACTION_PENDING) 4766 cmd = BR_TRANSACTION_PENDING_FROZEN; 4767 else 4768 cmd = BR_TRANSACTION_COMPLETE; 4769 binder_inner_proc_unlock(proc); 4770 kfree(w); 4771 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4772 if (put_user(cmd, (uint32_t __user *)ptr)) 4773 return -EFAULT; 4774 ptr += sizeof(uint32_t); 4775 4776 binder_stat_br(proc, thread, cmd); 4777 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 4778 "%d:%d BR_TRANSACTION_COMPLETE\n", 4779 proc->pid, thread->pid); 4780 } break; 4781 case BINDER_WORK_NODE: { 4782 struct binder_node *node = container_of(w, struct binder_node, work); 4783 int strong, weak; 4784 binder_uintptr_t node_ptr = node->ptr; 4785 binder_uintptr_t node_cookie = node->cookie; 4786 int node_debug_id = node->debug_id; 4787 int has_weak_ref; 4788 int has_strong_ref; 4789 void __user *orig_ptr = ptr; 4790 4791 BUG_ON(proc != node->proc); 4792 strong = node->internal_strong_refs || 4793 node->local_strong_refs; 4794 weak = !hlist_empty(&node->refs) || 4795 node->local_weak_refs || 4796 node->tmp_refs || strong; 4797 has_strong_ref = node->has_strong_ref; 4798 has_weak_ref = node->has_weak_ref; 4799 4800 if (weak && !has_weak_ref) { 4801 node->has_weak_ref = 1; 4802 node->pending_weak_ref = 1; 4803 node->local_weak_refs++; 4804 } 4805 if (strong && !has_strong_ref) { 4806 node->has_strong_ref = 1; 4807 node->pending_strong_ref = 1; 4808 node->local_strong_refs++; 4809 } 4810 if (!strong && has_strong_ref) 4811 node->has_strong_ref = 0; 4812 if (!weak && has_weak_ref) 4813 node->has_weak_ref = 0; 4814 if (!weak && !strong) { 4815 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4816 "%d:%d node %d u%016llx c%016llx deleted\n", 4817 proc->pid, thread->pid, 4818 node_debug_id, 4819 (u64)node_ptr, 4820 (u64)node_cookie); 4821 rb_erase(&node->rb_node, &proc->nodes); 4822 binder_inner_proc_unlock(proc); 4823 binder_node_lock(node); 4824 /* 4825 * Acquire the node lock before freeing the 4826 * node to serialize with other threads that 4827 * may have been holding the node lock while 4828 * decrementing this node (avoids race where 4829 * this thread frees while the other thread 4830 * is unlocking the node after the final 4831 * decrement) 4832 */ 4833 binder_node_unlock(node); 4834 binder_free_node(node); 4835 } else 4836 binder_inner_proc_unlock(proc); 4837 4838 if (weak && !has_weak_ref) 4839 ret = binder_put_node_cmd( 4840 proc, thread, &ptr, node_ptr, 4841 node_cookie, node_debug_id, 4842 BR_INCREFS, 
"BR_INCREFS"); 4843 if (!ret && strong && !has_strong_ref) 4844 ret = binder_put_node_cmd( 4845 proc, thread, &ptr, node_ptr, 4846 node_cookie, node_debug_id, 4847 BR_ACQUIRE, "BR_ACQUIRE"); 4848 if (!ret && !strong && has_strong_ref) 4849 ret = binder_put_node_cmd( 4850 proc, thread, &ptr, node_ptr, 4851 node_cookie, node_debug_id, 4852 BR_RELEASE, "BR_RELEASE"); 4853 if (!ret && !weak && has_weak_ref) 4854 ret = binder_put_node_cmd( 4855 proc, thread, &ptr, node_ptr, 4856 node_cookie, node_debug_id, 4857 BR_DECREFS, "BR_DECREFS"); 4858 if (orig_ptr == ptr) 4859 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4860 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4861 proc->pid, thread->pid, 4862 node_debug_id, 4863 (u64)node_ptr, 4864 (u64)node_cookie); 4865 if (ret) 4866 return ret; 4867 } break; 4868 case BINDER_WORK_DEAD_BINDER: 4869 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4870 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4871 struct binder_ref_death *death; 4872 uint32_t cmd; 4873 binder_uintptr_t cookie; 4874 4875 death = container_of(w, struct binder_ref_death, work); 4876 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4877 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4878 else 4879 cmd = BR_DEAD_BINDER; 4880 cookie = death->cookie; 4881 4882 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4883 "%d:%d %s %016llx\n", 4884 proc->pid, thread->pid, 4885 cmd == BR_DEAD_BINDER ? 4886 "BR_DEAD_BINDER" : 4887 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4888 (u64)cookie); 4889 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4890 binder_inner_proc_unlock(proc); 4891 kfree(death); 4892 binder_stats_deleted(BINDER_STAT_DEATH); 4893 } else { 4894 binder_enqueue_work_ilocked( 4895 w, &proc->delivered_death); 4896 binder_inner_proc_unlock(proc); 4897 } 4898 if (put_user(cmd, (uint32_t __user *)ptr)) 4899 return -EFAULT; 4900 ptr += sizeof(uint32_t); 4901 if (put_user(cookie, 4902 (binder_uintptr_t __user *)ptr)) 4903 return -EFAULT; 4904 ptr += sizeof(binder_uintptr_t); 4905 binder_stat_br(proc, thread, cmd); 4906 if (cmd == BR_DEAD_BINDER) 4907 goto done; /* DEAD_BINDER notifications can cause transactions */ 4908 } break; 4909 4910 case BINDER_WORK_FROZEN_BINDER: { 4911 struct binder_ref_freeze *freeze; 4912 struct binder_frozen_state_info info; 4913 4914 memset(&info, 0, sizeof(info)); 4915 freeze = container_of(w, struct binder_ref_freeze, work); 4916 info.is_frozen = freeze->is_frozen; 4917 info.cookie = freeze->cookie; 4918 freeze->sent = true; 4919 binder_enqueue_work_ilocked(w, &proc->delivered_freeze); 4920 binder_inner_proc_unlock(proc); 4921 4922 if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr)) 4923 return -EFAULT; 4924 ptr += sizeof(uint32_t); 4925 if (copy_to_user(ptr, &info, sizeof(info))) 4926 return -EFAULT; 4927 ptr += sizeof(info); 4928 binder_stat_br(proc, thread, BR_FROZEN_BINDER); 4929 goto done; /* BR_FROZEN_BINDER notifications can cause transactions */ 4930 } break; 4931 4932 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: { 4933 struct binder_ref_freeze *freeze = 4934 container_of(w, struct binder_ref_freeze, work); 4935 binder_uintptr_t cookie = freeze->cookie; 4936 4937 binder_inner_proc_unlock(proc); 4938 kfree(freeze); 4939 binder_stats_deleted(BINDER_STAT_FREEZE); 4940 if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr)) 4941 return -EFAULT; 4942 ptr += sizeof(uint32_t); 4943 if (put_user(cookie, (binder_uintptr_t __user *)ptr)) 4944 return -EFAULT; 4945 ptr += sizeof(binder_uintptr_t); 4946 binder_stat_br(proc, thread, 
BR_CLEAR_FREEZE_NOTIFICATION_DONE); 4947 } break; 4948 4949 default: 4950 binder_inner_proc_unlock(proc); 4951 pr_err("%d:%d: bad work type %d\n", 4952 proc->pid, thread->pid, w->type); 4953 break; 4954 } 4955 4956 if (!t) 4957 continue; 4958 4959 BUG_ON(t->buffer == NULL); 4960 if (t->buffer->target_node) { 4961 struct binder_node *target_node = t->buffer->target_node; 4962 4963 trd->target.ptr = target_node->ptr; 4964 trd->cookie = target_node->cookie; 4965 t->saved_priority = task_nice(current); 4966 if (t->priority < target_node->min_priority && 4967 !(t->flags & TF_ONE_WAY)) 4968 binder_set_nice(t->priority); 4969 else if (!(t->flags & TF_ONE_WAY) || 4970 t->saved_priority > target_node->min_priority) 4971 binder_set_nice(target_node->min_priority); 4972 cmd = BR_TRANSACTION; 4973 } else { 4974 trd->target.ptr = 0; 4975 trd->cookie = 0; 4976 cmd = BR_REPLY; 4977 } 4978 trd->code = t->code; 4979 trd->flags = t->flags; 4980 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4981 4982 t_from = binder_get_txn_from(t); 4983 if (t_from) { 4984 struct task_struct *sender = t_from->proc->tsk; 4985 4986 trd->sender_pid = 4987 task_tgid_nr_ns(sender, 4988 task_active_pid_ns(current)); 4989 } else { 4990 trd->sender_pid = 0; 4991 } 4992 4993 ret = binder_apply_fd_fixups(proc, t); 4994 if (ret) { 4995 struct binder_buffer *buffer = t->buffer; 4996 bool oneway = !!(t->flags & TF_ONE_WAY); 4997 int tid = t->debug_id; 4998 4999 if (t_from) 5000 binder_thread_dec_tmpref(t_from); 5001 buffer->transaction = NULL; 5002 binder_cleanup_transaction(t, "fd fixups failed", 5003 BR_FAILED_REPLY); 5004 binder_free_buf(proc, thread, buffer, true); 5005 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 5006 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 5007 proc->pid, thread->pid, 5008 oneway ? "async " : 5009 (cmd == BR_REPLY ? "reply " : ""), 5010 tid, BR_FAILED_REPLY, ret, __LINE__); 5011 if (cmd == BR_REPLY) { 5012 cmd = BR_FAILED_REPLY; 5013 if (put_user(cmd, (uint32_t __user *)ptr)) 5014 return -EFAULT; 5015 ptr += sizeof(uint32_t); 5016 binder_stat_br(proc, thread, cmd); 5017 break; 5018 } 5019 continue; 5020 } 5021 trd->data_size = t->buffer->data_size; 5022 trd->offsets_size = t->buffer->offsets_size; 5023 trd->data.ptr.buffer = t->buffer->user_data; 5024 trd->data.ptr.offsets = trd->data.ptr.buffer + 5025 ALIGN(t->buffer->data_size, 5026 sizeof(void *)); 5027 5028 tr.secctx = t->security_ctx; 5029 if (t->security_ctx) { 5030 cmd = BR_TRANSACTION_SEC_CTX; 5031 trsize = sizeof(tr); 5032 } 5033 if (put_user(cmd, (uint32_t __user *)ptr)) { 5034 if (t_from) 5035 binder_thread_dec_tmpref(t_from); 5036 5037 binder_cleanup_transaction(t, "put_user failed", 5038 BR_FAILED_REPLY); 5039 5040 return -EFAULT; 5041 } 5042 ptr += sizeof(uint32_t); 5043 if (copy_to_user(ptr, &tr, trsize)) { 5044 if (t_from) 5045 binder_thread_dec_tmpref(t_from); 5046 5047 binder_cleanup_transaction(t, "copy_to_user failed", 5048 BR_FAILED_REPLY); 5049 5050 return -EFAULT; 5051 } 5052 ptr += trsize; 5053 5054 trace_binder_transaction_received(t); 5055 binder_stat_br(proc, thread, cmd); 5056 binder_debug(BINDER_DEBUG_TRANSACTION, 5057 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n", 5058 proc->pid, thread->pid, 5059 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 5060 (cmd == BR_TRANSACTION_SEC_CTX) ? 5061 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", 5062 t->debug_id, t_from ? t_from->proc->pid : 0, 5063 t_from ? 
t_from->pid : 0, cmd, 5064 t->buffer->data_size, t->buffer->offsets_size, 5065 (u64)trd->data.ptr.buffer, 5066 (u64)trd->data.ptr.offsets); 5067 5068 if (t_from) 5069 binder_thread_dec_tmpref(t_from); 5070 t->buffer->allow_user_free = 1; 5071 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { 5072 binder_inner_proc_lock(thread->proc); 5073 t->to_parent = thread->transaction_stack; 5074 t->to_thread = thread; 5075 thread->transaction_stack = t; 5076 binder_inner_proc_unlock(thread->proc); 5077 } else { 5078 binder_free_transaction(t); 5079 } 5080 break; 5081 } 5082 5083 done: 5084 5085 *consumed = ptr - buffer; 5086 binder_inner_proc_lock(proc); 5087 if (proc->requested_threads == 0 && 5088 list_empty(&thread->proc->waiting_threads) && 5089 proc->requested_threads_started < proc->max_threads && 5090 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 5091 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 5092 /*spawn a new thread if we leave this out */) { 5093 proc->requested_threads++; 5094 binder_inner_proc_unlock(proc); 5095 binder_debug(BINDER_DEBUG_THREADS, 5096 "%d:%d BR_SPAWN_LOOPER\n", 5097 proc->pid, thread->pid); 5098 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 5099 return -EFAULT; 5100 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 5101 } else 5102 binder_inner_proc_unlock(proc); 5103 return 0; 5104 } 5105 5106 static void binder_release_work(struct binder_proc *proc, 5107 struct list_head *list) 5108 { 5109 struct binder_work *w; 5110 enum binder_work_type wtype; 5111 5112 while (1) { 5113 binder_inner_proc_lock(proc); 5114 w = binder_dequeue_work_head_ilocked(list); 5115 wtype = w ? w->type : 0; 5116 binder_inner_proc_unlock(proc); 5117 if (!w) 5118 return; 5119 5120 switch (wtype) { 5121 case BINDER_WORK_TRANSACTION: { 5122 struct binder_transaction *t; 5123 5124 t = container_of(w, struct binder_transaction, work); 5125 5126 binder_cleanup_transaction(t, "process died.", 5127 BR_DEAD_REPLY); 5128 } break; 5129 case BINDER_WORK_RETURN_ERROR: { 5130 struct binder_error *e = container_of( 5131 w, struct binder_error, work); 5132 5133 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 5134 "undelivered TRANSACTION_ERROR: %u\n", 5135 e->cmd); 5136 } break; 5137 case BINDER_WORK_TRANSACTION_PENDING: 5138 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: 5139 case BINDER_WORK_TRANSACTION_COMPLETE: { 5140 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 5141 "undelivered TRANSACTION_COMPLETE\n"); 5142 kfree(w); 5143 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 5144 } break; 5145 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5146 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 5147 struct binder_ref_death *death; 5148 5149 death = container_of(w, struct binder_ref_death, work); 5150 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 5151 "undelivered death notification, %016llx\n", 5152 (u64)death->cookie); 5153 kfree(death); 5154 binder_stats_deleted(BINDER_STAT_DEATH); 5155 } break; 5156 case BINDER_WORK_NODE: 5157 break; 5158 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: { 5159 struct binder_ref_freeze *freeze; 5160 5161 freeze = container_of(w, struct binder_ref_freeze, work); 5162 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 5163 "undelivered freeze notification, %016llx\n", 5164 (u64)freeze->cookie); 5165 kfree(freeze); 5166 binder_stats_deleted(BINDER_STAT_FREEZE); 5167 } break; 5168 default: 5169 pr_err("unexpected work type, %d, not freed\n", 5170 wtype); 5171 break; 5172 } 5173 } 5174 5175 } 5176 5177 static struct binder_thread *binder_get_thread_ilocked( 5178 
struct binder_proc *proc, struct binder_thread *new_thread) 5179 { 5180 struct binder_thread *thread = NULL; 5181 struct rb_node *parent = NULL; 5182 struct rb_node **p = &proc->threads.rb_node; 5183 5184 while (*p) { 5185 parent = *p; 5186 thread = rb_entry(parent, struct binder_thread, rb_node); 5187 5188 if (current->pid < thread->pid) 5189 p = &(*p)->rb_left; 5190 else if (current->pid > thread->pid) 5191 p = &(*p)->rb_right; 5192 else 5193 return thread; 5194 } 5195 if (!new_thread) 5196 return NULL; 5197 thread = new_thread; 5198 binder_stats_created(BINDER_STAT_THREAD); 5199 thread->proc = proc; 5200 thread->pid = current->pid; 5201 atomic_set(&thread->tmp_ref, 0); 5202 init_waitqueue_head(&thread->wait); 5203 INIT_LIST_HEAD(&thread->todo); 5204 rb_link_node(&thread->rb_node, parent, p); 5205 rb_insert_color(&thread->rb_node, &proc->threads); 5206 thread->looper_need_return = true; 5207 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 5208 thread->return_error.cmd = BR_OK; 5209 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 5210 thread->reply_error.cmd = BR_OK; 5211 thread->ee.command = BR_OK; 5212 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 5213 return thread; 5214 } 5215 5216 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 5217 { 5218 struct binder_thread *thread; 5219 struct binder_thread *new_thread; 5220 5221 binder_inner_proc_lock(proc); 5222 thread = binder_get_thread_ilocked(proc, NULL); 5223 binder_inner_proc_unlock(proc); 5224 if (!thread) { 5225 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 5226 if (new_thread == NULL) 5227 return NULL; 5228 binder_inner_proc_lock(proc); 5229 thread = binder_get_thread_ilocked(proc, new_thread); 5230 binder_inner_proc_unlock(proc); 5231 if (thread != new_thread) 5232 kfree(new_thread); 5233 } 5234 return thread; 5235 } 5236 5237 static void binder_free_proc(struct binder_proc *proc) 5238 { 5239 struct binder_device *device; 5240 5241 BUG_ON(!list_empty(&proc->todo)); 5242 BUG_ON(!list_empty(&proc->delivered_death)); 5243 if (proc->outstanding_txns) 5244 pr_warn("%s: Unexpected outstanding_txns %d\n", 5245 __func__, proc->outstanding_txns); 5246 device = container_of(proc->context, struct binder_device, context); 5247 if (refcount_dec_and_test(&device->ref)) { 5248 kfree(proc->context->name); 5249 kfree(device); 5250 } 5251 binder_alloc_deferred_release(&proc->alloc); 5252 put_task_struct(proc->tsk); 5253 put_cred(proc->cred); 5254 binder_stats_deleted(BINDER_STAT_PROC); 5255 dbitmap_free(&proc->dmap); 5256 kfree(proc); 5257 } 5258 5259 static void binder_free_thread(struct binder_thread *thread) 5260 { 5261 BUG_ON(!list_empty(&thread->todo)); 5262 binder_stats_deleted(BINDER_STAT_THREAD); 5263 binder_proc_dec_tmpref(thread->proc); 5264 kfree(thread); 5265 } 5266 5267 static int binder_thread_release(struct binder_proc *proc, 5268 struct binder_thread *thread) 5269 { 5270 struct binder_transaction *t; 5271 struct binder_transaction *send_reply = NULL; 5272 int active_transactions = 0; 5273 struct binder_transaction *last_t = NULL; 5274 5275 binder_inner_proc_lock(thread->proc); 5276 /* 5277 * take a ref on the proc so it survives 5278 * after we remove this thread from proc->threads. 
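	 * (proc->tmp_ref is a plain int protected by the inner lock, unlike
	 * the atomic thread->tmp_ref taken just below.)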
5279 * The corresponding dec is when we actually 5280 * free the thread in binder_free_thread() 5281 */ 5282 proc->tmp_ref++; 5283 /* 5284 * take a ref on this thread to ensure it 5285 * survives while we are releasing it 5286 */ 5287 atomic_inc(&thread->tmp_ref); 5288 rb_erase(&thread->rb_node, &proc->threads); 5289 t = thread->transaction_stack; 5290 if (t) { 5291 spin_lock(&t->lock); 5292 if (t->to_thread == thread) 5293 send_reply = t; 5294 } else { 5295 __acquire(&t->lock); 5296 } 5297 thread->is_dead = true; 5298 5299 while (t) { 5300 last_t = t; 5301 active_transactions++; 5302 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 5303 "release %d:%d transaction %d %s, still active\n", 5304 proc->pid, thread->pid, 5305 t->debug_id, 5306 (t->to_thread == thread) ? "in" : "out"); 5307 5308 if (t->to_thread == thread) { 5309 thread->proc->outstanding_txns--; 5310 t->to_proc = NULL; 5311 t->to_thread = NULL; 5312 if (t->buffer) { 5313 t->buffer->transaction = NULL; 5314 t->buffer = NULL; 5315 } 5316 t = t->to_parent; 5317 } else if (t->from == thread) { 5318 t->from = NULL; 5319 t = t->from_parent; 5320 } else 5321 BUG(); 5322 spin_unlock(&last_t->lock); 5323 if (t) 5324 spin_lock(&t->lock); 5325 else 5326 __acquire(&t->lock); 5327 } 5328 /* annotation for sparse, lock not acquired in last iteration above */ 5329 __release(&t->lock); 5330 5331 /* 5332 * If this thread used poll, make sure we remove the waitqueue from any 5333 * poll data structures holding it. 5334 */ 5335 if (thread->looper & BINDER_LOOPER_STATE_POLL) 5336 wake_up_pollfree(&thread->wait); 5337 5338 binder_inner_proc_unlock(thread->proc); 5339 5340 /* 5341 * This is needed to avoid races between wake_up_pollfree() above and 5342 * someone else removing the last entry from the queue for other reasons 5343 * (e.g. ep_remove_wait_queue() being called due to an epoll file 5344 * descriptor being closed). Such other users hold an RCU read lock, so 5345 * we can be sure they're done after we call synchronize_rcu(). 
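	 * Re-checking BINDER_LOOPER_STATE_POLL below keeps this RCU grace
	 * period off the common path: threads that never polled skip it.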
5346 */ 5347 if (thread->looper & BINDER_LOOPER_STATE_POLL) 5348 synchronize_rcu(); 5349 5350 if (send_reply) 5351 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 5352 binder_release_work(proc, &thread->todo); 5353 binder_thread_dec_tmpref(thread); 5354 return active_transactions; 5355 } 5356 5357 static __poll_t binder_poll(struct file *filp, 5358 struct poll_table_struct *wait) 5359 { 5360 struct binder_proc *proc = filp->private_data; 5361 struct binder_thread *thread = NULL; 5362 bool wait_for_proc_work; 5363 5364 thread = binder_get_thread(proc); 5365 if (!thread) 5366 return EPOLLERR; 5367 5368 binder_inner_proc_lock(thread->proc); 5369 thread->looper |= BINDER_LOOPER_STATE_POLL; 5370 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 5371 5372 binder_inner_proc_unlock(thread->proc); 5373 5374 poll_wait(filp, &thread->wait, wait); 5375 5376 if (binder_has_work(thread, wait_for_proc_work)) 5377 return EPOLLIN; 5378 5379 return 0; 5380 } 5381 5382 static int binder_ioctl_write_read(struct file *filp, unsigned long arg, 5383 struct binder_thread *thread) 5384 { 5385 int ret = 0; 5386 struct binder_proc *proc = filp->private_data; 5387 void __user *ubuf = (void __user *)arg; 5388 struct binder_write_read bwr; 5389 5390 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 5391 ret = -EFAULT; 5392 goto out; 5393 } 5394 binder_debug(BINDER_DEBUG_READ_WRITE, 5395 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 5396 proc->pid, thread->pid, 5397 (u64)bwr.write_size, (u64)bwr.write_buffer, 5398 (u64)bwr.read_size, (u64)bwr.read_buffer); 5399 5400 if (bwr.write_size > 0) { 5401 ret = binder_thread_write(proc, thread, 5402 bwr.write_buffer, 5403 bwr.write_size, 5404 &bwr.write_consumed); 5405 trace_binder_write_done(ret); 5406 if (ret < 0) { 5407 bwr.read_consumed = 0; 5408 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 5409 ret = -EFAULT; 5410 goto out; 5411 } 5412 } 5413 if (bwr.read_size > 0) { 5414 ret = binder_thread_read(proc, thread, bwr.read_buffer, 5415 bwr.read_size, 5416 &bwr.read_consumed, 5417 filp->f_flags & O_NONBLOCK); 5418 trace_binder_read_done(ret); 5419 binder_inner_proc_lock(proc); 5420 if (!binder_worklist_empty_ilocked(&proc->todo)) 5421 binder_wakeup_proc_ilocked(proc); 5422 binder_inner_proc_unlock(proc); 5423 if (ret < 0) { 5424 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 5425 ret = -EFAULT; 5426 goto out; 5427 } 5428 } 5429 binder_debug(BINDER_DEBUG_READ_WRITE, 5430 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 5431 proc->pid, thread->pid, 5432 (u64)bwr.write_consumed, (u64)bwr.write_size, 5433 (u64)bwr.read_consumed, (u64)bwr.read_size); 5434 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 5435 ret = -EFAULT; 5436 goto out; 5437 } 5438 out: 5439 return ret; 5440 } 5441 5442 static int binder_ioctl_set_ctx_mgr(struct file *filp, 5443 struct flat_binder_object *fbo) 5444 { 5445 int ret = 0; 5446 struct binder_proc *proc = filp->private_data; 5447 struct binder_context *context = proc->context; 5448 struct binder_node *new_node; 5449 kuid_t curr_euid = current_euid(); 5450 5451 mutex_lock(&context->context_mgr_node_lock); 5452 if (context->binder_context_mgr_node) { 5453 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 5454 ret = -EBUSY; 5455 goto out; 5456 } 5457 ret = security_binder_set_context_mgr(proc->cred); 5458 if (ret < 0) 5459 goto out; 5460 if (uid_valid(context->binder_context_mgr_uid)) { 5461 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 5462 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 5463 
from_kuid(&init_user_ns, curr_euid), 5464 from_kuid(&init_user_ns, 5465 context->binder_context_mgr_uid)); 5466 ret = -EPERM; 5467 goto out; 5468 } 5469 } else { 5470 context->binder_context_mgr_uid = curr_euid; 5471 } 5472 new_node = binder_new_node(proc, fbo); 5473 if (!new_node) { 5474 ret = -ENOMEM; 5475 goto out; 5476 } 5477 binder_node_lock(new_node); 5478 new_node->local_weak_refs++; 5479 new_node->local_strong_refs++; 5480 new_node->has_strong_ref = 1; 5481 new_node->has_weak_ref = 1; 5482 context->binder_context_mgr_node = new_node; 5483 binder_node_unlock(new_node); 5484 binder_put_node(new_node); 5485 out: 5486 mutex_unlock(&context->context_mgr_node_lock); 5487 return ret; 5488 } 5489 5490 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, 5491 struct binder_node_info_for_ref *info) 5492 { 5493 struct binder_node *node; 5494 struct binder_context *context = proc->context; 5495 __u32 handle = info->handle; 5496 5497 if (info->strong_count || info->weak_count || info->reserved1 || 5498 info->reserved2 || info->reserved3) { 5499 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", 5500 proc->pid); 5501 return -EINVAL; 5502 } 5503 5504 /* This ioctl may only be used by the context manager */ 5505 mutex_lock(&context->context_mgr_node_lock); 5506 if (!context->binder_context_mgr_node || 5507 context->binder_context_mgr_node->proc != proc) { 5508 mutex_unlock(&context->context_mgr_node_lock); 5509 return -EPERM; 5510 } 5511 mutex_unlock(&context->context_mgr_node_lock); 5512 5513 node = binder_get_node_from_ref(proc, handle, true, NULL); 5514 if (!node) 5515 return -EINVAL; 5516 5517 info->strong_count = node->local_strong_refs + 5518 node->internal_strong_refs; 5519 info->weak_count = node->local_weak_refs; 5520 5521 binder_put_node(node); 5522 5523 return 0; 5524 } 5525 5526 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 5527 struct binder_node_debug_info *info) 5528 { 5529 struct rb_node *n; 5530 binder_uintptr_t ptr = info->ptr; 5531 5532 memset(info, 0, sizeof(*info)); 5533 5534 binder_inner_proc_lock(proc); 5535 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5536 struct binder_node *node = rb_entry(n, struct binder_node, 5537 rb_node); 5538 if (node->ptr > ptr) { 5539 info->ptr = node->ptr; 5540 info->cookie = node->cookie; 5541 info->has_strong_ref = node->has_strong_ref; 5542 info->has_weak_ref = node->has_weak_ref; 5543 break; 5544 } 5545 } 5546 binder_inner_proc_unlock(proc); 5547 5548 return 0; 5549 } 5550 5551 static bool binder_txns_pending_ilocked(struct binder_proc *proc) 5552 { 5553 struct rb_node *n; 5554 struct binder_thread *thread; 5555 5556 if (proc->outstanding_txns > 0) 5557 return true; 5558 5559 for (n = rb_first(&proc->threads); n; n = rb_next(n)) { 5560 thread = rb_entry(n, struct binder_thread, rb_node); 5561 if (thread->transaction_stack) 5562 return true; 5563 } 5564 return false; 5565 } 5566 5567 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen) 5568 { 5569 struct binder_node *prev = NULL; 5570 struct rb_node *n; 5571 struct binder_ref *ref; 5572 5573 binder_inner_proc_lock(proc); 5574 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) { 5575 struct binder_node *node; 5576 5577 node = rb_entry(n, struct binder_node, rb_node); 5578 binder_inc_node_tmpref_ilocked(node); 5579 binder_inner_proc_unlock(proc); 5580 if (prev) 5581 binder_put_node(prev); 5582 binder_node_lock(node); 5583 hlist_for_each_entry(ref, &node->refs, node_entry) { 5584 
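/*
 * Each ref that registered for freeze notifications gets a
 * BINDER_WORK_FROZEN_BINDER item here. A sketch of the resulting
 * userspace-visible handshake (the commands are real UAPI, the
 * sequence is illustrative):
 *
 *   BC_REQUEST_FREEZE_NOTIFICATION   (ref's process opts in)
 *   ... target process freezes or unfreezes ...
 *   BR_FROZEN_BINDER                 (reports the is_frozen state)
 *   BC_FREEZE_NOTIFICATION_DONE      (ack; enables resend/cleanup)
 */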
/* 5585 * Need the node lock to synchronize 5586 * with new notification requests and the 5587 * inner lock to synchronize with queued 5588 * freeze notifications. 5589 */ 5590 binder_inner_proc_lock(ref->proc); 5591 if (!ref->freeze) { 5592 binder_inner_proc_unlock(ref->proc); 5593 continue; 5594 } 5595 ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER; 5596 if (list_empty(&ref->freeze->work.entry)) { 5597 ref->freeze->is_frozen = is_frozen; 5598 binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo); 5599 binder_wakeup_proc_ilocked(ref->proc); 5600 } else { 5601 if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen) 5602 ref->freeze->resend = true; 5603 ref->freeze->is_frozen = is_frozen; 5604 } 5605 binder_inner_proc_unlock(ref->proc); 5606 } 5607 prev = node; 5608 binder_node_unlock(node); 5609 binder_inner_proc_lock(proc); 5610 if (proc->is_dead) 5611 break; 5612 } 5613 binder_inner_proc_unlock(proc); 5614 if (prev) 5615 binder_put_node(prev); 5616 } 5617 5618 static int binder_ioctl_freeze(struct binder_freeze_info *info, 5619 struct binder_proc *target_proc) 5620 { 5621 int ret = 0; 5622 5623 if (!info->enable) { 5624 binder_inner_proc_lock(target_proc); 5625 target_proc->sync_recv = false; 5626 target_proc->async_recv = false; 5627 target_proc->is_frozen = false; 5628 binder_inner_proc_unlock(target_proc); 5629 binder_add_freeze_work(target_proc, false); 5630 return 0; 5631 } 5632 5633 /* 5634 * Freezing the target. Prevent new transactions by 5635 * setting frozen state. If timeout specified, wait 5636 * for transactions to drain. 5637 */ 5638 binder_inner_proc_lock(target_proc); 5639 target_proc->sync_recv = false; 5640 target_proc->async_recv = false; 5641 target_proc->is_frozen = true; 5642 binder_inner_proc_unlock(target_proc); 5643 5644 if (info->timeout_ms > 0) 5645 ret = wait_event_interruptible_timeout( 5646 target_proc->freeze_wait, 5647 (!target_proc->outstanding_txns), 5648 msecs_to_jiffies(info->timeout_ms)); 5649 5650 /* Check pending transactions that wait for reply */ 5651 if (ret >= 0) { 5652 binder_inner_proc_lock(target_proc); 5653 if (binder_txns_pending_ilocked(target_proc)) 5654 ret = -EAGAIN; 5655 binder_inner_proc_unlock(target_proc); 5656 } 5657 5658 if (ret < 0) { 5659 binder_inner_proc_lock(target_proc); 5660 target_proc->is_frozen = false; 5661 binder_inner_proc_unlock(target_proc); 5662 } else { 5663 binder_add_freeze_work(target_proc, true); 5664 } 5665 5666 return ret; 5667 } 5668 5669 static int binder_ioctl_get_freezer_info( 5670 struct binder_frozen_status_info *info) 5671 { 5672 struct binder_proc *target_proc; 5673 bool found = false; 5674 __u32 txns_pending; 5675 5676 info->sync_recv = 0; 5677 info->async_recv = 0; 5678 5679 mutex_lock(&binder_procs_lock); 5680 hlist_for_each_entry(target_proc, &binder_procs, proc_node) { 5681 if (target_proc->pid == info->pid) { 5682 found = true; 5683 binder_inner_proc_lock(target_proc); 5684 txns_pending = binder_txns_pending_ilocked(target_proc); 5685 info->sync_recv |= target_proc->sync_recv | 5686 (txns_pending << 1); 5687 info->async_recv |= target_proc->async_recv; 5688 binder_inner_proc_unlock(target_proc); 5689 } 5690 } 5691 mutex_unlock(&binder_procs_lock); 5692 5693 if (!found) 5694 return -EINVAL; 5695 5696 return 0; 5697 } 5698 5699 static int binder_ioctl_get_extended_error(struct binder_thread *thread, 5700 void __user *ubuf) 5701 { 5702 struct binder_extended_error ee; 5703 5704 binder_inner_proc_lock(thread->proc); 5705 ee = thread->ee; 5706 
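/*
 * Snapshot-then-reset under the inner lock: each extended error is
 * reported to userspace at most once per BINDER_GET_EXTENDED_ERROR.
 */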
binder_set_extended_error(&thread->ee, 0, BR_OK, 0); 5707 binder_inner_proc_unlock(thread->proc); 5708 5709 if (copy_to_user(ubuf, &ee, sizeof(ee))) 5710 return -EFAULT; 5711 5712 return 0; 5713 } 5714 5715 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 5716 { 5717 int ret; 5718 struct binder_proc *proc = filp->private_data; 5719 struct binder_thread *thread; 5720 void __user *ubuf = (void __user *)arg; 5721 5722 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 5723 proc->pid, current->pid, cmd, arg);*/ 5724 5725 binder_selftest_alloc(&proc->alloc); 5726 5727 trace_binder_ioctl(cmd, arg); 5728 5729 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5730 if (ret) 5731 goto err_unlocked; 5732 5733 thread = binder_get_thread(proc); 5734 if (thread == NULL) { 5735 ret = -ENOMEM; 5736 goto err; 5737 } 5738 5739 switch (cmd) { 5740 case BINDER_WRITE_READ: 5741 ret = binder_ioctl_write_read(filp, arg, thread); 5742 if (ret) 5743 goto err; 5744 break; 5745 case BINDER_SET_MAX_THREADS: { 5746 u32 max_threads; 5747 5748 if (copy_from_user(&max_threads, ubuf, 5749 sizeof(max_threads))) { 5750 ret = -EINVAL; 5751 goto err; 5752 } 5753 binder_inner_proc_lock(proc); 5754 proc->max_threads = max_threads; 5755 binder_inner_proc_unlock(proc); 5756 break; 5757 } 5758 case BINDER_SET_CONTEXT_MGR_EXT: { 5759 struct flat_binder_object fbo; 5760 5761 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { 5762 ret = -EINVAL; 5763 goto err; 5764 } 5765 ret = binder_ioctl_set_ctx_mgr(filp, &fbo); 5766 if (ret) 5767 goto err; 5768 break; 5769 } 5770 case BINDER_SET_CONTEXT_MGR: 5771 ret = binder_ioctl_set_ctx_mgr(filp, NULL); 5772 if (ret) 5773 goto err; 5774 break; 5775 case BINDER_THREAD_EXIT: 5776 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 5777 proc->pid, thread->pid); 5778 binder_thread_release(proc, thread); 5779 thread = NULL; 5780 break; 5781 case BINDER_VERSION: { 5782 struct binder_version __user *ver = ubuf; 5783 5784 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 5785 &ver->protocol_version)) { 5786 ret = -EINVAL; 5787 goto err; 5788 } 5789 break; 5790 } 5791 case BINDER_GET_NODE_INFO_FOR_REF: { 5792 struct binder_node_info_for_ref info; 5793 5794 if (copy_from_user(&info, ubuf, sizeof(info))) { 5795 ret = -EFAULT; 5796 goto err; 5797 } 5798 5799 ret = binder_ioctl_get_node_info_for_ref(proc, &info); 5800 if (ret < 0) 5801 goto err; 5802 5803 if (copy_to_user(ubuf, &info, sizeof(info))) { 5804 ret = -EFAULT; 5805 goto err; 5806 } 5807 5808 break; 5809 } 5810 case BINDER_GET_NODE_DEBUG_INFO: { 5811 struct binder_node_debug_info info; 5812 5813 if (copy_from_user(&info, ubuf, sizeof(info))) { 5814 ret = -EFAULT; 5815 goto err; 5816 } 5817 5818 ret = binder_ioctl_get_node_debug_info(proc, &info); 5819 if (ret < 0) 5820 goto err; 5821 5822 if (copy_to_user(ubuf, &info, sizeof(info))) { 5823 ret = -EFAULT; 5824 goto err; 5825 } 5826 break; 5827 } 5828 case BINDER_FREEZE: { 5829 struct binder_freeze_info info; 5830 struct binder_proc **target_procs = NULL, *target_proc; 5831 int target_procs_count = 0, i = 0; 5832 5833 ret = 0; 5834 5835 if (copy_from_user(&info, ubuf, sizeof(info))) { 5836 ret = -EFAULT; 5837 goto err; 5838 } 5839 5840 mutex_lock(&binder_procs_lock); 5841 hlist_for_each_entry(target_proc, &binder_procs, proc_node) { 5842 if (target_proc->pid == info.pid) 5843 target_procs_count++; 5844 } 5845 5846 if (target_procs_count == 0) { 5847 mutex_unlock(&binder_procs_lock); 5848 ret = -EINVAL; 5849 goto err; 5850 } 5851 5852 target_procs = 
kcalloc(target_procs_count, 5853 sizeof(struct binder_proc *), 5854 GFP_KERNEL); 5855 5856 if (!target_procs) { 5857 mutex_unlock(&binder_procs_lock); 5858 ret = -ENOMEM; 5859 goto err; 5860 } 5861 5862 hlist_for_each_entry(target_proc, &binder_procs, proc_node) { 5863 if (target_proc->pid != info.pid) 5864 continue; 5865 5866 binder_inner_proc_lock(target_proc); 5867 target_proc->tmp_ref++; 5868 binder_inner_proc_unlock(target_proc); 5869 5870 target_procs[i++] = target_proc; 5871 } 5872 mutex_unlock(&binder_procs_lock); 5873 5874 for (i = 0; i < target_procs_count; i++) { 5875 if (ret >= 0) 5876 ret = binder_ioctl_freeze(&info, 5877 target_procs[i]); 5878 5879 binder_proc_dec_tmpref(target_procs[i]); 5880 } 5881 5882 kfree(target_procs); 5883 5884 if (ret < 0) 5885 goto err; 5886 break; 5887 } 5888 case BINDER_GET_FROZEN_INFO: { 5889 struct binder_frozen_status_info info; 5890 5891 if (copy_from_user(&info, ubuf, sizeof(info))) { 5892 ret = -EFAULT; 5893 goto err; 5894 } 5895 5896 ret = binder_ioctl_get_freezer_info(&info); 5897 if (ret < 0) 5898 goto err; 5899 5900 if (copy_to_user(ubuf, &info, sizeof(info))) { 5901 ret = -EFAULT; 5902 goto err; 5903 } 5904 break; 5905 } 5906 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: { 5907 uint32_t enable; 5908 5909 if (copy_from_user(&enable, ubuf, sizeof(enable))) { 5910 ret = -EFAULT; 5911 goto err; 5912 } 5913 binder_inner_proc_lock(proc); 5914 proc->oneway_spam_detection_enabled = (bool)enable; 5915 binder_inner_proc_unlock(proc); 5916 break; 5917 } 5918 case BINDER_GET_EXTENDED_ERROR: 5919 ret = binder_ioctl_get_extended_error(thread, ubuf); 5920 if (ret < 0) 5921 goto err; 5922 break; 5923 default: 5924 ret = -EINVAL; 5925 goto err; 5926 } 5927 ret = 0; 5928 err: 5929 if (thread) 5930 thread->looper_need_return = false; 5931 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5932 if (ret && ret != -EINTR) 5933 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 5934 err_unlocked: 5935 trace_binder_ioctl_done(ret); 5936 return ret; 5937 } 5938 5939 static void binder_vma_open(struct vm_area_struct *vma) 5940 { 5941 struct binder_proc *proc = vma->vm_private_data; 5942 5943 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5944 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5945 proc->pid, vma->vm_start, vma->vm_end, 5946 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5947 (unsigned long)pgprot_val(vma->vm_page_prot)); 5948 } 5949 5950 static void binder_vma_close(struct vm_area_struct *vma) 5951 { 5952 struct binder_proc *proc = vma->vm_private_data; 5953 5954 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5955 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5956 proc->pid, vma->vm_start, vma->vm_end, 5957 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5958 (unsigned long)pgprot_val(vma->vm_page_prot)); 5959 binder_alloc_vma_close(&proc->alloc); 5960 } 5961 5962 static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 5963 { 5964 return VM_FAULT_SIGBUS; 5965 } 5966 5967 static const struct vm_operations_struct binder_vm_ops = { 5968 .open = binder_vma_open, 5969 .close = binder_vma_close, 5970 .fault = binder_vm_fault, 5971 }; 5972 5973 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 5974 { 5975 struct binder_proc *proc = filp->private_data; 5976 5977 if (proc->tsk != current->group_leader) 5978 return -EINVAL; 5979 5980 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5981 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 5982 __func__, proc->pid, vma->vm_start, 
vma->vm_end, 5983 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5984 (unsigned long)pgprot_val(vma->vm_page_prot)); 5985 5986 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 5987 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 5988 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM); 5989 return -EPERM; 5990 } 5991 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE); 5992 5993 vma->vm_ops = &binder_vm_ops; 5994 vma->vm_private_data = proc; 5995 5996 return binder_alloc_mmap_handler(&proc->alloc, vma); 5997 } 5998 5999 static int binder_open(struct inode *nodp, struct file *filp) 6000 { 6001 struct binder_proc *proc, *itr; 6002 struct binder_device *binder_dev; 6003 struct binderfs_info *info; 6004 struct dentry *binder_binderfs_dir_entry_proc = NULL; 6005 bool existing_pid = false; 6006 6007 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 6008 current->group_leader->pid, current->pid); 6009 6010 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 6011 if (proc == NULL) 6012 return -ENOMEM; 6013 6014 dbitmap_init(&proc->dmap); 6015 spin_lock_init(&proc->inner_lock); 6016 spin_lock_init(&proc->outer_lock); 6017 get_task_struct(current->group_leader); 6018 proc->tsk = current->group_leader; 6019 proc->cred = get_cred(filp->f_cred); 6020 INIT_LIST_HEAD(&proc->todo); 6021 init_waitqueue_head(&proc->freeze_wait); 6022 proc->default_priority = task_nice(current); 6023 /* binderfs stashes devices in i_private */ 6024 if (is_binderfs_device(nodp)) { 6025 binder_dev = nodp->i_private; 6026 info = nodp->i_sb->s_fs_info; 6027 binder_binderfs_dir_entry_proc = info->proc_log_dir; 6028 } else { 6029 binder_dev = container_of(filp->private_data, 6030 struct binder_device, miscdev); 6031 } 6032 refcount_inc(&binder_dev->ref); 6033 proc->context = &binder_dev->context; 6034 binder_alloc_init(&proc->alloc); 6035 6036 binder_stats_created(BINDER_STAT_PROC); 6037 proc->pid = current->group_leader->pid; 6038 INIT_LIST_HEAD(&proc->delivered_death); 6039 INIT_LIST_HEAD(&proc->delivered_freeze); 6040 INIT_LIST_HEAD(&proc->waiting_threads); 6041 filp->private_data = proc; 6042 6043 mutex_lock(&binder_procs_lock); 6044 hlist_for_each_entry(itr, &binder_procs, proc_node) { 6045 if (itr->pid == proc->pid) { 6046 existing_pid = true; 6047 break; 6048 } 6049 } 6050 hlist_add_head(&proc->proc_node, &binder_procs); 6051 mutex_unlock(&binder_procs_lock); 6052 6053 if (binder_debugfs_dir_entry_proc && !existing_pid) { 6054 char strbuf[11]; 6055 6056 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 6057 /* 6058 * proc debug entries are shared between contexts. 6059 * Only create for the first PID to avoid debugfs log spamming 6060 * The printing code will anyway print all contexts for a given 6061 * PID so this is not a problem. 6062 */ 6063 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 6064 binder_debugfs_dir_entry_proc, 6065 (void *)(unsigned long)proc->pid, 6066 &proc_fops); 6067 } 6068 6069 if (binder_binderfs_dir_entry_proc && !existing_pid) { 6070 char strbuf[11]; 6071 struct dentry *binderfs_entry; 6072 6073 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 6074 /* 6075 * Similar to debugfs, the process specific log file is shared 6076 * between contexts. Only create for the first PID. 6077 * This is ok since same as debugfs, the log file will contain 6078 * information on all contexts of a given PID. 
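 * For example, with binderfs mounted at /dev/binderfs (the mount
 * point is chosen by userspace), this file would appear as
 * binder_logs/proc/<pid> under that mount.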
6079 */ 6080 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc, 6081 strbuf, &proc_fops, (void *)(unsigned long)proc->pid); 6082 if (!IS_ERR(binderfs_entry)) { 6083 proc->binderfs_entry = binderfs_entry; 6084 } else { 6085 int error; 6086 6087 error = PTR_ERR(binderfs_entry); 6088 pr_warn("Unable to create file %s in binderfs (error %d)\n", 6089 strbuf, error); 6090 } 6091 } 6092 6093 return 0; 6094 } 6095 6096 static int binder_flush(struct file *filp, fl_owner_t id) 6097 { 6098 struct binder_proc *proc = filp->private_data; 6099 6100 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 6101 6102 return 0; 6103 } 6104 6105 static void binder_deferred_flush(struct binder_proc *proc) 6106 { 6107 struct rb_node *n; 6108 int wake_count = 0; 6109 6110 binder_inner_proc_lock(proc); 6111 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 6112 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 6113 6114 thread->looper_need_return = true; 6115 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 6116 wake_up_interruptible(&thread->wait); 6117 wake_count++; 6118 } 6119 } 6120 binder_inner_proc_unlock(proc); 6121 6122 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 6123 "binder_flush: %d woke %d threads\n", proc->pid, 6124 wake_count); 6125 } 6126 6127 static int binder_release(struct inode *nodp, struct file *filp) 6128 { 6129 struct binder_proc *proc = filp->private_data; 6130 6131 debugfs_remove(proc->debugfs_entry); 6132 6133 if (proc->binderfs_entry) { 6134 binderfs_remove_file(proc->binderfs_entry); 6135 proc->binderfs_entry = NULL; 6136 } 6137 6138 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 6139 6140 return 0; 6141 } 6142 6143 static int binder_node_release(struct binder_node *node, int refs) 6144 { 6145 struct binder_ref *ref; 6146 int death = 0; 6147 struct binder_proc *proc = node->proc; 6148 6149 binder_release_work(proc, &node->async_todo); 6150 6151 binder_node_lock(node); 6152 binder_inner_proc_lock(proc); 6153 binder_dequeue_work_ilocked(&node->work); 6154 /* 6155 * The caller must have taken a temporary ref on the node, 6156 */ 6157 BUG_ON(!node->tmp_refs); 6158 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 6159 binder_inner_proc_unlock(proc); 6160 binder_node_unlock(node); 6161 binder_free_node(node); 6162 6163 return refs; 6164 } 6165 6166 node->proc = NULL; 6167 node->local_strong_refs = 0; 6168 node->local_weak_refs = 0; 6169 binder_inner_proc_unlock(proc); 6170 6171 spin_lock(&binder_dead_nodes_lock); 6172 hlist_add_head(&node->dead_node, &binder_dead_nodes); 6173 spin_unlock(&binder_dead_nodes_lock); 6174 6175 hlist_for_each_entry(ref, &node->refs, node_entry) { 6176 refs++; 6177 /* 6178 * Need the node lock to synchronize 6179 * with new notification requests and the 6180 * inner lock to synchronize with queued 6181 * death notifications. 
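 * Once queued, the work item surfaces to the ref's process as
 * BR_DEAD_BINDER on its next read and is then parked on that
 * proc's delivered_death list until userspace acknowledges it
 * with BC_DEAD_BINDER_DONE.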
6182 */ 6183 binder_inner_proc_lock(ref->proc); 6184 if (!ref->death) { 6185 binder_inner_proc_unlock(ref->proc); 6186 continue; 6187 } 6188 6189 death++; 6190 6191 BUG_ON(!list_empty(&ref->death->work.entry)); 6192 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 6193 binder_enqueue_work_ilocked(&ref->death->work, 6194 &ref->proc->todo); 6195 binder_wakeup_proc_ilocked(ref->proc); 6196 binder_inner_proc_unlock(ref->proc); 6197 } 6198 6199 binder_debug(BINDER_DEBUG_DEAD_BINDER, 6200 "node %d now dead, refs %d, death %d\n", 6201 node->debug_id, refs, death); 6202 binder_node_unlock(node); 6203 binder_put_node(node); 6204 6205 return refs; 6206 } 6207 6208 static void binder_deferred_release(struct binder_proc *proc) 6209 { 6210 struct binder_context *context = proc->context; 6211 struct rb_node *n; 6212 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 6213 6214 mutex_lock(&binder_procs_lock); 6215 hlist_del(&proc->proc_node); 6216 mutex_unlock(&binder_procs_lock); 6217 6218 mutex_lock(&context->context_mgr_node_lock); 6219 if (context->binder_context_mgr_node && 6220 context->binder_context_mgr_node->proc == proc) { 6221 binder_debug(BINDER_DEBUG_DEAD_BINDER, 6222 "%s: %d context_mgr_node gone\n", 6223 __func__, proc->pid); 6224 context->binder_context_mgr_node = NULL; 6225 } 6226 mutex_unlock(&context->context_mgr_node_lock); 6227 binder_inner_proc_lock(proc); 6228 /* 6229 * Make sure proc stays alive after we 6230 * remove all the threads 6231 */ 6232 proc->tmp_ref++; 6233 6234 proc->is_dead = true; 6235 proc->is_frozen = false; 6236 proc->sync_recv = false; 6237 proc->async_recv = false; 6238 threads = 0; 6239 active_transactions = 0; 6240 while ((n = rb_first(&proc->threads))) { 6241 struct binder_thread *thread; 6242 6243 thread = rb_entry(n, struct binder_thread, rb_node); 6244 binder_inner_proc_unlock(proc); 6245 threads++; 6246 active_transactions += binder_thread_release(proc, thread); 6247 binder_inner_proc_lock(proc); 6248 } 6249 6250 nodes = 0; 6251 incoming_refs = 0; 6252 while ((n = rb_first(&proc->nodes))) { 6253 struct binder_node *node; 6254 6255 node = rb_entry(n, struct binder_node, rb_node); 6256 nodes++; 6257 /* 6258 * take a temporary ref on the node before 6259 * calling binder_node_release() which will either 6260 * kfree() the node or call binder_put_node() 6261 */ 6262 binder_inc_node_tmpref_ilocked(node); 6263 rb_erase(&node->rb_node, &proc->nodes); 6264 binder_inner_proc_unlock(proc); 6265 incoming_refs = binder_node_release(node, incoming_refs); 6266 binder_inner_proc_lock(proc); 6267 } 6268 binder_inner_proc_unlock(proc); 6269 6270 outgoing_refs = 0; 6271 binder_proc_lock(proc); 6272 while ((n = rb_first(&proc->refs_by_desc))) { 6273 struct binder_ref *ref; 6274 6275 ref = rb_entry(n, struct binder_ref, rb_node_desc); 6276 outgoing_refs++; 6277 binder_cleanup_ref_olocked(ref); 6278 binder_proc_unlock(proc); 6279 binder_free_ref(ref); 6280 binder_proc_lock(proc); 6281 } 6282 binder_proc_unlock(proc); 6283 6284 binder_release_work(proc, &proc->todo); 6285 binder_release_work(proc, &proc->delivered_death); 6286 binder_release_work(proc, &proc->delivered_freeze); 6287 6288 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 6289 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 6290 __func__, proc->pid, threads, nodes, incoming_refs, 6291 outgoing_refs, active_transactions); 6292 6293 binder_proc_dec_tmpref(proc); 6294 } 6295 6296 static void binder_deferred_func(struct work_struct *work) 6297 { 6298 struct binder_proc *proc; 6299 
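/* snapshot of the BINDER_DEFERRED_* bits claimed from one proc */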
6300 int defer; 6301 6302 do { 6303 mutex_lock(&binder_deferred_lock); 6304 if (!hlist_empty(&binder_deferred_list)) { 6305 proc = hlist_entry(binder_deferred_list.first, 6306 struct binder_proc, deferred_work_node); 6307 hlist_del_init(&proc->deferred_work_node); 6308 defer = proc->deferred_work; 6309 proc->deferred_work = 0; 6310 } else { 6311 proc = NULL; 6312 defer = 0; 6313 } 6314 mutex_unlock(&binder_deferred_lock); 6315 6316 if (defer & BINDER_DEFERRED_FLUSH) 6317 binder_deferred_flush(proc); 6318 6319 if (defer & BINDER_DEFERRED_RELEASE) 6320 binder_deferred_release(proc); /* frees proc */ 6321 } while (proc); 6322 } 6323 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 6324 6325 static void 6326 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 6327 { 6328 mutex_lock(&binder_deferred_lock); 6329 proc->deferred_work |= defer; 6330 if (hlist_unhashed(&proc->deferred_work_node)) { 6331 hlist_add_head(&proc->deferred_work_node, 6332 &binder_deferred_list); 6333 schedule_work(&binder_deferred_work); 6334 } 6335 mutex_unlock(&binder_deferred_lock); 6336 } 6337 6338 static void print_binder_transaction_ilocked(struct seq_file *m, 6339 struct binder_proc *proc, 6340 const char *prefix, 6341 struct binder_transaction *t) 6342 { 6343 struct binder_proc *to_proc; 6344 struct binder_buffer *buffer = t->buffer; 6345 ktime_t current_time = ktime_get(); 6346 6347 spin_lock(&t->lock); 6348 to_proc = t->to_proc; 6349 seq_printf(m, 6350 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms", 6351 prefix, t->debug_id, t, 6352 t->from_pid, 6353 t->from_tid, 6354 to_proc ? to_proc->pid : 0, 6355 t->to_thread ? t->to_thread->pid : 0, 6356 t->code, t->flags, t->priority, t->need_reply, 6357 ktime_ms_delta(current_time, t->start_time)); 6358 spin_unlock(&t->lock); 6359 6360 if (proc != to_proc) { 6361 /* 6362 * Can only safely deref buffer if we are holding the 6363 * correct proc inner lock for this node 6364 */ 6365 seq_puts(m, "\n"); 6366 return; 6367 } 6368 6369 if (buffer == NULL) { 6370 seq_puts(m, " buffer free\n"); 6371 return; 6372 } 6373 if (buffer->target_node) 6374 seq_printf(m, " node %d", buffer->target_node->debug_id); 6375 seq_printf(m, " size %zd:%zd offset %lx\n", 6376 buffer->data_size, buffer->offsets_size, 6377 buffer->user_data - proc->alloc.buffer); 6378 } 6379 6380 static void print_binder_work_ilocked(struct seq_file *m, 6381 struct binder_proc *proc, 6382 const char *prefix, 6383 const char *transaction_prefix, 6384 struct binder_work *w) 6385 { 6386 struct binder_node *node; 6387 struct binder_transaction *t; 6388 6389 switch (w->type) { 6390 case BINDER_WORK_TRANSACTION: 6391 t = container_of(w, struct binder_transaction, work); 6392 print_binder_transaction_ilocked( 6393 m, proc, transaction_prefix, t); 6394 break; 6395 case BINDER_WORK_RETURN_ERROR: { 6396 struct binder_error *e = container_of( 6397 w, struct binder_error, work); 6398 6399 seq_printf(m, "%stransaction error: %u\n", 6400 prefix, e->cmd); 6401 } break; 6402 case BINDER_WORK_TRANSACTION_COMPLETE: 6403 seq_printf(m, "%stransaction complete\n", prefix); 6404 break; 6405 case BINDER_WORK_NODE: 6406 node = container_of(w, struct binder_node, work); 6407 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 6408 prefix, node->debug_id, 6409 (u64)node->ptr, (u64)node->cookie); 6410 break; 6411 case BINDER_WORK_DEAD_BINDER: 6412 seq_printf(m, "%shas dead binder\n", prefix); 6413 break; 6414 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 6415 seq_printf(m, 
"%shas cleared dead binder\n", prefix); 6416 break; 6417 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 6418 seq_printf(m, "%shas cleared death notification\n", prefix); 6419 break; 6420 case BINDER_WORK_FROZEN_BINDER: 6421 seq_printf(m, "%shas frozen binder\n", prefix); 6422 break; 6423 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: 6424 seq_printf(m, "%shas cleared freeze notification\n", prefix); 6425 break; 6426 default: 6427 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 6428 break; 6429 } 6430 } 6431 6432 static void print_binder_thread_ilocked(struct seq_file *m, 6433 struct binder_thread *thread, 6434 int print_always) 6435 { 6436 struct binder_transaction *t; 6437 struct binder_work *w; 6438 size_t start_pos = m->count; 6439 size_t header_pos; 6440 6441 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 6442 thread->pid, thread->looper, 6443 thread->looper_need_return, 6444 atomic_read(&thread->tmp_ref)); 6445 header_pos = m->count; 6446 t = thread->transaction_stack; 6447 while (t) { 6448 if (t->from == thread) { 6449 print_binder_transaction_ilocked(m, thread->proc, 6450 " outgoing transaction", t); 6451 t = t->from_parent; 6452 } else if (t->to_thread == thread) { 6453 print_binder_transaction_ilocked(m, thread->proc, 6454 " incoming transaction", t); 6455 t = t->to_parent; 6456 } else { 6457 print_binder_transaction_ilocked(m, thread->proc, 6458 " bad transaction", t); 6459 t = NULL; 6460 } 6461 } 6462 list_for_each_entry(w, &thread->todo, entry) { 6463 print_binder_work_ilocked(m, thread->proc, " ", 6464 " pending transaction", w); 6465 } 6466 if (!print_always && m->count == header_pos) 6467 m->count = start_pos; 6468 } 6469 6470 static void print_binder_node_nilocked(struct seq_file *m, 6471 struct binder_node *node) 6472 { 6473 struct binder_ref *ref; 6474 struct binder_work *w; 6475 int count; 6476 6477 count = hlist_count_nodes(&node->refs); 6478 6479 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", 6480 node->debug_id, (u64)node->ptr, (u64)node->cookie, 6481 node->has_strong_ref, node->has_weak_ref, 6482 node->local_strong_refs, node->local_weak_refs, 6483 node->internal_strong_refs, count, node->tmp_refs); 6484 if (count) { 6485 seq_puts(m, " proc"); 6486 hlist_for_each_entry(ref, &node->refs, node_entry) 6487 seq_printf(m, " %d", ref->proc->pid); 6488 } 6489 seq_puts(m, "\n"); 6490 if (node->proc) { 6491 list_for_each_entry(w, &node->async_todo, entry) 6492 print_binder_work_ilocked(m, node->proc, " ", 6493 " pending async transaction", w); 6494 } 6495 } 6496 6497 static void print_binder_ref_olocked(struct seq_file *m, 6498 struct binder_ref *ref) 6499 { 6500 binder_node_lock(ref->node); 6501 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 6502 ref->data.debug_id, ref->data.desc, 6503 ref->node->proc ? 
"" : "dead ", 6504 ref->node->debug_id, ref->data.strong, 6505 ref->data.weak, ref->death); 6506 binder_node_unlock(ref->node); 6507 } 6508 6509 static void print_binder_proc(struct seq_file *m, 6510 struct binder_proc *proc, int print_all) 6511 { 6512 struct binder_work *w; 6513 struct rb_node *n; 6514 size_t start_pos = m->count; 6515 size_t header_pos; 6516 struct binder_node *last_node = NULL; 6517 6518 seq_printf(m, "proc %d\n", proc->pid); 6519 seq_printf(m, "context %s\n", proc->context->name); 6520 header_pos = m->count; 6521 6522 binder_inner_proc_lock(proc); 6523 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 6524 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 6525 rb_node), print_all); 6526 6527 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 6528 struct binder_node *node = rb_entry(n, struct binder_node, 6529 rb_node); 6530 if (!print_all && !node->has_async_transaction) 6531 continue; 6532 6533 /* 6534 * take a temporary reference on the node so it 6535 * survives and isn't removed from the tree 6536 * while we print it. 6537 */ 6538 binder_inc_node_tmpref_ilocked(node); 6539 /* Need to drop inner lock to take node lock */ 6540 binder_inner_proc_unlock(proc); 6541 if (last_node) 6542 binder_put_node(last_node); 6543 binder_node_inner_lock(node); 6544 print_binder_node_nilocked(m, node); 6545 binder_node_inner_unlock(node); 6546 last_node = node; 6547 binder_inner_proc_lock(proc); 6548 } 6549 binder_inner_proc_unlock(proc); 6550 if (last_node) 6551 binder_put_node(last_node); 6552 6553 if (print_all) { 6554 binder_proc_lock(proc); 6555 for (n = rb_first(&proc->refs_by_desc); 6556 n != NULL; 6557 n = rb_next(n)) 6558 print_binder_ref_olocked(m, rb_entry(n, 6559 struct binder_ref, 6560 rb_node_desc)); 6561 binder_proc_unlock(proc); 6562 } 6563 binder_alloc_print_allocated(m, &proc->alloc); 6564 binder_inner_proc_lock(proc); 6565 list_for_each_entry(w, &proc->todo, entry) 6566 print_binder_work_ilocked(m, proc, " ", 6567 " pending transaction", w); 6568 list_for_each_entry(w, &proc->delivered_death, entry) { 6569 seq_puts(m, " has delivered dead binder\n"); 6570 break; 6571 } 6572 list_for_each_entry(w, &proc->delivered_freeze, entry) { 6573 seq_puts(m, " has delivered freeze binder\n"); 6574 break; 6575 } 6576 binder_inner_proc_unlock(proc); 6577 if (!print_all && m->count == header_pos) 6578 m->count = start_pos; 6579 } 6580 6581 static const char * const binder_return_strings[] = { 6582 "BR_ERROR", 6583 "BR_OK", 6584 "BR_TRANSACTION", 6585 "BR_REPLY", 6586 "BR_ACQUIRE_RESULT", 6587 "BR_DEAD_REPLY", 6588 "BR_TRANSACTION_COMPLETE", 6589 "BR_INCREFS", 6590 "BR_ACQUIRE", 6591 "BR_RELEASE", 6592 "BR_DECREFS", 6593 "BR_ATTEMPT_ACQUIRE", 6594 "BR_NOOP", 6595 "BR_SPAWN_LOOPER", 6596 "BR_FINISHED", 6597 "BR_DEAD_BINDER", 6598 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 6599 "BR_FAILED_REPLY", 6600 "BR_FROZEN_REPLY", 6601 "BR_ONEWAY_SPAM_SUSPECT", 6602 "BR_TRANSACTION_PENDING_FROZEN", 6603 "BR_FROZEN_BINDER", 6604 "BR_CLEAR_FREEZE_NOTIFICATION_DONE", 6605 }; 6606 6607 static const char * const binder_command_strings[] = { 6608 "BC_TRANSACTION", 6609 "BC_REPLY", 6610 "BC_ACQUIRE_RESULT", 6611 "BC_FREE_BUFFER", 6612 "BC_INCREFS", 6613 "BC_ACQUIRE", 6614 "BC_RELEASE", 6615 "BC_DECREFS", 6616 "BC_INCREFS_DONE", 6617 "BC_ACQUIRE_DONE", 6618 "BC_ATTEMPT_ACQUIRE", 6619 "BC_REGISTER_LOOPER", 6620 "BC_ENTER_LOOPER", 6621 "BC_EXIT_LOOPER", 6622 "BC_REQUEST_DEATH_NOTIFICATION", 6623 "BC_CLEAR_DEATH_NOTIFICATION", 6624 "BC_DEAD_BINDER_DONE", 6625 
"BC_TRANSACTION_SG", 6626 "BC_REPLY_SG", 6627 "BC_REQUEST_FREEZE_NOTIFICATION", 6628 "BC_CLEAR_FREEZE_NOTIFICATION", 6629 "BC_FREEZE_NOTIFICATION_DONE", 6630 }; 6631 6632 static const char * const binder_objstat_strings[] = { 6633 "proc", 6634 "thread", 6635 "node", 6636 "ref", 6637 "death", 6638 "transaction", 6639 "transaction_complete", 6640 "freeze", 6641 }; 6642 6643 static void print_binder_stats(struct seq_file *m, const char *prefix, 6644 struct binder_stats *stats) 6645 { 6646 int i; 6647 6648 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 6649 ARRAY_SIZE(binder_command_strings)); 6650 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 6651 int temp = atomic_read(&stats->bc[i]); 6652 6653 if (temp) 6654 seq_printf(m, "%s%s: %d\n", prefix, 6655 binder_command_strings[i], temp); 6656 } 6657 6658 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 6659 ARRAY_SIZE(binder_return_strings)); 6660 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 6661 int temp = atomic_read(&stats->br[i]); 6662 6663 if (temp) 6664 seq_printf(m, "%s%s: %d\n", prefix, 6665 binder_return_strings[i], temp); 6666 } 6667 6668 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 6669 ARRAY_SIZE(binder_objstat_strings)); 6670 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 6671 ARRAY_SIZE(stats->obj_deleted)); 6672 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 6673 int created = atomic_read(&stats->obj_created[i]); 6674 int deleted = atomic_read(&stats->obj_deleted[i]); 6675 6676 if (created || deleted) 6677 seq_printf(m, "%s%s: active %d total %d\n", 6678 prefix, 6679 binder_objstat_strings[i], 6680 created - deleted, 6681 created); 6682 } 6683 } 6684 6685 static void print_binder_proc_stats(struct seq_file *m, 6686 struct binder_proc *proc) 6687 { 6688 struct binder_work *w; 6689 struct binder_thread *thread; 6690 struct rb_node *n; 6691 int count, strong, weak, ready_threads; 6692 size_t free_async_space = 6693 binder_alloc_get_free_async_space(&proc->alloc); 6694 6695 seq_printf(m, "proc %d\n", proc->pid); 6696 seq_printf(m, "context %s\n", proc->context->name); 6697 count = 0; 6698 ready_threads = 0; 6699 binder_inner_proc_lock(proc); 6700 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 6701 count++; 6702 6703 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) 6704 ready_threads++; 6705 6706 seq_printf(m, " threads: %d\n", count); 6707 seq_printf(m, " requested threads: %d+%d/%d\n" 6708 " ready threads %d\n" 6709 " free async space %zd\n", proc->requested_threads, 6710 proc->requested_threads_started, proc->max_threads, 6711 ready_threads, 6712 free_async_space); 6713 count = 0; 6714 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 6715 count++; 6716 binder_inner_proc_unlock(proc); 6717 seq_printf(m, " nodes: %d\n", count); 6718 count = 0; 6719 strong = 0; 6720 weak = 0; 6721 binder_proc_lock(proc); 6722 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 6723 struct binder_ref *ref = rb_entry(n, struct binder_ref, 6724 rb_node_desc); 6725 count++; 6726 strong += ref->data.strong; 6727 weak += ref->data.weak; 6728 } 6729 binder_proc_unlock(proc); 6730 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 6731 6732 count = binder_alloc_get_allocated_count(&proc->alloc); 6733 seq_printf(m, " buffers: %d\n", count); 6734 6735 binder_alloc_print_pages(m, &proc->alloc); 6736 6737 count = 0; 6738 binder_inner_proc_lock(proc); 6739 list_for_each_entry(w, &proc->todo, entry) { 6740 if (w->type == BINDER_WORK_TRANSACTION) 6741 count++; 6742 } 6743 
binder_inner_proc_unlock(proc); 6744 seq_printf(m, " pending transactions: %d\n", count); 6745 6746 print_binder_stats(m, " ", &proc->stats); 6747 } 6748 6749 static int state_show(struct seq_file *m, void *unused) 6750 { 6751 struct binder_proc *proc; 6752 struct binder_node *node; 6753 struct binder_node *last_node = NULL; 6754 6755 seq_puts(m, "binder state:\n"); 6756 6757 spin_lock(&binder_dead_nodes_lock); 6758 if (!hlist_empty(&binder_dead_nodes)) 6759 seq_puts(m, "dead nodes:\n"); 6760 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { 6761 /* 6762 * take a temporary reference on the node so it 6763 * survives and isn't removed from the list 6764 * while we print it. 6765 */ 6766 node->tmp_refs++; 6767 spin_unlock(&binder_dead_nodes_lock); 6768 if (last_node) 6769 binder_put_node(last_node); 6770 binder_node_lock(node); 6771 print_binder_node_nilocked(m, node); 6772 binder_node_unlock(node); 6773 last_node = node; 6774 spin_lock(&binder_dead_nodes_lock); 6775 } 6776 spin_unlock(&binder_dead_nodes_lock); 6777 if (last_node) 6778 binder_put_node(last_node); 6779 6780 mutex_lock(&binder_procs_lock); 6781 hlist_for_each_entry(proc, &binder_procs, proc_node) 6782 print_binder_proc(m, proc, 1); 6783 mutex_unlock(&binder_procs_lock); 6784 6785 return 0; 6786 } 6787 6788 static int stats_show(struct seq_file *m, void *unused) 6789 { 6790 struct binder_proc *proc; 6791 6792 seq_puts(m, "binder stats:\n"); 6793 6794 print_binder_stats(m, "", &binder_stats); 6795 6796 mutex_lock(&binder_procs_lock); 6797 hlist_for_each_entry(proc, &binder_procs, proc_node) 6798 print_binder_proc_stats(m, proc); 6799 mutex_unlock(&binder_procs_lock); 6800 6801 return 0; 6802 } 6803 6804 static int transactions_show(struct seq_file *m, void *unused) 6805 { 6806 struct binder_proc *proc; 6807 6808 seq_puts(m, "binder transactions:\n"); 6809 mutex_lock(&binder_procs_lock); 6810 hlist_for_each_entry(proc, &binder_procs, proc_node) 6811 print_binder_proc(m, proc, 0); 6812 mutex_unlock(&binder_procs_lock); 6813 6814 return 0; 6815 } 6816 6817 static int proc_show(struct seq_file *m, void *unused) 6818 { 6819 struct binder_proc *itr; 6820 int pid = (unsigned long)m->private; 6821 6822 mutex_lock(&binder_procs_lock); 6823 hlist_for_each_entry(itr, &binder_procs, proc_node) { 6824 if (itr->pid == pid) { 6825 seq_puts(m, "binder proc state:\n"); 6826 print_binder_proc(m, itr, 1); 6827 } 6828 } 6829 mutex_unlock(&binder_procs_lock); 6830 6831 return 0; 6832 } 6833 6834 static void print_binder_transaction_log_entry(struct seq_file *m, 6835 struct binder_transaction_log_entry *e) 6836 { 6837 int debug_id = READ_ONCE(e->debug_id_done); 6838 /* 6839 * read barrier to guarantee debug_id_done read before 6840 * we print the log values 6841 */ 6842 smp_rmb(); 6843 seq_printf(m, 6844 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 6845 e->debug_id, (e->call_type == 2) ? "reply" : 6846 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 6847 e->from_thread, e->to_proc, e->to_thread, e->context_name, 6848 e->to_node, e->target_handle, e->data_size, e->offsets_size, 6849 e->return_error, e->return_error_param, 6850 e->return_error_line); 6851 /* 6852 * read-barrier to guarantee read of debug_id_done after 6853 * done printing the fields of the entry 6854 */ 6855 smp_rmb(); 6856 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 
6857 "\n" : " (incomplete)\n"); 6858 } 6859 6860 static int transaction_log_show(struct seq_file *m, void *unused) 6861 { 6862 struct binder_transaction_log *log = m->private; 6863 unsigned int log_cur = atomic_read(&log->cur); 6864 unsigned int count; 6865 unsigned int cur; 6866 int i; 6867 6868 count = log_cur + 1; 6869 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 6870 0 : count % ARRAY_SIZE(log->entry); 6871 if (count > ARRAY_SIZE(log->entry) || log->full) 6872 count = ARRAY_SIZE(log->entry); 6873 for (i = 0; i < count; i++) { 6874 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 6875 6876 print_binder_transaction_log_entry(m, &log->entry[index]); 6877 } 6878 return 0; 6879 } 6880 6881 const struct file_operations binder_fops = { 6882 .owner = THIS_MODULE, 6883 .poll = binder_poll, 6884 .unlocked_ioctl = binder_ioctl, 6885 .compat_ioctl = compat_ptr_ioctl, 6886 .mmap = binder_mmap, 6887 .open = binder_open, 6888 .flush = binder_flush, 6889 .release = binder_release, 6890 }; 6891 6892 DEFINE_SHOW_ATTRIBUTE(state); 6893 DEFINE_SHOW_ATTRIBUTE(stats); 6894 DEFINE_SHOW_ATTRIBUTE(transactions); 6895 DEFINE_SHOW_ATTRIBUTE(transaction_log); 6896 6897 const struct binder_debugfs_entry binder_debugfs_entries[] = { 6898 { 6899 .name = "state", 6900 .mode = 0444, 6901 .fops = &state_fops, 6902 .data = NULL, 6903 }, 6904 { 6905 .name = "stats", 6906 .mode = 0444, 6907 .fops = &stats_fops, 6908 .data = NULL, 6909 }, 6910 { 6911 .name = "transactions", 6912 .mode = 0444, 6913 .fops = &transactions_fops, 6914 .data = NULL, 6915 }, 6916 { 6917 .name = "transaction_log", 6918 .mode = 0444, 6919 .fops = &transaction_log_fops, 6920 .data = &binder_transaction_log, 6921 }, 6922 { 6923 .name = "failed_transaction_log", 6924 .mode = 0444, 6925 .fops = &transaction_log_fops, 6926 .data = &binder_transaction_log_failed, 6927 }, 6928 {} /* terminator */ 6929 }; 6930 6931 static int __init init_binder_device(const char *name) 6932 { 6933 int ret; 6934 struct binder_device *binder_device; 6935 6936 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 6937 if (!binder_device) 6938 return -ENOMEM; 6939 6940 binder_device->miscdev.fops = &binder_fops; 6941 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 6942 binder_device->miscdev.name = name; 6943 6944 refcount_set(&binder_device->ref, 1); 6945 binder_device->context.binder_context_mgr_uid = INVALID_UID; 6946 binder_device->context.name = name; 6947 mutex_init(&binder_device->context.context_mgr_node_lock); 6948 6949 ret = misc_register(&binder_device->miscdev); 6950 if (ret < 0) { 6951 kfree(binder_device); 6952 return ret; 6953 } 6954 6955 hlist_add_head(&binder_device->hlist, &binder_devices); 6956 6957 return ret; 6958 } 6959 6960 static int __init binder_init(void) 6961 { 6962 int ret; 6963 char *device_name, *device_tmp; 6964 struct binder_device *device; 6965 struct hlist_node *tmp; 6966 char *device_names = NULL; 6967 const struct binder_debugfs_entry *db_entry; 6968 6969 ret = binder_alloc_shrinker_init(); 6970 if (ret) 6971 return ret; 6972 6973 atomic_set(&binder_transaction_log.cur, ~0U); 6974 atomic_set(&binder_transaction_log_failed.cur, ~0U); 6975 6976 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 6977 6978 binder_for_each_debugfs_entry(db_entry) 6979 debugfs_create_file(db_entry->name, 6980 db_entry->mode, 6981 binder_debugfs_dir_entry_root, 6982 db_entry->data, 6983 db_entry->fops); 6984 6985 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 6986 binder_debugfs_dir_entry_root); 6987 6988 if 
(!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && 6989 strcmp(binder_devices_param, "") != 0) { 6990 /* 6991 * Copy the module_parameter string, because we don't want to 6992 * tokenize it in-place. 6993 */ 6994 device_names = kstrdup(binder_devices_param, GFP_KERNEL); 6995 if (!device_names) { 6996 ret = -ENOMEM; 6997 goto err_alloc_device_names_failed; 6998 } 6999 7000 device_tmp = device_names; 7001 while ((device_name = strsep(&device_tmp, ","))) { 7002 ret = init_binder_device(device_name); 7003 if (ret) 7004 goto err_init_binder_device_failed; 7005 } 7006 } 7007 7008 ret = init_binderfs(); 7009 if (ret) 7010 goto err_init_binder_device_failed; 7011 7012 return ret; 7013 7014 err_init_binder_device_failed: 7015 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 7016 misc_deregister(&device->miscdev); 7017 hlist_del(&device->hlist); 7018 kfree(device); 7019 } 7020 7021 kfree(device_names); 7022 7023 err_alloc_device_names_failed: 7024 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 7025 binder_alloc_shrinker_exit(); 7026 7027 return ret; 7028 } 7029 7030 device_initcall(binder_init); 7031 7032 #define CREATE_TRACE_POINTS 7033 #include "binder_trace.h" 7034 7035 MODULE_LICENSE("GPL v2"); 7036
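/*
 * Illustrative userspace view of the ioctl surface implemented above.
 * This sketch is not part of the driver: it assumes the exported UAPI
 * header <linux/android/binder.h> and a traditional device node at
 * /dev/binder (a binderfs mount would expose a different path).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/android/binder.h>
 *
 *	int main(void)
 *	{
 *		struct binder_version vers;
 *		int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *		if (fd < 0)
 *			return 1;
 *		// binder_ioctl(): BINDER_VERSION is a plain put_user()
 *		if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *		    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *			return 1;
 *		// binder_mmap(): read-only; PROT_WRITE would trip
 *		// FORBIDDEN_MMAP_FLAGS (VM_WRITE) and fail with -EPERM
 *		if (mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0) ==
 *		    MAP_FAILED)
 *			return 1;
 *		printf("binder protocol %d\n", vers.protocol_version);
 *		return 0;
 *	}
 */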