/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo), as well as
 *    thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
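
/*
 * Illustrative sketch (not part of the original source): the suffix
 * convention above means a caller typically takes the locks in
 * outer -> node -> inner order and only then calls the matching
 * *_locked helper, e.g. (foo_nilocked() is a hypothetical name taken
 * from the convention above):
 *
 *	binder_node_inner_lock(node);	// node->lock, then proc->inner_lock
 *					// if node->proc is non-NULL
 *	foo_nilocked(node);		// may touch node fields and todo lists
 *	binder_node_inner_unlock(node);
 *
 * Nesting in the opposite direction (taking the node or outer lock while
 * already holding an inner lock) would invert the documented order and
 * risk deadlock.
 */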

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
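
/*
 * Usage note (illustrative, not from the original source): since the
 * parameters above are registered via module_param_named()/
 * module_param_call(), they can normally be inspected and adjusted at
 * runtime through sysfs, e.g. (assuming the usual "binder" module name):
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *	echo 1     > /sys/module/binder/parameters/stop_on_user_error
 *
 * Writes require root because the permissions are S_IWUSR | S_IRUGO.
 */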

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
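
/*
 * Illustrative note (assumption, not from the original source): entries in
 * this ring buffer are claimed lock-free via atomic_inc_return() above, and
 * a reader that dumps the log is expected to pair with the smp_wmb() here,
 * roughly:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();	// pairs with smp_wmb() in binder_transaction_log_add()
 *	// if done == 0, the entry may still be in the middle of being filled
 *
 * so that a zero debug_id_done is observed before any partially written
 * fields are read.
 */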

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type:  type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 *            (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 *        (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 *           (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 *             (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 *        (invariant after initialized)
 * @refs: list of references on this node
 *        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs: weak user refs from local process
 *                   (protected by @proc->inner_lock if @proc
 *                   and by @lock)
 * @local_strong_refs: strong user refs from local process
 *                     (protected by @proc->inner_lock if @proc
 *                     and by @lock)
 * @tmp_refs: temporary kernel refs
 *            (protected by @proc->inner_lock while @proc
 *            is valid, and by binder_dead_nodes_lock
 *            if @proc is NULL. During inc/dec and node release
 *            it is also protected by @lock to provide safety
 *            as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 *       (invariant, no lock needed)
 * @cookie: userspace cookie for node
 *          (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 *                  (protected by @proc->inner_lock if @proc
 *                  and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 *                      (protected by @proc->inner_lock if @proc
 *                      and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 *                (protected by @proc->inner_lock if @proc
 *                and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 *                    (protected by @proc->inner_lock if @proc
 *                    and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                         (protected by @lock)
 * @accept_fds: file descriptor operations supported for node
 *              (invariant after initialized)
 * @min_priority: minimum scheduling priority
 *                (invariant after initialized)
 * @async_todo: list of async work items
 *              (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc:     unique userspace handle for ref
 * @strong:   strong ref count (debugging only if not locked)
 * @weak:     weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 *           (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 *         this proc ordered by node->ptr
 *         (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 *                (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 *                (protected by @outer_lock)
 * @pid: PID of group_leader of process
 *       (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 *       (invariant after initialized)
 * @files: files_struct for process
 *         (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 *                      (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 *                 (protected by binder_deferred_lock)
 * @is_dead: process is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @inner_lock)
 * @todo: list of work for this process
 *        (protected by @inner_lock)
 * @wait: wait queue head to wait for proc work
 *        (invariant after initialized)
 * @stats: per-process binder statistics
 *         (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 *                   (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 *               (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 *                     yet started. In current implementation, can
 *                     only be 0 or 1.
 *                     (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                             (protected by @inner_lock)
 * @ready_threads: number of threads waiting for proc work
 *                 (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 *           (protected by @inner_lock)
 * @default_priority: default scheduler priority
 *                    (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 *           (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 *              Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 *        (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 *           (protected by @proc->inner_lock)
 * @pid: PID for this thread
 *       (invariant after initialization)
 * @looper: bitmap of looping state
 *          (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 *                      (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 *                     (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 *        (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 *                (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 *               (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 *         (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 *           (atomic since @proc->inner_lock cannot
 *           always be acquired)
 * @is_dead: thread is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
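
/*
 * Illustrative note (not from the original source): the lock/unlock
 * wrappers above and below are macros so that the caller's source line is
 * recorded, e.g. binder_inner_proc_lock(proc) expands to
 * _binder_inner_proc_lock(proc, __LINE__). With BINDER_DEBUG_SPINLOCKS set
 * in debug_mask, every acquire and release then logs its call site.
 */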

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:        binder_proc associated with list
 * @work:        struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc: binder_proc associated with list
 * @list: list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
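
/*
 * Illustrative sketch (assumption, not part of the original source): a
 * producer/consumer pair built on the helpers above typically looks like
 *
 *	// producer
 *	binder_enqueue_work(proc, &w->work, &proc->todo);
 *	wake_up_interruptible(&proc->wait);
 *
 *	// consumer, with proc->inner_lock already held
 *	w = binder_dequeue_work_head_ilocked(&proc->todo);
 *
 * with the _ilocked variants reserved for paths that already hold
 * proc->inner_lock.
 */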

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	BUG_ON(!spin_is_locked(&proc->inner_lock));

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	BUG_ON(!spin_is_locked(&proc->inner_lock));
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
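
/*
 * Illustrative note (not from the original source): binder_new_node()
 * above follows an "allocate first, insert under the lock, free on race"
 * pattern: the kzalloc() happens before taking proc->inner_lock (sleeping
 * allocations are not allowed under a spinlock), binder_init_node_ilocked()
 * either inserts new_node or returns an existing node for the same ptr,
 * and the loser of the race simply kfree()s its unused allocation.
 */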

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:    binder_proc that owns the ref
 * @node:    binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different than the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

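	/*
	 * Descriptive note (not from the original source): the loop below
	 * picks the lowest unused descriptor. The context manager gets the
	 * reserved handle 0; every other proc starts at 1 and walks the
	 * desc-ordered tree, bumping the candidate past each taken value.
	 * E.g. if descriptors 1, 2 and 4 already exist, the new ref gets 3.
	 */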
	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:    ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:            proc containing the ref
 * @desc:            the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:           the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
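
/*
 * Illustrative note (assumption, not from the original source): the
 * helpers below are what the userspace refcounting commands ultimately
 * reach; e.g. a BC_ACQUIRE/BC_RELEASE pair on a non-zero handle h is
 * roughly
 *
 *	binder_update_ref_for_handle(proc, h, true,  true, &rdata);
 *	...
 *	binder_update_ref_for_handle(proc, h, false, true, &rdata);
 *
 * with @rdata returning a snapshot of the ref's counters, since the
 * binder_ref itself may be freed once proc->outer_lock is dropped.
 */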

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:      proc containing the ref
 * @desc:      the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong:    true=strong reference, false=weak reference
 * @rdata:     the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:   proc containing the ref
 * @desc:   the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata:  the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:        proc containing the ref
 * @node:        target node
 * @strong:      true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:       the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
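
/*
 * Illustrative sketch (assumption, not part of the original source): the
 * usual pattern for consumers of binder_get_txn_from() is
 *
 *	target_thread = binder_get_txn_from(t);
 *	if (target_thread) {
 *		... use target_thread ...
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 *
 * so the thread cannot be freed between extracting t->from and the final
 * drop of tmp_ref (binder_send_failed_reply() below follows this shape
 * via binder_get_txn_from_and_acq_inner()).
 */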
1738 * 1739 * Return: the value of t->from 1740 */ 1741 static struct binder_thread *binder_get_txn_from_and_acq_inner( 1742 struct binder_transaction *t) 1743 { 1744 struct binder_thread *from; 1745 1746 from = binder_get_txn_from(t); 1747 if (!from) 1748 return NULL; 1749 binder_inner_proc_lock(from->proc); 1750 if (t->from) { 1751 BUG_ON(from != t->from); 1752 return from; 1753 } 1754 binder_inner_proc_unlock(from->proc); 1755 binder_thread_dec_tmpref(from); 1756 return NULL; 1757 } 1758 1759 static void binder_free_transaction(struct binder_transaction *t) 1760 { 1761 if (t->buffer) 1762 t->buffer->transaction = NULL; 1763 kfree(t); 1764 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1765 } 1766 1767 static void binder_send_failed_reply(struct binder_transaction *t, 1768 uint32_t error_code) 1769 { 1770 struct binder_thread *target_thread; 1771 struct binder_transaction *next; 1772 1773 BUG_ON(t->flags & TF_ONE_WAY); 1774 while (1) { 1775 target_thread = binder_get_txn_from_and_acq_inner(t); 1776 if (target_thread) { 1777 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1778 "send failed reply for transaction %d to %d:%d\n", 1779 t->debug_id, 1780 target_thread->proc->pid, 1781 target_thread->pid); 1782 1783 binder_pop_transaction_ilocked(target_thread, t); 1784 if (target_thread->reply_error.cmd == BR_OK) { 1785 target_thread->reply_error.cmd = error_code; 1786 binder_enqueue_work_ilocked( 1787 &target_thread->reply_error.work, 1788 &target_thread->todo); 1789 wake_up_interruptible(&target_thread->wait); 1790 } else { 1791 WARN(1, "Unexpected reply error: %u\n", 1792 target_thread->reply_error.cmd); 1793 } 1794 binder_inner_proc_unlock(target_thread->proc); 1795 binder_thread_dec_tmpref(target_thread); 1796 binder_free_transaction(t); 1797 return; 1798 } 1799 next = t->from_parent; 1800 1801 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1802 "send failed reply for transaction %d, target dead\n", 1803 t->debug_id); 1804 1805 binder_free_transaction(t); 1806 if (next == NULL) { 1807 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1808 "reply failed, no target thread at root\n"); 1809 return; 1810 } 1811 t = next; 1812 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1813 "reply failed, no target thread -- retry %d\n", 1814 t->debug_id); 1815 } 1816 } 1817 1818 /** 1819 * binder_validate_object() - checks for a valid metadata object in a buffer. 1820 * @buffer: binder_buffer that we're parsing. 1821 * @offset: offset in the buffer at which to validate an object. 1822 * 1823 * Return: If there's a valid metadata object at @offset in @buffer, the 1824 * size of that object. Otherwise, it returns zero. 1825 */ 1826 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 1827 { 1828 /* Check if we can read a header first */ 1829 struct binder_object_header *hdr; 1830 size_t object_size = 0; 1831 1832 if (offset > buffer->data_size - sizeof(*hdr) || 1833 buffer->data_size < sizeof(*hdr) || 1834 !IS_ALIGNED(offset, sizeof(u32))) 1835 return 0; 1836 1837 /* Ok, now see if we can read a complete object. 
*/ 1838 hdr = (struct binder_object_header *)(buffer->data + offset); 1839 switch (hdr->type) { 1840 case BINDER_TYPE_BINDER: 1841 case BINDER_TYPE_WEAK_BINDER: 1842 case BINDER_TYPE_HANDLE: 1843 case BINDER_TYPE_WEAK_HANDLE: 1844 object_size = sizeof(struct flat_binder_object); 1845 break; 1846 case BINDER_TYPE_FD: 1847 object_size = sizeof(struct binder_fd_object); 1848 break; 1849 case BINDER_TYPE_PTR: 1850 object_size = sizeof(struct binder_buffer_object); 1851 break; 1852 case BINDER_TYPE_FDA: 1853 object_size = sizeof(struct binder_fd_array_object); 1854 break; 1855 default: 1856 return 0; 1857 } 1858 if (offset <= buffer->data_size - object_size && 1859 buffer->data_size >= object_size) 1860 return object_size; 1861 else 1862 return 0; 1863 } 1864 1865 /** 1866 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 1867 * @b: binder_buffer containing the object 1868 * @index: index in offset array at which the binder_buffer_object is 1869 * located 1870 * @start: points to the start of the offset array 1871 * @num_valid: the number of valid offsets in the offset array 1872 * 1873 * Return: If @index is within the valid range of the offset array 1874 * described by @start and @num_valid, and if there's a valid 1875 * binder_buffer_object at the offset found in index @index 1876 * of the offset array, that object is returned. Otherwise, 1877 * %NULL is returned. 1878 * Note that the offset found in index @index itself is not 1879 * verified; this function assumes that @num_valid elements 1880 * from @start were previously verified to have valid offsets. 1881 */ 1882 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, 1883 binder_size_t index, 1884 binder_size_t *start, 1885 binder_size_t num_valid) 1886 { 1887 struct binder_buffer_object *buffer_obj; 1888 binder_size_t *offp; 1889 1890 if (index >= num_valid) 1891 return NULL; 1892 1893 offp = start + index; 1894 buffer_obj = (struct binder_buffer_object *)(b->data + *offp); 1895 if (buffer_obj->hdr.type != BINDER_TYPE_PTR) 1896 return NULL; 1897 1898 return buffer_obj; 1899 } 1900 1901 /** 1902 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 1903 * @b: transaction buffer 1904 * @objects_start start of objects buffer 1905 * @buffer: binder_buffer_object in which to fix up 1906 * @offset: start offset in @buffer to fix up 1907 * @last_obj: last binder_buffer_object that we fixed up in 1908 * @last_min_offset: minimum fixup offset in @last_obj 1909 * 1910 * Return: %true if a fixup in buffer @buffer at offset @offset is 1911 * allowed. 1912 * 1913 * For safety reasons, we only allow fixups inside a buffer to happen 1914 * at increasing offsets; additionally, we only allow fixup on the last 1915 * buffer object that was verified, or one of its parents. 
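 * In other words, each fixup must land in the most recently verified
 * buffer object or one of its parents, at an offset that does not go
 * backwards; the examples below show which orderings are accepted and
 * which are rejected.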
1916 * 1917 * Example of what is allowed: 1918 * 1919 * A 1920 * B (parent = A, offset = 0) 1921 * C (parent = A, offset = 16) 1922 * D (parent = C, offset = 0) 1923 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 1924 * 1925 * Examples of what is not allowed: 1926 * 1927 * Decreasing offsets within the same parent: 1928 * A 1929 * C (parent = A, offset = 16) 1930 * B (parent = A, offset = 0) // decreasing offset within A 1931 * 1932 * Referring to a parent that wasn't the last object or any of its parents: 1933 * A 1934 * B (parent = A, offset = 0) 1935 * C (parent = A, offset = 0) 1936 * C (parent = A, offset = 16) 1937 * D (parent = B, offset = 0) // B is not A or any of A's parents 1938 */ 1939 static bool binder_validate_fixup(struct binder_buffer *b, 1940 binder_size_t *objects_start, 1941 struct binder_buffer_object *buffer, 1942 binder_size_t fixup_offset, 1943 struct binder_buffer_object *last_obj, 1944 binder_size_t last_min_offset) 1945 { 1946 if (!last_obj) { 1947 /* Nothing to fix up in */ 1948 return false; 1949 } 1950 1951 while (last_obj != buffer) { 1952 /* 1953 * Safe to retrieve the parent of last_obj, since it 1954 * was already previously verified by the driver. 1955 */ 1956 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 1957 return false; 1958 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 1959 last_obj = (struct binder_buffer_object *) 1960 (b->data + *(objects_start + last_obj->parent)); 1961 } 1962 return (fixup_offset >= last_min_offset); 1963 } 1964 1965 static void binder_transaction_buffer_release(struct binder_proc *proc, 1966 struct binder_buffer *buffer, 1967 binder_size_t *failed_at) 1968 { 1969 binder_size_t *offp, *off_start, *off_end; 1970 int debug_id = buffer->debug_id; 1971 1972 binder_debug(BINDER_DEBUG_TRANSACTION, 1973 "%d buffer release %d, size %zd-%zd, failed at %p\n", 1974 proc->pid, buffer->debug_id, 1975 buffer->data_size, buffer->offsets_size, failed_at); 1976 1977 if (buffer->target_node) 1978 binder_dec_node(buffer->target_node, 1, 0); 1979 1980 off_start = (binder_size_t *)(buffer->data + 1981 ALIGN(buffer->data_size, sizeof(void *))); 1982 if (failed_at) 1983 off_end = failed_at; 1984 else 1985 off_end = (void *)off_start + buffer->offsets_size; 1986 for (offp = off_start; offp < off_end; offp++) { 1987 struct binder_object_header *hdr; 1988 size_t object_size = binder_validate_object(buffer, *offp); 1989 1990 if (object_size == 0) { 1991 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 1992 debug_id, (u64)*offp, buffer->data_size); 1993 continue; 1994 } 1995 hdr = (struct binder_object_header *)(buffer->data + *offp); 1996 switch (hdr->type) { 1997 case BINDER_TYPE_BINDER: 1998 case BINDER_TYPE_WEAK_BINDER: { 1999 struct flat_binder_object *fp; 2000 struct binder_node *node; 2001 2002 fp = to_flat_binder_object(hdr); 2003 node = binder_get_node(proc, fp->binder); 2004 if (node == NULL) { 2005 pr_err("transaction release %d bad node %016llx\n", 2006 debug_id, (u64)fp->binder); 2007 break; 2008 } 2009 binder_debug(BINDER_DEBUG_TRANSACTION, 2010 " node %d u%016llx\n", 2011 node->debug_id, (u64)node->ptr); 2012 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2013 0); 2014 binder_put_node(node); 2015 } break; 2016 case BINDER_TYPE_HANDLE: 2017 case BINDER_TYPE_WEAK_HANDLE: { 2018 struct flat_binder_object *fp; 2019 struct binder_ref_data rdata; 2020 int ret; 2021 2022 fp = to_flat_binder_object(hdr); 2023 ret = binder_dec_ref_for_handle(proc, fp->handle, 2024 
hdr->type == BINDER_TYPE_HANDLE, &rdata); 2025 2026 if (ret) { 2027 pr_err("transaction release %d bad handle %d, ret = %d\n", 2028 debug_id, fp->handle, ret); 2029 break; 2030 } 2031 binder_debug(BINDER_DEBUG_TRANSACTION, 2032 " ref %d desc %d\n", 2033 rdata.debug_id, rdata.desc); 2034 } break; 2035 2036 case BINDER_TYPE_FD: { 2037 struct binder_fd_object *fp = to_binder_fd_object(hdr); 2038 2039 binder_debug(BINDER_DEBUG_TRANSACTION, 2040 " fd %d\n", fp->fd); 2041 if (failed_at) 2042 task_close_fd(proc, fp->fd); 2043 } break; 2044 case BINDER_TYPE_PTR: 2045 /* 2046 * Nothing to do here, this will get cleaned up when the 2047 * transaction buffer gets freed 2048 */ 2049 break; 2050 case BINDER_TYPE_FDA: { 2051 struct binder_fd_array_object *fda; 2052 struct binder_buffer_object *parent; 2053 uintptr_t parent_buffer; 2054 u32 *fd_array; 2055 size_t fd_index; 2056 binder_size_t fd_buf_size; 2057 2058 fda = to_binder_fd_array_object(hdr); 2059 parent = binder_validate_ptr(buffer, fda->parent, 2060 off_start, 2061 offp - off_start); 2062 if (!parent) { 2063 pr_err("transaction release %d bad parent offset", 2064 debug_id); 2065 continue; 2066 } 2067 /* 2068 * Since the parent was already fixed up, convert it 2069 * back to kernel address space to access it 2070 */ 2071 parent_buffer = parent->buffer - 2072 binder_alloc_get_user_buffer_offset( 2073 &proc->alloc); 2074 2075 fd_buf_size = sizeof(u32) * fda->num_fds; 2076 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2077 pr_err("transaction release %d invalid number of fds (%lld)\n", 2078 debug_id, (u64)fda->num_fds); 2079 continue; 2080 } 2081 if (fd_buf_size > parent->length || 2082 fda->parent_offset > parent->length - fd_buf_size) { 2083 /* No space for all file descriptors here. */ 2084 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2085 debug_id, (u64)fda->num_fds); 2086 continue; 2087 } 2088 fd_array = (u32 *)(parent_buffer + fda->parent_offset); 2089 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2090 task_close_fd(proc, fd_array[fd_index]); 2091 } break; 2092 default: 2093 pr_err("transaction release %d bad object type %x\n", 2094 debug_id, hdr->type); 2095 break; 2096 } 2097 } 2098 } 2099 2100 static int binder_translate_binder(struct flat_binder_object *fp, 2101 struct binder_transaction *t, 2102 struct binder_thread *thread) 2103 { 2104 struct binder_node *node; 2105 struct binder_proc *proc = thread->proc; 2106 struct binder_proc *target_proc = t->to_proc; 2107 struct binder_ref_data rdata; 2108 int ret = 0; 2109 2110 node = binder_get_node(proc, fp->binder); 2111 if (!node) { 2112 node = binder_new_node(proc, fp); 2113 if (!node) 2114 return -ENOMEM; 2115 } 2116 if (fp->cookie != node->cookie) { 2117 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2118 proc->pid, thread->pid, (u64)fp->binder, 2119 node->debug_id, (u64)fp->cookie, 2120 (u64)node->cookie); 2121 ret = -EINVAL; 2122 goto done; 2123 } 2124 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2125 ret = -EPERM; 2126 goto done; 2127 } 2128 2129 ret = binder_inc_ref_for_node(target_proc, node, 2130 fp->hdr.type == BINDER_TYPE_BINDER, 2131 &thread->todo, &rdata); 2132 if (ret) 2133 goto done; 2134 2135 if (fp->hdr.type == BINDER_TYPE_BINDER) 2136 fp->hdr.type = BINDER_TYPE_HANDLE; 2137 else 2138 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2139 fp->binder = 0; 2140 fp->handle = rdata.desc; 2141 fp->cookie = 0; 2142 2143 trace_binder_transaction_node_to_ref(t, node, &rdata); 2144 
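	/*
	 * At this point the sender's flat_binder_object has been rewritten
	 * in place: the local binder/cookie values were cleared and replaced
	 * with a handle (rdata.desc) that is valid in the target process.
	 */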
binder_debug(BINDER_DEBUG_TRANSACTION, 2145 " node %d u%016llx -> ref %d desc %d\n", 2146 node->debug_id, (u64)node->ptr, 2147 rdata.debug_id, rdata.desc); 2148 done: 2149 binder_put_node(node); 2150 return ret; 2151 } 2152 2153 static int binder_translate_handle(struct flat_binder_object *fp, 2154 struct binder_transaction *t, 2155 struct binder_thread *thread) 2156 { 2157 struct binder_proc *proc = thread->proc; 2158 struct binder_proc *target_proc = t->to_proc; 2159 struct binder_node *node; 2160 struct binder_ref_data src_rdata; 2161 int ret = 0; 2162 2163 node = binder_get_node_from_ref(proc, fp->handle, 2164 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2165 if (!node) { 2166 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2167 proc->pid, thread->pid, fp->handle); 2168 return -EINVAL; 2169 } 2170 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2171 ret = -EPERM; 2172 goto done; 2173 } 2174 2175 binder_node_lock(node); 2176 if (node->proc == target_proc) { 2177 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2178 fp->hdr.type = BINDER_TYPE_BINDER; 2179 else 2180 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2181 fp->binder = node->ptr; 2182 fp->cookie = node->cookie; 2183 if (node->proc) 2184 binder_inner_proc_lock(node->proc); 2185 binder_inc_node_nilocked(node, 2186 fp->hdr.type == BINDER_TYPE_BINDER, 2187 0, NULL); 2188 if (node->proc) 2189 binder_inner_proc_unlock(node->proc); 2190 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2191 binder_debug(BINDER_DEBUG_TRANSACTION, 2192 " ref %d desc %d -> node %d u%016llx\n", 2193 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2194 (u64)node->ptr); 2195 binder_node_unlock(node); 2196 } else { 2197 int ret; 2198 struct binder_ref_data dest_rdata; 2199 2200 binder_node_unlock(node); 2201 ret = binder_inc_ref_for_node(target_proc, node, 2202 fp->hdr.type == BINDER_TYPE_HANDLE, 2203 NULL, &dest_rdata); 2204 if (ret) 2205 goto done; 2206 2207 fp->binder = 0; 2208 fp->handle = dest_rdata.desc; 2209 fp->cookie = 0; 2210 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2211 &dest_rdata); 2212 binder_debug(BINDER_DEBUG_TRANSACTION, 2213 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2214 src_rdata.debug_id, src_rdata.desc, 2215 dest_rdata.debug_id, dest_rdata.desc, 2216 node->debug_id); 2217 } 2218 done: 2219 binder_put_node(node); 2220 return ret; 2221 } 2222 2223 static int binder_translate_fd(int fd, 2224 struct binder_transaction *t, 2225 struct binder_thread *thread, 2226 struct binder_transaction *in_reply_to) 2227 { 2228 struct binder_proc *proc = thread->proc; 2229 struct binder_proc *target_proc = t->to_proc; 2230 int target_fd; 2231 struct file *file; 2232 int ret; 2233 bool target_allows_fd; 2234 2235 if (in_reply_to) 2236 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2237 else 2238 target_allows_fd = t->buffer->target_node->accept_fds; 2239 if (!target_allows_fd) { 2240 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2241 proc->pid, thread->pid, 2242 in_reply_to ? 
"reply" : "transaction", 2243 fd); 2244 ret = -EPERM; 2245 goto err_fd_not_accepted; 2246 } 2247 2248 file = fget(fd); 2249 if (!file) { 2250 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2251 proc->pid, thread->pid, fd); 2252 ret = -EBADF; 2253 goto err_fget; 2254 } 2255 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2256 if (ret < 0) { 2257 ret = -EPERM; 2258 goto err_security; 2259 } 2260 2261 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 2262 if (target_fd < 0) { 2263 ret = -ENOMEM; 2264 goto err_get_unused_fd; 2265 } 2266 task_fd_install(target_proc, target_fd, file); 2267 trace_binder_transaction_fd(t, fd, target_fd); 2268 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 2269 fd, target_fd); 2270 2271 return target_fd; 2272 2273 err_get_unused_fd: 2274 err_security: 2275 fput(file); 2276 err_fget: 2277 err_fd_not_accepted: 2278 return ret; 2279 } 2280 2281 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2282 struct binder_buffer_object *parent, 2283 struct binder_transaction *t, 2284 struct binder_thread *thread, 2285 struct binder_transaction *in_reply_to) 2286 { 2287 binder_size_t fdi, fd_buf_size, num_installed_fds; 2288 int target_fd; 2289 uintptr_t parent_buffer; 2290 u32 *fd_array; 2291 struct binder_proc *proc = thread->proc; 2292 struct binder_proc *target_proc = t->to_proc; 2293 2294 fd_buf_size = sizeof(u32) * fda->num_fds; 2295 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2296 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2297 proc->pid, thread->pid, (u64)fda->num_fds); 2298 return -EINVAL; 2299 } 2300 if (fd_buf_size > parent->length || 2301 fda->parent_offset > parent->length - fd_buf_size) { 2302 /* No space for all file descriptors here. */ 2303 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2304 proc->pid, thread->pid, (u64)fda->num_fds); 2305 return -EINVAL; 2306 } 2307 /* 2308 * Since the parent was already fixed up, convert it 2309 * back to the kernel address space to access it 2310 */ 2311 parent_buffer = parent->buffer - 2312 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 2313 fd_array = (u32 *)(parent_buffer + fda->parent_offset); 2314 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 2315 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2316 proc->pid, thread->pid); 2317 return -EINVAL; 2318 } 2319 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2320 target_fd = binder_translate_fd(fd_array[fdi], t, thread, 2321 in_reply_to); 2322 if (target_fd < 0) 2323 goto err_translate_fd_failed; 2324 fd_array[fdi] = target_fd; 2325 } 2326 return 0; 2327 2328 err_translate_fd_failed: 2329 /* 2330 * Failed to allocate fd or security error, free fds 2331 * installed so far. 
2332 */ 2333 num_installed_fds = fdi; 2334 for (fdi = 0; fdi < num_installed_fds; fdi++) 2335 task_close_fd(target_proc, fd_array[fdi]); 2336 return target_fd; 2337 } 2338 2339 static int binder_fixup_parent(struct binder_transaction *t, 2340 struct binder_thread *thread, 2341 struct binder_buffer_object *bp, 2342 binder_size_t *off_start, 2343 binder_size_t num_valid, 2344 struct binder_buffer_object *last_fixup_obj, 2345 binder_size_t last_fixup_min_off) 2346 { 2347 struct binder_buffer_object *parent; 2348 u8 *parent_buffer; 2349 struct binder_buffer *b = t->buffer; 2350 struct binder_proc *proc = thread->proc; 2351 struct binder_proc *target_proc = t->to_proc; 2352 2353 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2354 return 0; 2355 2356 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 2357 if (!parent) { 2358 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2359 proc->pid, thread->pid); 2360 return -EINVAL; 2361 } 2362 2363 if (!binder_validate_fixup(b, off_start, 2364 parent, bp->parent_offset, 2365 last_fixup_obj, 2366 last_fixup_min_off)) { 2367 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2368 proc->pid, thread->pid); 2369 return -EINVAL; 2370 } 2371 2372 if (parent->length < sizeof(binder_uintptr_t) || 2373 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2374 /* No space for a pointer here! */ 2375 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2376 proc->pid, thread->pid); 2377 return -EINVAL; 2378 } 2379 parent_buffer = (u8 *)(parent->buffer - 2380 binder_alloc_get_user_buffer_offset( 2381 &target_proc->alloc)); 2382 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 2383 2384 return 0; 2385 } 2386 2387 static void binder_transaction(struct binder_proc *proc, 2388 struct binder_thread *thread, 2389 struct binder_transaction_data *tr, int reply, 2390 binder_size_t extra_buffers_size) 2391 { 2392 int ret; 2393 struct binder_transaction *t; 2394 struct binder_work *tcomplete; 2395 binder_size_t *offp, *off_end, *off_start; 2396 binder_size_t off_min; 2397 u8 *sg_bufp, *sg_buf_end; 2398 struct binder_proc *target_proc = NULL; 2399 struct binder_thread *target_thread = NULL; 2400 struct binder_node *target_node = NULL; 2401 struct list_head *target_list; 2402 wait_queue_head_t *target_wait; 2403 struct binder_transaction *in_reply_to = NULL; 2404 struct binder_transaction_log_entry *e; 2405 uint32_t return_error = 0; 2406 uint32_t return_error_param = 0; 2407 uint32_t return_error_line = 0; 2408 struct binder_buffer_object *last_fixup_obj = NULL; 2409 binder_size_t last_fixup_min_off = 0; 2410 struct binder_context *context = proc->context; 2411 int t_debug_id = atomic_inc_return(&binder_last_id); 2412 2413 e = binder_transaction_log_add(&binder_transaction_log); 2414 e->debug_id = t_debug_id; 2415 e->call_type = reply ? 
2 : !!(tr->flags & TF_ONE_WAY); 2416 e->from_proc = proc->pid; 2417 e->from_thread = thread->pid; 2418 e->target_handle = tr->target.handle; 2419 e->data_size = tr->data_size; 2420 e->offsets_size = tr->offsets_size; 2421 e->context_name = proc->context->name; 2422 2423 if (reply) { 2424 binder_inner_proc_lock(proc); 2425 in_reply_to = thread->transaction_stack; 2426 if (in_reply_to == NULL) { 2427 binder_inner_proc_unlock(proc); 2428 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2429 proc->pid, thread->pid); 2430 return_error = BR_FAILED_REPLY; 2431 return_error_param = -EPROTO; 2432 return_error_line = __LINE__; 2433 goto err_empty_call_stack; 2434 } 2435 if (in_reply_to->to_thread != thread) { 2436 spin_lock(&in_reply_to->lock); 2437 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2438 proc->pid, thread->pid, in_reply_to->debug_id, 2439 in_reply_to->to_proc ? 2440 in_reply_to->to_proc->pid : 0, 2441 in_reply_to->to_thread ? 2442 in_reply_to->to_thread->pid : 0); 2443 spin_unlock(&in_reply_to->lock); 2444 binder_inner_proc_unlock(proc); 2445 return_error = BR_FAILED_REPLY; 2446 return_error_param = -EPROTO; 2447 return_error_line = __LINE__; 2448 in_reply_to = NULL; 2449 goto err_bad_call_stack; 2450 } 2451 thread->transaction_stack = in_reply_to->to_parent; 2452 binder_inner_proc_unlock(proc); 2453 binder_set_nice(in_reply_to->saved_priority); 2454 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2455 if (target_thread == NULL) { 2456 return_error = BR_DEAD_REPLY; 2457 return_error_line = __LINE__; 2458 goto err_dead_binder; 2459 } 2460 if (target_thread->transaction_stack != in_reply_to) { 2461 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2462 proc->pid, thread->pid, 2463 target_thread->transaction_stack ? 2464 target_thread->transaction_stack->debug_id : 0, 2465 in_reply_to->debug_id); 2466 binder_inner_proc_unlock(target_thread->proc); 2467 return_error = BR_FAILED_REPLY; 2468 return_error_param = -EPROTO; 2469 return_error_line = __LINE__; 2470 in_reply_to = NULL; 2471 target_thread = NULL; 2472 goto err_dead_binder; 2473 } 2474 target_proc = target_thread->proc; 2475 target_proc->tmp_ref++; 2476 binder_inner_proc_unlock(target_thread->proc); 2477 } else { 2478 if (tr->target.handle) { 2479 struct binder_ref *ref; 2480 2481 /* 2482 * There must already be a strong ref 2483 * on this node. If so, do a strong 2484 * increment on the node to ensure it 2485 * stays alive until the transaction is 2486 * done. 
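			 * The matching decrement happens when the
			 * transaction buffer is released (see
			 * binder_transaction_buffer_release()) or on the
			 * error paths at the end of this function.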
2487 */ 2488 binder_proc_lock(proc); 2489 ref = binder_get_ref_olocked(proc, tr->target.handle, 2490 true); 2491 if (ref) { 2492 binder_inc_node(ref->node, 1, 0, NULL); 2493 target_node = ref->node; 2494 } 2495 binder_proc_unlock(proc); 2496 if (target_node == NULL) { 2497 binder_user_error("%d:%d got transaction to invalid handle\n", 2498 proc->pid, thread->pid); 2499 return_error = BR_FAILED_REPLY; 2500 return_error_param = -EINVAL; 2501 return_error_line = __LINE__; 2502 goto err_invalid_target_handle; 2503 } 2504 } else { 2505 mutex_lock(&context->context_mgr_node_lock); 2506 target_node = context->binder_context_mgr_node; 2507 if (target_node == NULL) { 2508 return_error = BR_DEAD_REPLY; 2509 mutex_unlock(&context->context_mgr_node_lock); 2510 return_error_line = __LINE__; 2511 goto err_no_context_mgr_node; 2512 } 2513 binder_inc_node(target_node, 1, 0, NULL); 2514 mutex_unlock(&context->context_mgr_node_lock); 2515 } 2516 e->to_node = target_node->debug_id; 2517 binder_node_lock(target_node); 2518 target_proc = target_node->proc; 2519 if (target_proc == NULL) { 2520 binder_node_unlock(target_node); 2521 return_error = BR_DEAD_REPLY; 2522 return_error_line = __LINE__; 2523 goto err_dead_binder; 2524 } 2525 binder_inner_proc_lock(target_proc); 2526 target_proc->tmp_ref++; 2527 binder_inner_proc_unlock(target_proc); 2528 binder_node_unlock(target_node); 2529 if (security_binder_transaction(proc->tsk, 2530 target_proc->tsk) < 0) { 2531 return_error = BR_FAILED_REPLY; 2532 return_error_param = -EPERM; 2533 return_error_line = __LINE__; 2534 goto err_invalid_target_handle; 2535 } 2536 binder_inner_proc_lock(proc); 2537 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2538 struct binder_transaction *tmp; 2539 2540 tmp = thread->transaction_stack; 2541 if (tmp->to_thread != thread) { 2542 spin_lock(&tmp->lock); 2543 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 2544 proc->pid, thread->pid, tmp->debug_id, 2545 tmp->to_proc ? tmp->to_proc->pid : 0, 2546 tmp->to_thread ? 
2547 tmp->to_thread->pid : 0); 2548 spin_unlock(&tmp->lock); 2549 binder_inner_proc_unlock(proc); 2550 return_error = BR_FAILED_REPLY; 2551 return_error_param = -EPROTO; 2552 return_error_line = __LINE__; 2553 goto err_bad_call_stack; 2554 } 2555 while (tmp) { 2556 struct binder_thread *from; 2557 2558 spin_lock(&tmp->lock); 2559 from = tmp->from; 2560 if (from && from->proc == target_proc) { 2561 atomic_inc(&from->tmp_ref); 2562 target_thread = from; 2563 spin_unlock(&tmp->lock); 2564 break; 2565 } 2566 spin_unlock(&tmp->lock); 2567 tmp = tmp->from_parent; 2568 } 2569 } 2570 binder_inner_proc_unlock(proc); 2571 } 2572 if (target_thread) { 2573 e->to_thread = target_thread->pid; 2574 target_list = &target_thread->todo; 2575 target_wait = &target_thread->wait; 2576 } else { 2577 target_list = &target_proc->todo; 2578 target_wait = &target_proc->wait; 2579 } 2580 e->to_proc = target_proc->pid; 2581 2582 /* TODO: reuse incoming transaction for reply */ 2583 t = kzalloc(sizeof(*t), GFP_KERNEL); 2584 if (t == NULL) { 2585 return_error = BR_FAILED_REPLY; 2586 return_error_param = -ENOMEM; 2587 return_error_line = __LINE__; 2588 goto err_alloc_t_failed; 2589 } 2590 binder_stats_created(BINDER_STAT_TRANSACTION); 2591 spin_lock_init(&t->lock); 2592 2593 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 2594 if (tcomplete == NULL) { 2595 return_error = BR_FAILED_REPLY; 2596 return_error_param = -ENOMEM; 2597 return_error_line = __LINE__; 2598 goto err_alloc_tcomplete_failed; 2599 } 2600 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 2601 2602 t->debug_id = t_debug_id; 2603 2604 if (reply) 2605 binder_debug(BINDER_DEBUG_TRANSACTION, 2606 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 2607 proc->pid, thread->pid, t->debug_id, 2608 target_proc->pid, target_thread->pid, 2609 (u64)tr->data.ptr.buffer, 2610 (u64)tr->data.ptr.offsets, 2611 (u64)tr->data_size, (u64)tr->offsets_size, 2612 (u64)extra_buffers_size); 2613 else 2614 binder_debug(BINDER_DEBUG_TRANSACTION, 2615 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 2616 proc->pid, thread->pid, t->debug_id, 2617 target_proc->pid, target_node->debug_id, 2618 (u64)tr->data.ptr.buffer, 2619 (u64)tr->data.ptr.offsets, 2620 (u64)tr->data_size, (u64)tr->offsets_size, 2621 (u64)extra_buffers_size); 2622 2623 if (!reply && !(tr->flags & TF_ONE_WAY)) 2624 t->from = thread; 2625 else 2626 t->from = NULL; 2627 t->sender_euid = task_euid(proc->tsk); 2628 t->to_proc = target_proc; 2629 t->to_thread = target_thread; 2630 t->code = tr->code; 2631 t->flags = tr->flags; 2632 t->priority = task_nice(current); 2633 2634 trace_binder_transaction(reply, t, target_node); 2635 2636 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 2637 tr->offsets_size, extra_buffers_size, 2638 !reply && (t->flags & TF_ONE_WAY)); 2639 if (IS_ERR(t->buffer)) { 2640 /* 2641 * -ESRCH indicates VMA cleared. The target is dying. 2642 */ 2643 return_error_param = PTR_ERR(t->buffer); 2644 return_error = return_error_param == -ESRCH ? 
2645 BR_DEAD_REPLY : BR_FAILED_REPLY; 2646 return_error_line = __LINE__; 2647 t->buffer = NULL; 2648 goto err_binder_alloc_buf_failed; 2649 } 2650 t->buffer->allow_user_free = 0; 2651 t->buffer->debug_id = t->debug_id; 2652 t->buffer->transaction = t; 2653 t->buffer->target_node = target_node; 2654 trace_binder_transaction_alloc_buf(t->buffer); 2655 off_start = (binder_size_t *)(t->buffer->data + 2656 ALIGN(tr->data_size, sizeof(void *))); 2657 offp = off_start; 2658 2659 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2660 tr->data.ptr.buffer, tr->data_size)) { 2661 binder_user_error("%d:%d got transaction with invalid data ptr\n", 2662 proc->pid, thread->pid); 2663 return_error = BR_FAILED_REPLY; 2664 return_error_param = -EFAULT; 2665 return_error_line = __LINE__; 2666 goto err_copy_data_failed; 2667 } 2668 if (copy_from_user(offp, (const void __user *)(uintptr_t) 2669 tr->data.ptr.offsets, tr->offsets_size)) { 2670 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2671 proc->pid, thread->pid); 2672 return_error = BR_FAILED_REPLY; 2673 return_error_param = -EFAULT; 2674 return_error_line = __LINE__; 2675 goto err_copy_data_failed; 2676 } 2677 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 2678 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 2679 proc->pid, thread->pid, (u64)tr->offsets_size); 2680 return_error = BR_FAILED_REPLY; 2681 return_error_param = -EINVAL; 2682 return_error_line = __LINE__; 2683 goto err_bad_offset; 2684 } 2685 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 2686 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 2687 proc->pid, thread->pid, 2688 (u64)extra_buffers_size); 2689 return_error = BR_FAILED_REPLY; 2690 return_error_param = -EINVAL; 2691 return_error_line = __LINE__; 2692 goto err_bad_offset; 2693 } 2694 off_end = (void *)off_start + tr->offsets_size; 2695 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 2696 sg_buf_end = sg_bufp + extra_buffers_size; 2697 off_min = 0; 2698 for (; offp < off_end; offp++) { 2699 struct binder_object_header *hdr; 2700 size_t object_size = binder_validate_object(t->buffer, *offp); 2701 2702 if (object_size == 0 || *offp < off_min) { 2703 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 2704 proc->pid, thread->pid, (u64)*offp, 2705 (u64)off_min, 2706 (u64)t->buffer->data_size); 2707 return_error = BR_FAILED_REPLY; 2708 return_error_param = -EINVAL; 2709 return_error_line = __LINE__; 2710 goto err_bad_offset; 2711 } 2712 2713 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 2714 off_min = *offp + object_size; 2715 switch (hdr->type) { 2716 case BINDER_TYPE_BINDER: 2717 case BINDER_TYPE_WEAK_BINDER: { 2718 struct flat_binder_object *fp; 2719 2720 fp = to_flat_binder_object(hdr); 2721 ret = binder_translate_binder(fp, t, thread); 2722 if (ret < 0) { 2723 return_error = BR_FAILED_REPLY; 2724 return_error_param = ret; 2725 return_error_line = __LINE__; 2726 goto err_translate_failed; 2727 } 2728 } break; 2729 case BINDER_TYPE_HANDLE: 2730 case BINDER_TYPE_WEAK_HANDLE: { 2731 struct flat_binder_object *fp; 2732 2733 fp = to_flat_binder_object(hdr); 2734 ret = binder_translate_handle(fp, t, thread); 2735 if (ret < 0) { 2736 return_error = BR_FAILED_REPLY; 2737 return_error_param = ret; 2738 return_error_line = __LINE__; 2739 goto err_translate_failed; 2740 } 2741 } break; 2742 2743 case BINDER_TYPE_FD: { 2744 struct binder_fd_object *fp = 
to_binder_fd_object(hdr); 2745 int target_fd = binder_translate_fd(fp->fd, t, thread, 2746 in_reply_to); 2747 2748 if (target_fd < 0) { 2749 return_error = BR_FAILED_REPLY; 2750 return_error_param = target_fd; 2751 return_error_line = __LINE__; 2752 goto err_translate_failed; 2753 } 2754 fp->pad_binder = 0; 2755 fp->fd = target_fd; 2756 } break; 2757 case BINDER_TYPE_FDA: { 2758 struct binder_fd_array_object *fda = 2759 to_binder_fd_array_object(hdr); 2760 struct binder_buffer_object *parent = 2761 binder_validate_ptr(t->buffer, fda->parent, 2762 off_start, 2763 offp - off_start); 2764 if (!parent) { 2765 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2766 proc->pid, thread->pid); 2767 return_error = BR_FAILED_REPLY; 2768 return_error_param = -EINVAL; 2769 return_error_line = __LINE__; 2770 goto err_bad_parent; 2771 } 2772 if (!binder_validate_fixup(t->buffer, off_start, 2773 parent, fda->parent_offset, 2774 last_fixup_obj, 2775 last_fixup_min_off)) { 2776 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2777 proc->pid, thread->pid); 2778 return_error = BR_FAILED_REPLY; 2779 return_error_param = -EINVAL; 2780 return_error_line = __LINE__; 2781 goto err_bad_parent; 2782 } 2783 ret = binder_translate_fd_array(fda, parent, t, thread, 2784 in_reply_to); 2785 if (ret < 0) { 2786 return_error = BR_FAILED_REPLY; 2787 return_error_param = ret; 2788 return_error_line = __LINE__; 2789 goto err_translate_failed; 2790 } 2791 last_fixup_obj = parent; 2792 last_fixup_min_off = 2793 fda->parent_offset + sizeof(u32) * fda->num_fds; 2794 } break; 2795 case BINDER_TYPE_PTR: { 2796 struct binder_buffer_object *bp = 2797 to_binder_buffer_object(hdr); 2798 size_t buf_left = sg_buf_end - sg_bufp; 2799 2800 if (bp->length > buf_left) { 2801 binder_user_error("%d:%d got transaction with too large buffer\n", 2802 proc->pid, thread->pid); 2803 return_error = BR_FAILED_REPLY; 2804 return_error_param = -EINVAL; 2805 return_error_line = __LINE__; 2806 goto err_bad_offset; 2807 } 2808 if (copy_from_user(sg_bufp, 2809 (const void __user *)(uintptr_t) 2810 bp->buffer, bp->length)) { 2811 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2812 proc->pid, thread->pid); 2813 return_error_param = -EFAULT; 2814 return_error = BR_FAILED_REPLY; 2815 return_error_line = __LINE__; 2816 goto err_copy_data_failed; 2817 } 2818 /* Fixup buffer pointer to target proc address space */ 2819 bp->buffer = (uintptr_t)sg_bufp + 2820 binder_alloc_get_user_buffer_offset( 2821 &target_proc->alloc); 2822 sg_bufp += ALIGN(bp->length, sizeof(u64)); 2823 2824 ret = binder_fixup_parent(t, thread, bp, off_start, 2825 offp - off_start, 2826 last_fixup_obj, 2827 last_fixup_min_off); 2828 if (ret < 0) { 2829 return_error = BR_FAILED_REPLY; 2830 return_error_param = ret; 2831 return_error_line = __LINE__; 2832 goto err_translate_failed; 2833 } 2834 last_fixup_obj = bp; 2835 last_fixup_min_off = 0; 2836 } break; 2837 default: 2838 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 2839 proc->pid, thread->pid, hdr->type); 2840 return_error = BR_FAILED_REPLY; 2841 return_error_param = -EINVAL; 2842 return_error_line = __LINE__; 2843 goto err_bad_object_type; 2844 } 2845 } 2846 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 2847 binder_enqueue_work(proc, tcomplete, &thread->todo); 2848 t->work.type = BINDER_WORK_TRANSACTION; 2849 2850 if (reply) { 2851 binder_inner_proc_lock(target_proc); 2852 if (target_thread->is_dead) { 2853 
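			/*
			 * The thread that was waiting for this reply died
			 * while the reply was being built; bail out and
			 * report BR_DEAD_REPLY via err_dead_proc_or_thread.
			 */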
binder_inner_proc_unlock(target_proc); 2854 goto err_dead_proc_or_thread; 2855 } 2856 BUG_ON(t->buffer->async_transaction != 0); 2857 binder_pop_transaction_ilocked(target_thread, in_reply_to); 2858 binder_enqueue_work_ilocked(&t->work, target_list); 2859 binder_inner_proc_unlock(target_proc); 2860 binder_free_transaction(in_reply_to); 2861 } else if (!(t->flags & TF_ONE_WAY)) { 2862 BUG_ON(t->buffer->async_transaction != 0); 2863 binder_inner_proc_lock(proc); 2864 t->need_reply = 1; 2865 t->from_parent = thread->transaction_stack; 2866 thread->transaction_stack = t; 2867 binder_inner_proc_unlock(proc); 2868 binder_inner_proc_lock(target_proc); 2869 if (target_proc->is_dead || 2870 (target_thread && target_thread->is_dead)) { 2871 binder_inner_proc_unlock(target_proc); 2872 binder_inner_proc_lock(proc); 2873 binder_pop_transaction_ilocked(thread, t); 2874 binder_inner_proc_unlock(proc); 2875 goto err_dead_proc_or_thread; 2876 } 2877 binder_enqueue_work_ilocked(&t->work, target_list); 2878 binder_inner_proc_unlock(target_proc); 2879 } else { 2880 BUG_ON(target_node == NULL); 2881 BUG_ON(t->buffer->async_transaction != 1); 2882 binder_node_lock(target_node); 2883 if (target_node->has_async_transaction) { 2884 target_list = &target_node->async_todo; 2885 target_wait = NULL; 2886 } else 2887 target_node->has_async_transaction = 1; 2888 /* 2889 * Test/set of has_async_transaction 2890 * must be atomic with enqueue on 2891 * async_todo 2892 */ 2893 binder_inner_proc_lock(target_proc); 2894 if (target_proc->is_dead || 2895 (target_thread && target_thread->is_dead)) { 2896 binder_inner_proc_unlock(target_proc); 2897 binder_node_unlock(target_node); 2898 goto err_dead_proc_or_thread; 2899 } 2900 binder_enqueue_work_ilocked(&t->work, target_list); 2901 binder_inner_proc_unlock(target_proc); 2902 binder_node_unlock(target_node); 2903 } 2904 if (target_wait) { 2905 if (reply || !(tr->flags & TF_ONE_WAY)) 2906 wake_up_interruptible_sync(target_wait); 2907 else 2908 wake_up_interruptible(target_wait); 2909 } 2910 if (target_thread) 2911 binder_thread_dec_tmpref(target_thread); 2912 binder_proc_dec_tmpref(target_proc); 2913 /* 2914 * write barrier to synchronize with initialization 2915 * of log entry 2916 */ 2917 smp_wmb(); 2918 WRITE_ONCE(e->debug_id_done, t_debug_id); 2919 return; 2920 2921 err_dead_proc_or_thread: 2922 return_error = BR_DEAD_REPLY; 2923 return_error_line = __LINE__; 2924 err_translate_failed: 2925 err_bad_object_type: 2926 err_bad_offset: 2927 err_bad_parent: 2928 err_copy_data_failed: 2929 trace_binder_transaction_failed_buffer_release(t->buffer); 2930 binder_transaction_buffer_release(target_proc, t->buffer, offp); 2931 target_node = NULL; 2932 t->buffer->transaction = NULL; 2933 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 2934 err_binder_alloc_buf_failed: 2935 kfree(tcomplete); 2936 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2937 err_alloc_tcomplete_failed: 2938 kfree(t); 2939 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2940 err_alloc_t_failed: 2941 err_bad_call_stack: 2942 err_empty_call_stack: 2943 err_dead_binder: 2944 err_invalid_target_handle: 2945 err_no_context_mgr_node: 2946 if (target_thread) 2947 binder_thread_dec_tmpref(target_thread); 2948 if (target_proc) 2949 binder_proc_dec_tmpref(target_proc); 2950 if (target_node) 2951 binder_dec_node(target_node, 1, 0); 2952 2953 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2954 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 2955 proc->pid, thread->pid, return_error, return_error_param, 
2956 (u64)tr->data_size, (u64)tr->offsets_size, 2957 return_error_line); 2958 2959 { 2960 struct binder_transaction_log_entry *fe; 2961 2962 e->return_error = return_error; 2963 e->return_error_param = return_error_param; 2964 e->return_error_line = return_error_line; 2965 fe = binder_transaction_log_add(&binder_transaction_log_failed); 2966 *fe = *e; 2967 /* 2968 * write barrier to synchronize with initialization 2969 * of log entry 2970 */ 2971 smp_wmb(); 2972 WRITE_ONCE(e->debug_id_done, t_debug_id); 2973 WRITE_ONCE(fe->debug_id_done, t_debug_id); 2974 } 2975 2976 BUG_ON(thread->return_error.cmd != BR_OK); 2977 if (in_reply_to) { 2978 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 2979 binder_enqueue_work(thread->proc, 2980 &thread->return_error.work, 2981 &thread->todo); 2982 binder_send_failed_reply(in_reply_to, return_error); 2983 } else { 2984 thread->return_error.cmd = return_error; 2985 binder_enqueue_work(thread->proc, 2986 &thread->return_error.work, 2987 &thread->todo); 2988 } 2989 } 2990 2991 static int binder_thread_write(struct binder_proc *proc, 2992 struct binder_thread *thread, 2993 binder_uintptr_t binder_buffer, size_t size, 2994 binder_size_t *consumed) 2995 { 2996 uint32_t cmd; 2997 struct binder_context *context = proc->context; 2998 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2999 void __user *ptr = buffer + *consumed; 3000 void __user *end = buffer + size; 3001 3002 while (ptr < end && thread->return_error.cmd == BR_OK) { 3003 int ret; 3004 3005 if (get_user(cmd, (uint32_t __user *)ptr)) 3006 return -EFAULT; 3007 ptr += sizeof(uint32_t); 3008 trace_binder_command(cmd); 3009 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3010 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3011 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3012 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3013 } 3014 switch (cmd) { 3015 case BC_INCREFS: 3016 case BC_ACQUIRE: 3017 case BC_RELEASE: 3018 case BC_DECREFS: { 3019 uint32_t target; 3020 const char *debug_string; 3021 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3022 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3023 struct binder_ref_data rdata; 3024 3025 if (get_user(target, (uint32_t __user *)ptr)) 3026 return -EFAULT; 3027 3028 ptr += sizeof(uint32_t); 3029 ret = -1; 3030 if (increment && !target) { 3031 struct binder_node *ctx_mgr_node; 3032 mutex_lock(&context->context_mgr_node_lock); 3033 ctx_mgr_node = context->binder_context_mgr_node; 3034 if (ctx_mgr_node) 3035 ret = binder_inc_ref_for_node( 3036 proc, ctx_mgr_node, 3037 strong, NULL, &rdata); 3038 mutex_unlock(&context->context_mgr_node_lock); 3039 } 3040 if (ret) 3041 ret = binder_update_ref_for_handle( 3042 proc, target, increment, strong, 3043 &rdata); 3044 if (!ret && rdata.desc != target) { 3045 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3046 proc->pid, thread->pid, 3047 target, rdata.desc); 3048 } 3049 switch (cmd) { 3050 case BC_INCREFS: 3051 debug_string = "IncRefs"; 3052 break; 3053 case BC_ACQUIRE: 3054 debug_string = "Acquire"; 3055 break; 3056 case BC_RELEASE: 3057 debug_string = "Release"; 3058 break; 3059 case BC_DECREFS: 3060 default: 3061 debug_string = "DecRefs"; 3062 break; 3063 } 3064 if (ret) { 3065 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3066 proc->pid, thread->pid, debug_string, 3067 strong, target, ret); 3068 break; 3069 } 3070 binder_debug(BINDER_DEBUG_USER_REFS, 3071 "%d:%d %s ref %d desc %d s %d w %d\n", 3072 proc->pid, thread->pid, 
debug_string, 3073 rdata.debug_id, rdata.desc, rdata.strong, 3074 rdata.weak); 3075 break; 3076 } 3077 case BC_INCREFS_DONE: 3078 case BC_ACQUIRE_DONE: { 3079 binder_uintptr_t node_ptr; 3080 binder_uintptr_t cookie; 3081 struct binder_node *node; 3082 bool free_node; 3083 3084 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3085 return -EFAULT; 3086 ptr += sizeof(binder_uintptr_t); 3087 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3088 return -EFAULT; 3089 ptr += sizeof(binder_uintptr_t); 3090 node = binder_get_node(proc, node_ptr); 3091 if (node == NULL) { 3092 binder_user_error("%d:%d %s u%016llx no match\n", 3093 proc->pid, thread->pid, 3094 cmd == BC_INCREFS_DONE ? 3095 "BC_INCREFS_DONE" : 3096 "BC_ACQUIRE_DONE", 3097 (u64)node_ptr); 3098 break; 3099 } 3100 if (cookie != node->cookie) { 3101 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3102 proc->pid, thread->pid, 3103 cmd == BC_INCREFS_DONE ? 3104 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3105 (u64)node_ptr, node->debug_id, 3106 (u64)cookie, (u64)node->cookie); 3107 binder_put_node(node); 3108 break; 3109 } 3110 binder_node_inner_lock(node); 3111 if (cmd == BC_ACQUIRE_DONE) { 3112 if (node->pending_strong_ref == 0) { 3113 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3114 proc->pid, thread->pid, 3115 node->debug_id); 3116 binder_node_inner_unlock(node); 3117 binder_put_node(node); 3118 break; 3119 } 3120 node->pending_strong_ref = 0; 3121 } else { 3122 if (node->pending_weak_ref == 0) { 3123 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3124 proc->pid, thread->pid, 3125 node->debug_id); 3126 binder_node_inner_unlock(node); 3127 binder_put_node(node); 3128 break; 3129 } 3130 node->pending_weak_ref = 0; 3131 } 3132 free_node = binder_dec_node_nilocked(node, 3133 cmd == BC_ACQUIRE_DONE, 0); 3134 WARN_ON(free_node); 3135 binder_debug(BINDER_DEBUG_USER_REFS, 3136 "%d:%d %s node %d ls %d lw %d tr %d\n", 3137 proc->pid, thread->pid, 3138 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3139 node->debug_id, node->local_strong_refs, 3140 node->local_weak_refs, node->tmp_refs); 3141 binder_node_inner_unlock(node); 3142 binder_put_node(node); 3143 break; 3144 } 3145 case BC_ATTEMPT_ACQUIRE: 3146 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3147 return -EINVAL; 3148 case BC_ACQUIRE_RESULT: 3149 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3150 return -EINVAL; 3151 3152 case BC_FREE_BUFFER: { 3153 binder_uintptr_t data_ptr; 3154 struct binder_buffer *buffer; 3155 3156 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3157 return -EFAULT; 3158 ptr += sizeof(binder_uintptr_t); 3159 3160 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3161 data_ptr); 3162 if (buffer == NULL) { 3163 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 3164 proc->pid, thread->pid, (u64)data_ptr); 3165 break; 3166 } 3167 if (!buffer->allow_user_free) { 3168 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 3169 proc->pid, thread->pid, (u64)data_ptr); 3170 break; 3171 } 3172 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3173 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3174 proc->pid, thread->pid, (u64)data_ptr, 3175 buffer->debug_id, 3176 buffer->transaction ? 
"active" : "finished"); 3177 3178 if (buffer->transaction) { 3179 buffer->transaction->buffer = NULL; 3180 buffer->transaction = NULL; 3181 } 3182 if (buffer->async_transaction && buffer->target_node) { 3183 struct binder_node *buf_node; 3184 struct binder_work *w; 3185 3186 buf_node = buffer->target_node; 3187 binder_node_inner_lock(buf_node); 3188 BUG_ON(!buf_node->has_async_transaction); 3189 BUG_ON(buf_node->proc != proc); 3190 w = binder_dequeue_work_head_ilocked( 3191 &buf_node->async_todo); 3192 if (!w) 3193 buf_node->has_async_transaction = 0; 3194 else 3195 binder_enqueue_work_ilocked( 3196 w, &thread->todo); 3197 binder_node_inner_unlock(buf_node); 3198 } 3199 trace_binder_transaction_buffer_release(buffer); 3200 binder_transaction_buffer_release(proc, buffer, NULL); 3201 binder_alloc_free_buf(&proc->alloc, buffer); 3202 break; 3203 } 3204 3205 case BC_TRANSACTION_SG: 3206 case BC_REPLY_SG: { 3207 struct binder_transaction_data_sg tr; 3208 3209 if (copy_from_user(&tr, ptr, sizeof(tr))) 3210 return -EFAULT; 3211 ptr += sizeof(tr); 3212 binder_transaction(proc, thread, &tr.transaction_data, 3213 cmd == BC_REPLY_SG, tr.buffers_size); 3214 break; 3215 } 3216 case BC_TRANSACTION: 3217 case BC_REPLY: { 3218 struct binder_transaction_data tr; 3219 3220 if (copy_from_user(&tr, ptr, sizeof(tr))) 3221 return -EFAULT; 3222 ptr += sizeof(tr); 3223 binder_transaction(proc, thread, &tr, 3224 cmd == BC_REPLY, 0); 3225 break; 3226 } 3227 3228 case BC_REGISTER_LOOPER: 3229 binder_debug(BINDER_DEBUG_THREADS, 3230 "%d:%d BC_REGISTER_LOOPER\n", 3231 proc->pid, thread->pid); 3232 binder_inner_proc_lock(proc); 3233 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3234 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3235 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3236 proc->pid, thread->pid); 3237 } else if (proc->requested_threads == 0) { 3238 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3239 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3240 proc->pid, thread->pid); 3241 } else { 3242 proc->requested_threads--; 3243 proc->requested_threads_started++; 3244 } 3245 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3246 binder_inner_proc_unlock(proc); 3247 break; 3248 case BC_ENTER_LOOPER: 3249 binder_debug(BINDER_DEBUG_THREADS, 3250 "%d:%d BC_ENTER_LOOPER\n", 3251 proc->pid, thread->pid); 3252 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3253 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3254 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3255 proc->pid, thread->pid); 3256 } 3257 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3258 break; 3259 case BC_EXIT_LOOPER: 3260 binder_debug(BINDER_DEBUG_THREADS, 3261 "%d:%d BC_EXIT_LOOPER\n", 3262 proc->pid, thread->pid); 3263 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3264 break; 3265 3266 case BC_REQUEST_DEATH_NOTIFICATION: 3267 case BC_CLEAR_DEATH_NOTIFICATION: { 3268 uint32_t target; 3269 binder_uintptr_t cookie; 3270 struct binder_ref *ref; 3271 struct binder_ref_death *death = NULL; 3272 3273 if (get_user(target, (uint32_t __user *)ptr)) 3274 return -EFAULT; 3275 ptr += sizeof(uint32_t); 3276 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3277 return -EFAULT; 3278 ptr += sizeof(binder_uintptr_t); 3279 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3280 /* 3281 * Allocate memory for death notification 3282 * before taking lock 3283 */ 3284 death = kzalloc(sizeof(*death), GFP_KERNEL); 3285 if (death == NULL) { 3286 
WARN_ON(thread->return_error.cmd != 3287 BR_OK); 3288 thread->return_error.cmd = BR_ERROR; 3289 binder_enqueue_work( 3290 thread->proc, 3291 &thread->return_error.work, 3292 &thread->todo); 3293 binder_debug( 3294 BINDER_DEBUG_FAILED_TRANSACTION, 3295 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3296 proc->pid, thread->pid); 3297 break; 3298 } 3299 } 3300 binder_proc_lock(proc); 3301 ref = binder_get_ref_olocked(proc, target, false); 3302 if (ref == NULL) { 3303 binder_user_error("%d:%d %s invalid ref %d\n", 3304 proc->pid, thread->pid, 3305 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3306 "BC_REQUEST_DEATH_NOTIFICATION" : 3307 "BC_CLEAR_DEATH_NOTIFICATION", 3308 target); 3309 binder_proc_unlock(proc); 3310 kfree(death); 3311 break; 3312 } 3313 3314 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3315 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3316 proc->pid, thread->pid, 3317 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3318 "BC_REQUEST_DEATH_NOTIFICATION" : 3319 "BC_CLEAR_DEATH_NOTIFICATION", 3320 (u64)cookie, ref->data.debug_id, 3321 ref->data.desc, ref->data.strong, 3322 ref->data.weak, ref->node->debug_id); 3323 3324 binder_node_lock(ref->node); 3325 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3326 if (ref->death) { 3327 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3328 proc->pid, thread->pid); 3329 binder_node_unlock(ref->node); 3330 binder_proc_unlock(proc); 3331 kfree(death); 3332 break; 3333 } 3334 binder_stats_created(BINDER_STAT_DEATH); 3335 INIT_LIST_HEAD(&death->work.entry); 3336 death->cookie = cookie; 3337 ref->death = death; 3338 if (ref->node->proc == NULL) { 3339 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3340 if (thread->looper & 3341 (BINDER_LOOPER_STATE_REGISTERED | 3342 BINDER_LOOPER_STATE_ENTERED)) 3343 binder_enqueue_work( 3344 proc, 3345 &ref->death->work, 3346 &thread->todo); 3347 else { 3348 binder_enqueue_work( 3349 proc, 3350 &ref->death->work, 3351 &proc->todo); 3352 wake_up_interruptible( 3353 &proc->wait); 3354 } 3355 } 3356 } else { 3357 if (ref->death == NULL) { 3358 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3359 proc->pid, thread->pid); 3360 binder_node_unlock(ref->node); 3361 binder_proc_unlock(proc); 3362 break; 3363 } 3364 death = ref->death; 3365 if (death->cookie != cookie) { 3366 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3367 proc->pid, thread->pid, 3368 (u64)death->cookie, 3369 (u64)cookie); 3370 binder_node_unlock(ref->node); 3371 binder_proc_unlock(proc); 3372 break; 3373 } 3374 ref->death = NULL; 3375 binder_inner_proc_lock(proc); 3376 if (list_empty(&death->work.entry)) { 3377 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3378 if (thread->looper & 3379 (BINDER_LOOPER_STATE_REGISTERED | 3380 BINDER_LOOPER_STATE_ENTERED)) 3381 binder_enqueue_work_ilocked( 3382 &death->work, 3383 &thread->todo); 3384 else { 3385 binder_enqueue_work_ilocked( 3386 &death->work, 3387 &proc->todo); 3388 wake_up_interruptible( 3389 &proc->wait); 3390 } 3391 } else { 3392 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 3393 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3394 } 3395 binder_inner_proc_unlock(proc); 3396 } 3397 binder_node_unlock(ref->node); 3398 binder_proc_unlock(proc); 3399 } break; 3400 case BC_DEAD_BINDER_DONE: { 3401 struct binder_work *w; 3402 binder_uintptr_t cookie; 3403 struct binder_ref_death *death = NULL; 3404 3405 if (get_user(cookie, (binder_uintptr_t 
__user *)ptr)) 3406 return -EFAULT; 3407 3408 ptr += sizeof(cookie); 3409 binder_inner_proc_lock(proc); 3410 list_for_each_entry(w, &proc->delivered_death, 3411 entry) { 3412 struct binder_ref_death *tmp_death = 3413 container_of(w, 3414 struct binder_ref_death, 3415 work); 3416 3417 if (tmp_death->cookie == cookie) { 3418 death = tmp_death; 3419 break; 3420 } 3421 } 3422 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3423 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 3424 proc->pid, thread->pid, (u64)cookie, 3425 death); 3426 if (death == NULL) { 3427 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3428 proc->pid, thread->pid, (u64)cookie); 3429 binder_inner_proc_unlock(proc); 3430 break; 3431 } 3432 binder_dequeue_work_ilocked(&death->work); 3433 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3434 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3435 if (thread->looper & 3436 (BINDER_LOOPER_STATE_REGISTERED | 3437 BINDER_LOOPER_STATE_ENTERED)) 3438 binder_enqueue_work_ilocked( 3439 &death->work, &thread->todo); 3440 else { 3441 binder_enqueue_work_ilocked( 3442 &death->work, 3443 &proc->todo); 3444 wake_up_interruptible(&proc->wait); 3445 } 3446 } 3447 binder_inner_proc_unlock(proc); 3448 } break; 3449 3450 default: 3451 pr_err("%d:%d unknown command %d\n", 3452 proc->pid, thread->pid, cmd); 3453 return -EINVAL; 3454 } 3455 *consumed = ptr - buffer; 3456 } 3457 return 0; 3458 } 3459 3460 static void binder_stat_br(struct binder_proc *proc, 3461 struct binder_thread *thread, uint32_t cmd) 3462 { 3463 trace_binder_return(cmd); 3464 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 3465 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 3466 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 3467 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 3468 } 3469 } 3470 3471 static int binder_has_proc_work(struct binder_proc *proc, 3472 struct binder_thread *thread) 3473 { 3474 return !binder_worklist_empty(proc, &proc->todo) || 3475 thread->looper_need_return; 3476 } 3477 3478 static int binder_has_thread_work(struct binder_thread *thread) 3479 { 3480 return !binder_worklist_empty(thread->proc, &thread->todo) || 3481 thread->looper_need_return; 3482 } 3483 3484 static int binder_put_node_cmd(struct binder_proc *proc, 3485 struct binder_thread *thread, 3486 void __user **ptrp, 3487 binder_uintptr_t node_ptr, 3488 binder_uintptr_t node_cookie, 3489 int node_debug_id, 3490 uint32_t cmd, const char *cmd_name) 3491 { 3492 void __user *ptr = *ptrp; 3493 3494 if (put_user(cmd, (uint32_t __user *)ptr)) 3495 return -EFAULT; 3496 ptr += sizeof(uint32_t); 3497 3498 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3499 return -EFAULT; 3500 ptr += sizeof(binder_uintptr_t); 3501 3502 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 3503 return -EFAULT; 3504 ptr += sizeof(binder_uintptr_t); 3505 3506 binder_stat_br(proc, thread, cmd); 3507 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 3508 proc->pid, thread->pid, cmd_name, node_debug_id, 3509 (u64)node_ptr, (u64)node_cookie); 3510 3511 *ptrp = ptr; 3512 return 0; 3513 } 3514 3515 static int binder_thread_read(struct binder_proc *proc, 3516 struct binder_thread *thread, 3517 binder_uintptr_t binder_buffer, size_t size, 3518 binder_size_t *consumed, int non_block) 3519 { 3520 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3521 void __user *ptr = buffer + *consumed; 3522 void __user *end = buffer + size; 3523 3524 int ret = 0; 3525 int wait_for_proc_work; 3526 3527 if (*consumed == 0) { 
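		/*
		 * Prime the read buffer with a BR_NOOP before any real
		 * work items are copied out below.
		 */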
3528 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 3529 return -EFAULT; 3530 ptr += sizeof(uint32_t); 3531 } 3532 3533 retry: 3534 binder_inner_proc_lock(proc); 3535 wait_for_proc_work = thread->transaction_stack == NULL && 3536 binder_worklist_empty_ilocked(&thread->todo); 3537 if (wait_for_proc_work) 3538 proc->ready_threads++; 3539 binder_inner_proc_unlock(proc); 3540 3541 thread->looper |= BINDER_LOOPER_STATE_WAITING; 3542 3543 trace_binder_wait_for_work(wait_for_proc_work, 3544 !!thread->transaction_stack, 3545 !binder_worklist_empty(proc, &thread->todo)); 3546 if (wait_for_proc_work) { 3547 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 3548 BINDER_LOOPER_STATE_ENTERED))) { 3549 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 3550 proc->pid, thread->pid, thread->looper); 3551 wait_event_interruptible(binder_user_error_wait, 3552 binder_stop_on_user_error < 2); 3553 } 3554 binder_set_nice(proc->default_priority); 3555 if (non_block) { 3556 if (!binder_has_proc_work(proc, thread)) 3557 ret = -EAGAIN; 3558 } else 3559 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); 3560 } else { 3561 if (non_block) { 3562 if (!binder_has_thread_work(thread)) 3563 ret = -EAGAIN; 3564 } else 3565 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); 3566 } 3567 3568 binder_inner_proc_lock(proc); 3569 if (wait_for_proc_work) 3570 proc->ready_threads--; 3571 binder_inner_proc_unlock(proc); 3572 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 3573 3574 if (ret) 3575 return ret; 3576 3577 while (1) { 3578 uint32_t cmd; 3579 struct binder_transaction_data tr; 3580 struct binder_work *w = NULL; 3581 struct list_head *list = NULL; 3582 struct binder_transaction *t = NULL; 3583 struct binder_thread *t_from; 3584 3585 binder_inner_proc_lock(proc); 3586 if (!binder_worklist_empty_ilocked(&thread->todo)) 3587 list = &thread->todo; 3588 else if (!binder_worklist_empty_ilocked(&proc->todo) && 3589 wait_for_proc_work) 3590 list = &proc->todo; 3591 else { 3592 binder_inner_proc_unlock(proc); 3593 3594 /* no data added */ 3595 if (ptr - buffer == 4 && !thread->looper_need_return) 3596 goto retry; 3597 break; 3598 } 3599 3600 if (end - ptr < sizeof(tr) + 4) { 3601 binder_inner_proc_unlock(proc); 3602 break; 3603 } 3604 w = binder_dequeue_work_head_ilocked(list); 3605 3606 switch (w->type) { 3607 case BINDER_WORK_TRANSACTION: { 3608 binder_inner_proc_unlock(proc); 3609 t = container_of(w, struct binder_transaction, work); 3610 } break; 3611 case BINDER_WORK_RETURN_ERROR: { 3612 struct binder_error *e = container_of( 3613 w, struct binder_error, work); 3614 3615 WARN_ON(e->cmd == BR_OK); 3616 binder_inner_proc_unlock(proc); 3617 if (put_user(e->cmd, (uint32_t __user *)ptr)) 3618 return -EFAULT; 3619 e->cmd = BR_OK; 3620 ptr += sizeof(uint32_t); 3621 3622 binder_stat_br(proc, thread, cmd); 3623 } break; 3624 case BINDER_WORK_TRANSACTION_COMPLETE: { 3625 binder_inner_proc_unlock(proc); 3626 cmd = BR_TRANSACTION_COMPLETE; 3627 if (put_user(cmd, (uint32_t __user *)ptr)) 3628 return -EFAULT; 3629 ptr += sizeof(uint32_t); 3630 3631 binder_stat_br(proc, thread, cmd); 3632 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 3633 "%d:%d BR_TRANSACTION_COMPLETE\n", 3634 proc->pid, thread->pid); 3635 kfree(w); 3636 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3637 } break; 3638 case BINDER_WORK_NODE: { 3639 struct binder_node *node = container_of(w, struct binder_node, work); 
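			/*
			 * BINDER_WORK_NODE: recompute the node's strong and
			 * weak reference state and, via binder_put_node_cmd()
			 * below, tell userspace about any change with
			 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS.
			 */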
3640 int strong, weak; 3641 binder_uintptr_t node_ptr = node->ptr; 3642 binder_uintptr_t node_cookie = node->cookie; 3643 int node_debug_id = node->debug_id; 3644 int has_weak_ref; 3645 int has_strong_ref; 3646 void __user *orig_ptr = ptr; 3647 3648 BUG_ON(proc != node->proc); 3649 strong = node->internal_strong_refs || 3650 node->local_strong_refs; 3651 weak = !hlist_empty(&node->refs) || 3652 node->local_weak_refs || 3653 node->tmp_refs || strong; 3654 has_strong_ref = node->has_strong_ref; 3655 has_weak_ref = node->has_weak_ref; 3656 3657 if (weak && !has_weak_ref) { 3658 node->has_weak_ref = 1; 3659 node->pending_weak_ref = 1; 3660 node->local_weak_refs++; 3661 } 3662 if (strong && !has_strong_ref) { 3663 node->has_strong_ref = 1; 3664 node->pending_strong_ref = 1; 3665 node->local_strong_refs++; 3666 } 3667 if (!strong && has_strong_ref) 3668 node->has_strong_ref = 0; 3669 if (!weak && has_weak_ref) 3670 node->has_weak_ref = 0; 3671 if (!weak && !strong) { 3672 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 3673 "%d:%d node %d u%016llx c%016llx deleted\n", 3674 proc->pid, thread->pid, 3675 node_debug_id, 3676 (u64)node_ptr, 3677 (u64)node_cookie); 3678 rb_erase(&node->rb_node, &proc->nodes); 3679 binder_inner_proc_unlock(proc); 3680 binder_node_lock(node); 3681 /* 3682 * Acquire the node lock before freeing the 3683 * node to serialize with other threads that 3684 * may have been holding the node lock while 3685 * decrementing this node (avoids race where 3686 * this thread frees while the other thread 3687 * is unlocking the node after the final 3688 * decrement) 3689 */ 3690 binder_node_unlock(node); 3691 binder_free_node(node); 3692 } else 3693 binder_inner_proc_unlock(proc); 3694 3695 if (weak && !has_weak_ref) 3696 ret = binder_put_node_cmd( 3697 proc, thread, &ptr, node_ptr, 3698 node_cookie, node_debug_id, 3699 BR_INCREFS, "BR_INCREFS"); 3700 if (!ret && strong && !has_strong_ref) 3701 ret = binder_put_node_cmd( 3702 proc, thread, &ptr, node_ptr, 3703 node_cookie, node_debug_id, 3704 BR_ACQUIRE, "BR_ACQUIRE"); 3705 if (!ret && !strong && has_strong_ref) 3706 ret = binder_put_node_cmd( 3707 proc, thread, &ptr, node_ptr, 3708 node_cookie, node_debug_id, 3709 BR_RELEASE, "BR_RELEASE"); 3710 if (!ret && !weak && has_weak_ref) 3711 ret = binder_put_node_cmd( 3712 proc, thread, &ptr, node_ptr, 3713 node_cookie, node_debug_id, 3714 BR_DECREFS, "BR_DECREFS"); 3715 if (orig_ptr == ptr) 3716 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 3717 "%d:%d node %d u%016llx c%016llx state unchanged\n", 3718 proc->pid, thread->pid, 3719 node_debug_id, 3720 (u64)node_ptr, 3721 (u64)node_cookie); 3722 if (ret) 3723 return ret; 3724 } break; 3725 case BINDER_WORK_DEAD_BINDER: 3726 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3727 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 3728 struct binder_ref_death *death; 3729 uint32_t cmd; 3730 binder_uintptr_t cookie; 3731 3732 death = container_of(w, struct binder_ref_death, work); 3733 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 3734 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 3735 else 3736 cmd = BR_DEAD_BINDER; 3737 cookie = death->cookie; 3738 3739 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3740 "%d:%d %s %016llx\n", 3741 proc->pid, thread->pid, 3742 cmd == BR_DEAD_BINDER ? 
3743 "BR_DEAD_BINDER" : 3744 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3745 (u64)cookie); 3746 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 3747 binder_inner_proc_unlock(proc); 3748 kfree(death); 3749 binder_stats_deleted(BINDER_STAT_DEATH); 3750 } else { 3751 binder_enqueue_work_ilocked( 3752 w, &proc->delivered_death); 3753 binder_inner_proc_unlock(proc); 3754 } 3755 if (put_user(cmd, (uint32_t __user *)ptr)) 3756 return -EFAULT; 3757 ptr += sizeof(uint32_t); 3758 if (put_user(cookie, 3759 (binder_uintptr_t __user *)ptr)) 3760 return -EFAULT; 3761 ptr += sizeof(binder_uintptr_t); 3762 binder_stat_br(proc, thread, cmd); 3763 if (cmd == BR_DEAD_BINDER) 3764 goto done; /* DEAD_BINDER notifications can cause transactions */ 3765 } break; 3766 } 3767 3768 if (!t) 3769 continue; 3770 3771 BUG_ON(t->buffer == NULL); 3772 if (t->buffer->target_node) { 3773 struct binder_node *target_node = t->buffer->target_node; 3774 3775 tr.target.ptr = target_node->ptr; 3776 tr.cookie = target_node->cookie; 3777 t->saved_priority = task_nice(current); 3778 if (t->priority < target_node->min_priority && 3779 !(t->flags & TF_ONE_WAY)) 3780 binder_set_nice(t->priority); 3781 else if (!(t->flags & TF_ONE_WAY) || 3782 t->saved_priority > target_node->min_priority) 3783 binder_set_nice(target_node->min_priority); 3784 cmd = BR_TRANSACTION; 3785 } else { 3786 tr.target.ptr = 0; 3787 tr.cookie = 0; 3788 cmd = BR_REPLY; 3789 } 3790 tr.code = t->code; 3791 tr.flags = t->flags; 3792 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 3793 3794 t_from = binder_get_txn_from(t); 3795 if (t_from) { 3796 struct task_struct *sender = t_from->proc->tsk; 3797 3798 tr.sender_pid = task_tgid_nr_ns(sender, 3799 task_active_pid_ns(current)); 3800 } else { 3801 tr.sender_pid = 0; 3802 } 3803 3804 tr.data_size = t->buffer->data_size; 3805 tr.offsets_size = t->buffer->offsets_size; 3806 tr.data.ptr.buffer = (binder_uintptr_t) 3807 ((uintptr_t)t->buffer->data + 3808 binder_alloc_get_user_buffer_offset(&proc->alloc)); 3809 tr.data.ptr.offsets = tr.data.ptr.buffer + 3810 ALIGN(t->buffer->data_size, 3811 sizeof(void *)); 3812 3813 if (put_user(cmd, (uint32_t __user *)ptr)) { 3814 if (t_from) 3815 binder_thread_dec_tmpref(t_from); 3816 return -EFAULT; 3817 } 3818 ptr += sizeof(uint32_t); 3819 if (copy_to_user(ptr, &tr, sizeof(tr))) { 3820 if (t_from) 3821 binder_thread_dec_tmpref(t_from); 3822 return -EFAULT; 3823 } 3824 ptr += sizeof(tr); 3825 3826 trace_binder_transaction_received(t); 3827 binder_stat_br(proc, thread, cmd); 3828 binder_debug(BINDER_DEBUG_TRANSACTION, 3829 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 3830 proc->pid, thread->pid, 3831 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 3832 "BR_REPLY", 3833 t->debug_id, t_from ? t_from->proc->pid : 0, 3834 t_from ? 
t_from->pid : 0, cmd, 3835 t->buffer->data_size, t->buffer->offsets_size, 3836 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 3837 3838 if (t_from) 3839 binder_thread_dec_tmpref(t_from); 3840 t->buffer->allow_user_free = 1; 3841 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 3842 binder_inner_proc_lock(thread->proc); 3843 t->to_parent = thread->transaction_stack; 3844 t->to_thread = thread; 3845 thread->transaction_stack = t; 3846 binder_inner_proc_unlock(thread->proc); 3847 } else { 3848 binder_free_transaction(t); 3849 } 3850 break; 3851 } 3852 3853 done: 3854 3855 *consumed = ptr - buffer; 3856 binder_inner_proc_lock(proc); 3857 if (proc->requested_threads + proc->ready_threads == 0 && 3858 proc->requested_threads_started < proc->max_threads && 3859 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 3860 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 3861 /*spawn a new thread if we leave this out */) { 3862 proc->requested_threads++; 3863 binder_inner_proc_unlock(proc); 3864 binder_debug(BINDER_DEBUG_THREADS, 3865 "%d:%d BR_SPAWN_LOOPER\n", 3866 proc->pid, thread->pid); 3867 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 3868 return -EFAULT; 3869 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 3870 } else 3871 binder_inner_proc_unlock(proc); 3872 return 0; 3873 } 3874 3875 static void binder_release_work(struct binder_proc *proc, 3876 struct list_head *list) 3877 { 3878 struct binder_work *w; 3879 3880 while (1) { 3881 w = binder_dequeue_work_head(proc, list); 3882 if (!w) 3883 return; 3884 3885 switch (w->type) { 3886 case BINDER_WORK_TRANSACTION: { 3887 struct binder_transaction *t; 3888 3889 t = container_of(w, struct binder_transaction, work); 3890 if (t->buffer->target_node && 3891 !(t->flags & TF_ONE_WAY)) { 3892 binder_send_failed_reply(t, BR_DEAD_REPLY); 3893 } else { 3894 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3895 "undelivered transaction %d\n", 3896 t->debug_id); 3897 binder_free_transaction(t); 3898 } 3899 } break; 3900 case BINDER_WORK_RETURN_ERROR: { 3901 struct binder_error *e = container_of( 3902 w, struct binder_error, work); 3903 3904 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3905 "undelivered TRANSACTION_ERROR: %u\n", 3906 e->cmd); 3907 } break; 3908 case BINDER_WORK_TRANSACTION_COMPLETE: { 3909 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3910 "undelivered TRANSACTION_COMPLETE\n"); 3911 kfree(w); 3912 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3913 } break; 3914 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3915 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 3916 struct binder_ref_death *death; 3917 3918 death = container_of(w, struct binder_ref_death, work); 3919 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3920 "undelivered death notification, %016llx\n", 3921 (u64)death->cookie); 3922 kfree(death); 3923 binder_stats_deleted(BINDER_STAT_DEATH); 3924 } break; 3925 default: 3926 pr_err("unexpected work type, %d, not freed\n", 3927 w->type); 3928 break; 3929 } 3930 } 3931 3932 } 3933 3934 static struct binder_thread *binder_get_thread_ilocked( 3935 struct binder_proc *proc, struct binder_thread *new_thread) 3936 { 3937 struct binder_thread *thread = NULL; 3938 struct rb_node *parent = NULL; 3939 struct rb_node **p = &proc->threads.rb_node; 3940 3941 while (*p) { 3942 parent = *p; 3943 thread = rb_entry(parent, struct binder_thread, rb_node); 3944 3945 if (current->pid < thread->pid) 3946 p = &(*p)->rb_left; 3947 else if (current->pid > thread->pid) 3948 p = &(*p)->rb_right; 3949 else 3950 return thread; 3951 } 3952 
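	/*
	 * No binder_thread exists yet for current->pid.  If the caller
	 * supplied a preallocated new_thread, initialize it and link it
	 * into proc->threads; otherwise return NULL so the caller can
	 * allocate one outside the inner lock and retry.
	 */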
if (!new_thread) 3953 return NULL; 3954 thread = new_thread; 3955 binder_stats_created(BINDER_STAT_THREAD); 3956 thread->proc = proc; 3957 thread->pid = current->pid; 3958 atomic_set(&thread->tmp_ref, 0); 3959 init_waitqueue_head(&thread->wait); 3960 INIT_LIST_HEAD(&thread->todo); 3961 rb_link_node(&thread->rb_node, parent, p); 3962 rb_insert_color(&thread->rb_node, &proc->threads); 3963 thread->looper_need_return = true; 3964 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 3965 thread->return_error.cmd = BR_OK; 3966 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 3967 thread->reply_error.cmd = BR_OK; 3968 3969 return thread; 3970 } 3971 3972 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 3973 { 3974 struct binder_thread *thread; 3975 struct binder_thread *new_thread; 3976 3977 binder_inner_proc_lock(proc); 3978 thread = binder_get_thread_ilocked(proc, NULL); 3979 binder_inner_proc_unlock(proc); 3980 if (!thread) { 3981 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 3982 if (new_thread == NULL) 3983 return NULL; 3984 binder_inner_proc_lock(proc); 3985 thread = binder_get_thread_ilocked(proc, new_thread); 3986 binder_inner_proc_unlock(proc); 3987 if (thread != new_thread) 3988 kfree(new_thread); 3989 } 3990 return thread; 3991 } 3992 3993 static void binder_free_proc(struct binder_proc *proc) 3994 { 3995 BUG_ON(!list_empty(&proc->todo)); 3996 BUG_ON(!list_empty(&proc->delivered_death)); 3997 binder_alloc_deferred_release(&proc->alloc); 3998 put_task_struct(proc->tsk); 3999 binder_stats_deleted(BINDER_STAT_PROC); 4000 kfree(proc); 4001 } 4002 4003 static void binder_free_thread(struct binder_thread *thread) 4004 { 4005 BUG_ON(!list_empty(&thread->todo)); 4006 binder_stats_deleted(BINDER_STAT_THREAD); 4007 binder_proc_dec_tmpref(thread->proc); 4008 kfree(thread); 4009 } 4010 4011 static int binder_thread_release(struct binder_proc *proc, 4012 struct binder_thread *thread) 4013 { 4014 struct binder_transaction *t; 4015 struct binder_transaction *send_reply = NULL; 4016 int active_transactions = 0; 4017 struct binder_transaction *last_t = NULL; 4018 4019 binder_inner_proc_lock(thread->proc); 4020 /* 4021 * take a ref on the proc so it survives 4022 * after we remove this thread from proc->threads. 4023 * The corresponding dec is when we actually 4024 * free the thread in binder_free_thread() 4025 */ 4026 proc->tmp_ref++; 4027 /* 4028 * take a ref on this thread to ensure it 4029 * survives while we are releasing it 4030 */ 4031 atomic_inc(&thread->tmp_ref); 4032 rb_erase(&thread->rb_node, &proc->threads); 4033 t = thread->transaction_stack; 4034 if (t) { 4035 spin_lock(&t->lock); 4036 if (t->to_thread == thread) 4037 send_reply = t; 4038 } 4039 thread->is_dead = true; 4040 4041 while (t) { 4042 last_t = t; 4043 active_transactions++; 4044 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4045 "release %d:%d transaction %d %s, still active\n", 4046 proc->pid, thread->pid, 4047 t->debug_id, 4048 (t->to_thread == thread) ? 
"in" : "out"); 4049 4050 if (t->to_thread == thread) { 4051 t->to_proc = NULL; 4052 t->to_thread = NULL; 4053 if (t->buffer) { 4054 t->buffer->transaction = NULL; 4055 t->buffer = NULL; 4056 } 4057 t = t->to_parent; 4058 } else if (t->from == thread) { 4059 t->from = NULL; 4060 t = t->from_parent; 4061 } else 4062 BUG(); 4063 spin_unlock(&last_t->lock); 4064 if (t) 4065 spin_lock(&t->lock); 4066 } 4067 binder_inner_proc_unlock(thread->proc); 4068 4069 if (send_reply) 4070 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4071 binder_release_work(proc, &thread->todo); 4072 binder_thread_dec_tmpref(thread); 4073 return active_transactions; 4074 } 4075 4076 static unsigned int binder_poll(struct file *filp, 4077 struct poll_table_struct *wait) 4078 { 4079 struct binder_proc *proc = filp->private_data; 4080 struct binder_thread *thread = NULL; 4081 int wait_for_proc_work; 4082 4083 thread = binder_get_thread(proc); 4084 4085 binder_inner_proc_lock(thread->proc); 4086 wait_for_proc_work = thread->transaction_stack == NULL && 4087 binder_worklist_empty_ilocked(&thread->todo); 4088 binder_inner_proc_unlock(thread->proc); 4089 4090 if (wait_for_proc_work) { 4091 if (binder_has_proc_work(proc, thread)) 4092 return POLLIN; 4093 poll_wait(filp, &proc->wait, wait); 4094 if (binder_has_proc_work(proc, thread)) 4095 return POLLIN; 4096 } else { 4097 if (binder_has_thread_work(thread)) 4098 return POLLIN; 4099 poll_wait(filp, &thread->wait, wait); 4100 if (binder_has_thread_work(thread)) 4101 return POLLIN; 4102 } 4103 return 0; 4104 } 4105 4106 static int binder_ioctl_write_read(struct file *filp, 4107 unsigned int cmd, unsigned long arg, 4108 struct binder_thread *thread) 4109 { 4110 int ret = 0; 4111 struct binder_proc *proc = filp->private_data; 4112 unsigned int size = _IOC_SIZE(cmd); 4113 void __user *ubuf = (void __user *)arg; 4114 struct binder_write_read bwr; 4115 4116 if (size != sizeof(struct binder_write_read)) { 4117 ret = -EINVAL; 4118 goto out; 4119 } 4120 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4121 ret = -EFAULT; 4122 goto out; 4123 } 4124 binder_debug(BINDER_DEBUG_READ_WRITE, 4125 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4126 proc->pid, thread->pid, 4127 (u64)bwr.write_size, (u64)bwr.write_buffer, 4128 (u64)bwr.read_size, (u64)bwr.read_buffer); 4129 4130 if (bwr.write_size > 0) { 4131 ret = binder_thread_write(proc, thread, 4132 bwr.write_buffer, 4133 bwr.write_size, 4134 &bwr.write_consumed); 4135 trace_binder_write_done(ret); 4136 if (ret < 0) { 4137 bwr.read_consumed = 0; 4138 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4139 ret = -EFAULT; 4140 goto out; 4141 } 4142 } 4143 if (bwr.read_size > 0) { 4144 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4145 bwr.read_size, 4146 &bwr.read_consumed, 4147 filp->f_flags & O_NONBLOCK); 4148 trace_binder_read_done(ret); 4149 if (!binder_worklist_empty(proc, &proc->todo)) 4150 wake_up_interruptible(&proc->wait); 4151 if (ret < 0) { 4152 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4153 ret = -EFAULT; 4154 goto out; 4155 } 4156 } 4157 binder_debug(BINDER_DEBUG_READ_WRITE, 4158 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4159 proc->pid, thread->pid, 4160 (u64)bwr.write_consumed, (u64)bwr.write_size, 4161 (u64)bwr.read_consumed, (u64)bwr.read_size); 4162 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4163 ret = -EFAULT; 4164 goto out; 4165 } 4166 out: 4167 return ret; 4168 } 4169 4170 static int binder_ioctl_set_ctx_mgr(struct file *filp) 4171 { 4172 int ret = 0; 4173 struct binder_proc *proc = 
filp->private_data; 4174 struct binder_context *context = proc->context; 4175 struct binder_node *new_node; 4176 kuid_t curr_euid = current_euid(); 4177 4178 mutex_lock(&context->context_mgr_node_lock); 4179 if (context->binder_context_mgr_node) { 4180 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4181 ret = -EBUSY; 4182 goto out; 4183 } 4184 ret = security_binder_set_context_mgr(proc->tsk); 4185 if (ret < 0) 4186 goto out; 4187 if (uid_valid(context->binder_context_mgr_uid)) { 4188 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4189 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4190 from_kuid(&init_user_ns, curr_euid), 4191 from_kuid(&init_user_ns, 4192 context->binder_context_mgr_uid)); 4193 ret = -EPERM; 4194 goto out; 4195 } 4196 } else { 4197 context->binder_context_mgr_uid = curr_euid; 4198 } 4199 new_node = binder_new_node(proc, NULL); 4200 if (!new_node) { 4201 ret = -ENOMEM; 4202 goto out; 4203 } 4204 binder_node_lock(new_node); 4205 new_node->local_weak_refs++; 4206 new_node->local_strong_refs++; 4207 new_node->has_strong_ref = 1; 4208 new_node->has_weak_ref = 1; 4209 context->binder_context_mgr_node = new_node; 4210 binder_node_unlock(new_node); 4211 binder_put_node(new_node); 4212 out: 4213 mutex_unlock(&context->context_mgr_node_lock); 4214 return ret; 4215 } 4216 4217 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4218 { 4219 int ret; 4220 struct binder_proc *proc = filp->private_data; 4221 struct binder_thread *thread; 4222 unsigned int size = _IOC_SIZE(cmd); 4223 void __user *ubuf = (void __user *)arg; 4224 4225 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 4226 proc->pid, current->pid, cmd, arg);*/ 4227 4228 trace_binder_ioctl(cmd, arg); 4229 4230 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4231 if (ret) 4232 goto err_unlocked; 4233 4234 thread = binder_get_thread(proc); 4235 if (thread == NULL) { 4236 ret = -ENOMEM; 4237 goto err; 4238 } 4239 4240 switch (cmd) { 4241 case BINDER_WRITE_READ: 4242 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 4243 if (ret) 4244 goto err; 4245 break; 4246 case BINDER_SET_MAX_THREADS: { 4247 int max_threads; 4248 4249 if (copy_from_user(&max_threads, ubuf, 4250 sizeof(max_threads))) { 4251 ret = -EINVAL; 4252 goto err; 4253 } 4254 binder_inner_proc_lock(proc); 4255 proc->max_threads = max_threads; 4256 binder_inner_proc_unlock(proc); 4257 break; 4258 } 4259 case BINDER_SET_CONTEXT_MGR: 4260 ret = binder_ioctl_set_ctx_mgr(filp); 4261 if (ret) 4262 goto err; 4263 break; 4264 case BINDER_THREAD_EXIT: 4265 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 4266 proc->pid, thread->pid); 4267 binder_thread_release(proc, thread); 4268 thread = NULL; 4269 break; 4270 case BINDER_VERSION: { 4271 struct binder_version __user *ver = ubuf; 4272 4273 if (size != sizeof(struct binder_version)) { 4274 ret = -EINVAL; 4275 goto err; 4276 } 4277 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 4278 &ver->protocol_version)) { 4279 ret = -EINVAL; 4280 goto err; 4281 } 4282 break; 4283 } 4284 default: 4285 ret = -EINVAL; 4286 goto err; 4287 } 4288 ret = 0; 4289 err: 4290 if (thread) 4291 thread->looper_need_return = false; 4292 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4293 if (ret && ret != -ERESTARTSYS) 4294 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 4295 err_unlocked: 4296 trace_binder_ioctl_done(ret); 4297 return ret; 4298 } 4299 4300 static void binder_vma_open(struct 
vm_area_struct *vma) 4301 { 4302 struct binder_proc *proc = vma->vm_private_data; 4303 4304 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4305 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4306 proc->pid, vma->vm_start, vma->vm_end, 4307 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4308 (unsigned long)pgprot_val(vma->vm_page_prot)); 4309 } 4310 4311 static void binder_vma_close(struct vm_area_struct *vma) 4312 { 4313 struct binder_proc *proc = vma->vm_private_data; 4314 4315 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4316 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4317 proc->pid, vma->vm_start, vma->vm_end, 4318 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4319 (unsigned long)pgprot_val(vma->vm_page_prot)); 4320 binder_alloc_vma_close(&proc->alloc); 4321 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 4322 } 4323 4324 static int binder_vm_fault(struct vm_fault *vmf) 4325 { 4326 return VM_FAULT_SIGBUS; 4327 } 4328 4329 static const struct vm_operations_struct binder_vm_ops = { 4330 .open = binder_vma_open, 4331 .close = binder_vma_close, 4332 .fault = binder_vm_fault, 4333 }; 4334 4335 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 4336 { 4337 int ret; 4338 struct binder_proc *proc = filp->private_data; 4339 const char *failure_string; 4340 4341 if (proc->tsk != current->group_leader) 4342 return -EINVAL; 4343 4344 if ((vma->vm_end - vma->vm_start) > SZ_4M) 4345 vma->vm_end = vma->vm_start + SZ_4M; 4346 4347 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4348 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 4349 __func__, proc->pid, vma->vm_start, vma->vm_end, 4350 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4351 (unsigned long)pgprot_val(vma->vm_page_prot)); 4352 4353 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 4354 ret = -EPERM; 4355 failure_string = "bad vm_flags"; 4356 goto err_bad_arg; 4357 } 4358 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 4359 vma->vm_ops = &binder_vm_ops; 4360 vma->vm_private_data = proc; 4361 4362 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 4363 if (ret) 4364 return ret; 4365 proc->files = get_files_struct(current); 4366 return 0; 4367 4368 err_bad_arg: 4369 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 4370 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 4371 return ret; 4372 } 4373 4374 static int binder_open(struct inode *nodp, struct file *filp) 4375 { 4376 struct binder_proc *proc; 4377 struct binder_device *binder_dev; 4378 4379 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 4380 current->group_leader->pid, current->pid); 4381 4382 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 4383 if (proc == NULL) 4384 return -ENOMEM; 4385 spin_lock_init(&proc->inner_lock); 4386 spin_lock_init(&proc->outer_lock); 4387 get_task_struct(current->group_leader); 4388 proc->tsk = current->group_leader; 4389 INIT_LIST_HEAD(&proc->todo); 4390 init_waitqueue_head(&proc->wait); 4391 proc->default_priority = task_nice(current); 4392 binder_dev = container_of(filp->private_data, struct binder_device, 4393 miscdev); 4394 proc->context = &binder_dev->context; 4395 binder_alloc_init(&proc->alloc); 4396 4397 binder_stats_created(BINDER_STAT_PROC); 4398 proc->pid = current->group_leader->pid; 4399 INIT_LIST_HEAD(&proc->delivered_death); 4400 filp->private_data = proc; 4401 4402 mutex_lock(&binder_procs_lock); 4403 hlist_add_head(&proc->proc_node, &binder_procs); 4404 mutex_unlock(&binder_procs_lock); 4405 4406 if (binder_debugfs_dir_entry_proc) { 4407 char strbuf[11]; 4408 4409 
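		/*
		 * The debugfs entry is named after the decimal PID; 11 bytes
		 * is enough for any 32-bit value plus the terminating NUL.
		 */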
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
4507 */ 4508 binder_inner_proc_lock(ref->proc); 4509 if (!ref->death) { 4510 binder_inner_proc_unlock(ref->proc); 4511 continue; 4512 } 4513 4514 death++; 4515 4516 BUG_ON(!list_empty(&ref->death->work.entry)); 4517 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4518 binder_enqueue_work_ilocked(&ref->death->work, 4519 &ref->proc->todo); 4520 wake_up_interruptible(&ref->proc->wait); 4521 binder_inner_proc_unlock(ref->proc); 4522 } 4523 4524 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4525 "node %d now dead, refs %d, death %d\n", 4526 node->debug_id, refs, death); 4527 binder_node_unlock(node); 4528 binder_put_node(node); 4529 4530 return refs; 4531 } 4532 4533 static void binder_deferred_release(struct binder_proc *proc) 4534 { 4535 struct binder_context *context = proc->context; 4536 struct rb_node *n; 4537 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 4538 4539 BUG_ON(proc->files); 4540 4541 mutex_lock(&binder_procs_lock); 4542 hlist_del(&proc->proc_node); 4543 mutex_unlock(&binder_procs_lock); 4544 4545 mutex_lock(&context->context_mgr_node_lock); 4546 if (context->binder_context_mgr_node && 4547 context->binder_context_mgr_node->proc == proc) { 4548 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4549 "%s: %d context_mgr_node gone\n", 4550 __func__, proc->pid); 4551 context->binder_context_mgr_node = NULL; 4552 } 4553 mutex_unlock(&context->context_mgr_node_lock); 4554 binder_inner_proc_lock(proc); 4555 /* 4556 * Make sure proc stays alive after we 4557 * remove all the threads 4558 */ 4559 proc->tmp_ref++; 4560 4561 proc->is_dead = true; 4562 threads = 0; 4563 active_transactions = 0; 4564 while ((n = rb_first(&proc->threads))) { 4565 struct binder_thread *thread; 4566 4567 thread = rb_entry(n, struct binder_thread, rb_node); 4568 binder_inner_proc_unlock(proc); 4569 threads++; 4570 active_transactions += binder_thread_release(proc, thread); 4571 binder_inner_proc_lock(proc); 4572 } 4573 4574 nodes = 0; 4575 incoming_refs = 0; 4576 while ((n = rb_first(&proc->nodes))) { 4577 struct binder_node *node; 4578 4579 node = rb_entry(n, struct binder_node, rb_node); 4580 nodes++; 4581 /* 4582 * take a temporary ref on the node before 4583 * calling binder_node_release() which will either 4584 * kfree() the node or call binder_put_node() 4585 */ 4586 binder_inc_node_tmpref_ilocked(node); 4587 rb_erase(&node->rb_node, &proc->nodes); 4588 binder_inner_proc_unlock(proc); 4589 incoming_refs = binder_node_release(node, incoming_refs); 4590 binder_inner_proc_lock(proc); 4591 } 4592 binder_inner_proc_unlock(proc); 4593 4594 outgoing_refs = 0; 4595 binder_proc_lock(proc); 4596 while ((n = rb_first(&proc->refs_by_desc))) { 4597 struct binder_ref *ref; 4598 4599 ref = rb_entry(n, struct binder_ref, rb_node_desc); 4600 outgoing_refs++; 4601 binder_cleanup_ref_olocked(ref); 4602 binder_proc_unlock(proc); 4603 binder_free_ref(ref); 4604 binder_proc_lock(proc); 4605 } 4606 binder_proc_unlock(proc); 4607 4608 binder_release_work(proc, &proc->todo); 4609 binder_release_work(proc, &proc->delivered_death); 4610 4611 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4612 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 4613 __func__, proc->pid, threads, nodes, incoming_refs, 4614 outgoing_refs, active_transactions); 4615 4616 binder_proc_dec_tmpref(proc); 4617 } 4618 4619 static void binder_deferred_func(struct work_struct *work) 4620 { 4621 struct binder_proc *proc; 4622 struct files_struct *files; 4623 4624 int defer; 4625 4626 do { 4627 mutex_lock(&binder_deferred_lock); 4628 if 
(!hlist_empty(&binder_deferred_list)) { 4629 proc = hlist_entry(binder_deferred_list.first, 4630 struct binder_proc, deferred_work_node); 4631 hlist_del_init(&proc->deferred_work_node); 4632 defer = proc->deferred_work; 4633 proc->deferred_work = 0; 4634 } else { 4635 proc = NULL; 4636 defer = 0; 4637 } 4638 mutex_unlock(&binder_deferred_lock); 4639 4640 files = NULL; 4641 if (defer & BINDER_DEFERRED_PUT_FILES) { 4642 files = proc->files; 4643 if (files) 4644 proc->files = NULL; 4645 } 4646 4647 if (defer & BINDER_DEFERRED_FLUSH) 4648 binder_deferred_flush(proc); 4649 4650 if (defer & BINDER_DEFERRED_RELEASE) 4651 binder_deferred_release(proc); /* frees proc */ 4652 4653 if (files) 4654 put_files_struct(files); 4655 } while (proc); 4656 } 4657 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 4658 4659 static void 4660 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 4661 { 4662 mutex_lock(&binder_deferred_lock); 4663 proc->deferred_work |= defer; 4664 if (hlist_unhashed(&proc->deferred_work_node)) { 4665 hlist_add_head(&proc->deferred_work_node, 4666 &binder_deferred_list); 4667 schedule_work(&binder_deferred_work); 4668 } 4669 mutex_unlock(&binder_deferred_lock); 4670 } 4671 4672 static void print_binder_transaction_ilocked(struct seq_file *m, 4673 struct binder_proc *proc, 4674 const char *prefix, 4675 struct binder_transaction *t) 4676 { 4677 struct binder_proc *to_proc; 4678 struct binder_buffer *buffer = t->buffer; 4679 4680 WARN_ON(!spin_is_locked(&proc->inner_lock)); 4681 spin_lock(&t->lock); 4682 to_proc = t->to_proc; 4683 seq_printf(m, 4684 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 4685 prefix, t->debug_id, t, 4686 t->from ? t->from->proc->pid : 0, 4687 t->from ? t->from->pid : 0, 4688 to_proc ? to_proc->pid : 0, 4689 t->to_thread ? 
t->to_thread->pid : 0, 4690 t->code, t->flags, t->priority, t->need_reply); 4691 spin_unlock(&t->lock); 4692 4693 if (proc != to_proc) { 4694 /* 4695 * Can only safely deref buffer if we are holding the 4696 * correct proc inner lock for this node 4697 */ 4698 seq_puts(m, "\n"); 4699 return; 4700 } 4701 4702 if (buffer == NULL) { 4703 seq_puts(m, " buffer free\n"); 4704 return; 4705 } 4706 if (buffer->target_node) 4707 seq_printf(m, " node %d", buffer->target_node->debug_id); 4708 seq_printf(m, " size %zd:%zd data %p\n", 4709 buffer->data_size, buffer->offsets_size, 4710 buffer->data); 4711 } 4712 4713 static void print_binder_work_ilocked(struct seq_file *m, 4714 struct binder_proc *proc, 4715 const char *prefix, 4716 const char *transaction_prefix, 4717 struct binder_work *w) 4718 { 4719 struct binder_node *node; 4720 struct binder_transaction *t; 4721 4722 switch (w->type) { 4723 case BINDER_WORK_TRANSACTION: 4724 t = container_of(w, struct binder_transaction, work); 4725 print_binder_transaction_ilocked( 4726 m, proc, transaction_prefix, t); 4727 break; 4728 case BINDER_WORK_RETURN_ERROR: { 4729 struct binder_error *e = container_of( 4730 w, struct binder_error, work); 4731 4732 seq_printf(m, "%stransaction error: %u\n", 4733 prefix, e->cmd); 4734 } break; 4735 case BINDER_WORK_TRANSACTION_COMPLETE: 4736 seq_printf(m, "%stransaction complete\n", prefix); 4737 break; 4738 case BINDER_WORK_NODE: 4739 node = container_of(w, struct binder_node, work); 4740 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 4741 prefix, node->debug_id, 4742 (u64)node->ptr, (u64)node->cookie); 4743 break; 4744 case BINDER_WORK_DEAD_BINDER: 4745 seq_printf(m, "%shas dead binder\n", prefix); 4746 break; 4747 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4748 seq_printf(m, "%shas cleared dead binder\n", prefix); 4749 break; 4750 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 4751 seq_printf(m, "%shas cleared death notification\n", prefix); 4752 break; 4753 default: 4754 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 4755 break; 4756 } 4757 } 4758 4759 static void print_binder_thread_ilocked(struct seq_file *m, 4760 struct binder_thread *thread, 4761 int print_always) 4762 { 4763 struct binder_transaction *t; 4764 struct binder_work *w; 4765 size_t start_pos = m->count; 4766 size_t header_pos; 4767 4768 WARN_ON(!spin_is_locked(&thread->proc->inner_lock)); 4769 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 4770 thread->pid, thread->looper, 4771 thread->looper_need_return, 4772 atomic_read(&thread->tmp_ref)); 4773 header_pos = m->count; 4774 t = thread->transaction_stack; 4775 while (t) { 4776 if (t->from == thread) { 4777 print_binder_transaction_ilocked(m, thread->proc, 4778 " outgoing transaction", t); 4779 t = t->from_parent; 4780 } else if (t->to_thread == thread) { 4781 print_binder_transaction_ilocked(m, thread->proc, 4782 " incoming transaction", t); 4783 t = t->to_parent; 4784 } else { 4785 print_binder_transaction_ilocked(m, thread->proc, 4786 " bad transaction", t); 4787 t = NULL; 4788 } 4789 } 4790 list_for_each_entry(w, &thread->todo, entry) { 4791 print_binder_work_ilocked(m, thread->proc, " ", 4792 " pending transaction", w); 4793 } 4794 if (!print_always && m->count == header_pos) 4795 m->count = start_pos; 4796 } 4797 4798 static void print_binder_node_nilocked(struct seq_file *m, 4799 struct binder_node *node) 4800 { 4801 struct binder_ref *ref; 4802 struct binder_work *w; 4803 int count; 4804 4805 WARN_ON(!spin_is_locked(&node->lock)); 4806 if (node->proc) 4807 
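		/* dead nodes have no owning proc, so there is no inner lock to assert */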
WARN_ON(!spin_is_locked(&node->proc->inner_lock)); 4808 4809 count = 0; 4810 hlist_for_each_entry(ref, &node->refs, node_entry) 4811 count++; 4812 4813 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", 4814 node->debug_id, (u64)node->ptr, (u64)node->cookie, 4815 node->has_strong_ref, node->has_weak_ref, 4816 node->local_strong_refs, node->local_weak_refs, 4817 node->internal_strong_refs, count, node->tmp_refs); 4818 if (count) { 4819 seq_puts(m, " proc"); 4820 hlist_for_each_entry(ref, &node->refs, node_entry) 4821 seq_printf(m, " %d", ref->proc->pid); 4822 } 4823 seq_puts(m, "\n"); 4824 if (node->proc) { 4825 list_for_each_entry(w, &node->async_todo, entry) 4826 print_binder_work_ilocked(m, node->proc, " ", 4827 " pending async transaction", w); 4828 } 4829 } 4830 4831 static void print_binder_ref_olocked(struct seq_file *m, 4832 struct binder_ref *ref) 4833 { 4834 WARN_ON(!spin_is_locked(&ref->proc->outer_lock)); 4835 binder_node_lock(ref->node); 4836 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 4837 ref->data.debug_id, ref->data.desc, 4838 ref->node->proc ? "" : "dead ", 4839 ref->node->debug_id, ref->data.strong, 4840 ref->data.weak, ref->death); 4841 binder_node_unlock(ref->node); 4842 } 4843 4844 static void print_binder_proc(struct seq_file *m, 4845 struct binder_proc *proc, int print_all) 4846 { 4847 struct binder_work *w; 4848 struct rb_node *n; 4849 size_t start_pos = m->count; 4850 size_t header_pos; 4851 struct binder_node *last_node = NULL; 4852 4853 seq_printf(m, "proc %d\n", proc->pid); 4854 seq_printf(m, "context %s\n", proc->context->name); 4855 header_pos = m->count; 4856 4857 binder_inner_proc_lock(proc); 4858 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 4859 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 4860 rb_node), print_all); 4861 4862 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4863 struct binder_node *node = rb_entry(n, struct binder_node, 4864 rb_node); 4865 /* 4866 * take a temporary reference on the node so it 4867 * survives and isn't removed from the tree 4868 * while we print it. 
4869 */ 4870 binder_inc_node_tmpref_ilocked(node); 4871 /* Need to drop inner lock to take node lock */ 4872 binder_inner_proc_unlock(proc); 4873 if (last_node) 4874 binder_put_node(last_node); 4875 binder_node_inner_lock(node); 4876 print_binder_node_nilocked(m, node); 4877 binder_node_inner_unlock(node); 4878 last_node = node; 4879 binder_inner_proc_lock(proc); 4880 } 4881 binder_inner_proc_unlock(proc); 4882 if (last_node) 4883 binder_put_node(last_node); 4884 4885 if (print_all) { 4886 binder_proc_lock(proc); 4887 for (n = rb_first(&proc->refs_by_desc); 4888 n != NULL; 4889 n = rb_next(n)) 4890 print_binder_ref_olocked(m, rb_entry(n, 4891 struct binder_ref, 4892 rb_node_desc)); 4893 binder_proc_unlock(proc); 4894 } 4895 binder_alloc_print_allocated(m, &proc->alloc); 4896 binder_inner_proc_lock(proc); 4897 list_for_each_entry(w, &proc->todo, entry) 4898 print_binder_work_ilocked(m, proc, " ", 4899 " pending transaction", w); 4900 list_for_each_entry(w, &proc->delivered_death, entry) { 4901 seq_puts(m, " has delivered dead binder\n"); 4902 break; 4903 } 4904 binder_inner_proc_unlock(proc); 4905 if (!print_all && m->count == header_pos) 4906 m->count = start_pos; 4907 } 4908 4909 static const char * const binder_return_strings[] = { 4910 "BR_ERROR", 4911 "BR_OK", 4912 "BR_TRANSACTION", 4913 "BR_REPLY", 4914 "BR_ACQUIRE_RESULT", 4915 "BR_DEAD_REPLY", 4916 "BR_TRANSACTION_COMPLETE", 4917 "BR_INCREFS", 4918 "BR_ACQUIRE", 4919 "BR_RELEASE", 4920 "BR_DECREFS", 4921 "BR_ATTEMPT_ACQUIRE", 4922 "BR_NOOP", 4923 "BR_SPAWN_LOOPER", 4924 "BR_FINISHED", 4925 "BR_DEAD_BINDER", 4926 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4927 "BR_FAILED_REPLY" 4928 }; 4929 4930 static const char * const binder_command_strings[] = { 4931 "BC_TRANSACTION", 4932 "BC_REPLY", 4933 "BC_ACQUIRE_RESULT", 4934 "BC_FREE_BUFFER", 4935 "BC_INCREFS", 4936 "BC_ACQUIRE", 4937 "BC_RELEASE", 4938 "BC_DECREFS", 4939 "BC_INCREFS_DONE", 4940 "BC_ACQUIRE_DONE", 4941 "BC_ATTEMPT_ACQUIRE", 4942 "BC_REGISTER_LOOPER", 4943 "BC_ENTER_LOOPER", 4944 "BC_EXIT_LOOPER", 4945 "BC_REQUEST_DEATH_NOTIFICATION", 4946 "BC_CLEAR_DEATH_NOTIFICATION", 4947 "BC_DEAD_BINDER_DONE", 4948 "BC_TRANSACTION_SG", 4949 "BC_REPLY_SG", 4950 }; 4951 4952 static const char * const binder_objstat_strings[] = { 4953 "proc", 4954 "thread", 4955 "node", 4956 "ref", 4957 "death", 4958 "transaction", 4959 "transaction_complete" 4960 }; 4961 4962 static void print_binder_stats(struct seq_file *m, const char *prefix, 4963 struct binder_stats *stats) 4964 { 4965 int i; 4966 4967 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 4968 ARRAY_SIZE(binder_command_strings)); 4969 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 4970 int temp = atomic_read(&stats->bc[i]); 4971 4972 if (temp) 4973 seq_printf(m, "%s%s: %d\n", prefix, 4974 binder_command_strings[i], temp); 4975 } 4976 4977 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 4978 ARRAY_SIZE(binder_return_strings)); 4979 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 4980 int temp = atomic_read(&stats->br[i]); 4981 4982 if (temp) 4983 seq_printf(m, "%s%s: %d\n", prefix, 4984 binder_return_strings[i], temp); 4985 } 4986 4987 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 4988 ARRAY_SIZE(binder_objstat_strings)); 4989 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 4990 ARRAY_SIZE(stats->obj_deleted)); 4991 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 4992 int created = atomic_read(&stats->obj_created[i]); 4993 int deleted = atomic_read(&stats->obj_deleted[i]); 4994 4995 if (created || deleted) 4996 seq_printf(m, "%s%s: active %d total %d\n", 4997 
prefix, 4998 binder_objstat_strings[i], 4999 created - deleted, 5000 created); 5001 } 5002 } 5003 5004 static void print_binder_proc_stats(struct seq_file *m, 5005 struct binder_proc *proc) 5006 { 5007 struct binder_work *w; 5008 struct rb_node *n; 5009 int count, strong, weak; 5010 size_t free_async_space = 5011 binder_alloc_get_free_async_space(&proc->alloc); 5012 5013 seq_printf(m, "proc %d\n", proc->pid); 5014 seq_printf(m, "context %s\n", proc->context->name); 5015 count = 0; 5016 binder_inner_proc_lock(proc); 5017 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5018 count++; 5019 seq_printf(m, " threads: %d\n", count); 5020 seq_printf(m, " requested threads: %d+%d/%d\n" 5021 " ready threads %d\n" 5022 " free async space %zd\n", proc->requested_threads, 5023 proc->requested_threads_started, proc->max_threads, 5024 proc->ready_threads, 5025 free_async_space); 5026 count = 0; 5027 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 5028 count++; 5029 binder_inner_proc_unlock(proc); 5030 seq_printf(m, " nodes: %d\n", count); 5031 count = 0; 5032 strong = 0; 5033 weak = 0; 5034 binder_proc_lock(proc); 5035 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 5036 struct binder_ref *ref = rb_entry(n, struct binder_ref, 5037 rb_node_desc); 5038 count++; 5039 strong += ref->data.strong; 5040 weak += ref->data.weak; 5041 } 5042 binder_proc_unlock(proc); 5043 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 5044 5045 count = binder_alloc_get_allocated_count(&proc->alloc); 5046 seq_printf(m, " buffers: %d\n", count); 5047 5048 count = 0; 5049 binder_inner_proc_lock(proc); 5050 list_for_each_entry(w, &proc->todo, entry) { 5051 if (w->type == BINDER_WORK_TRANSACTION) 5052 count++; 5053 } 5054 binder_inner_proc_unlock(proc); 5055 seq_printf(m, " pending transactions: %d\n", count); 5056 5057 print_binder_stats(m, " ", &proc->stats); 5058 } 5059 5060 5061 static int binder_state_show(struct seq_file *m, void *unused) 5062 { 5063 struct binder_proc *proc; 5064 struct binder_node *node; 5065 struct binder_node *last_node = NULL; 5066 5067 seq_puts(m, "binder state:\n"); 5068 5069 spin_lock(&binder_dead_nodes_lock); 5070 if (!hlist_empty(&binder_dead_nodes)) 5071 seq_puts(m, "dead nodes:\n"); 5072 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { 5073 /* 5074 * take a temporary reference on the node so it 5075 * survives and isn't removed from the list 5076 * while we print it. 
5077 */ 5078 node->tmp_refs++; 5079 spin_unlock(&binder_dead_nodes_lock); 5080 if (last_node) 5081 binder_put_node(last_node); 5082 binder_node_lock(node); 5083 print_binder_node_nilocked(m, node); 5084 binder_node_unlock(node); 5085 last_node = node; 5086 spin_lock(&binder_dead_nodes_lock); 5087 } 5088 spin_unlock(&binder_dead_nodes_lock); 5089 if (last_node) 5090 binder_put_node(last_node); 5091 5092 mutex_lock(&binder_procs_lock); 5093 hlist_for_each_entry(proc, &binder_procs, proc_node) 5094 print_binder_proc(m, proc, 1); 5095 mutex_unlock(&binder_procs_lock); 5096 5097 return 0; 5098 } 5099 5100 static int binder_stats_show(struct seq_file *m, void *unused) 5101 { 5102 struct binder_proc *proc; 5103 5104 seq_puts(m, "binder stats:\n"); 5105 5106 print_binder_stats(m, "", &binder_stats); 5107 5108 mutex_lock(&binder_procs_lock); 5109 hlist_for_each_entry(proc, &binder_procs, proc_node) 5110 print_binder_proc_stats(m, proc); 5111 mutex_unlock(&binder_procs_lock); 5112 5113 return 0; 5114 } 5115 5116 static int binder_transactions_show(struct seq_file *m, void *unused) 5117 { 5118 struct binder_proc *proc; 5119 5120 seq_puts(m, "binder transactions:\n"); 5121 mutex_lock(&binder_procs_lock); 5122 hlist_for_each_entry(proc, &binder_procs, proc_node) 5123 print_binder_proc(m, proc, 0); 5124 mutex_unlock(&binder_procs_lock); 5125 5126 return 0; 5127 } 5128 5129 static int binder_proc_show(struct seq_file *m, void *unused) 5130 { 5131 struct binder_proc *itr; 5132 int pid = (unsigned long)m->private; 5133 5134 mutex_lock(&binder_procs_lock); 5135 hlist_for_each_entry(itr, &binder_procs, proc_node) { 5136 if (itr->pid == pid) { 5137 seq_puts(m, "binder proc state:\n"); 5138 print_binder_proc(m, itr, 1); 5139 } 5140 } 5141 mutex_unlock(&binder_procs_lock); 5142 5143 return 0; 5144 } 5145 5146 static void print_binder_transaction_log_entry(struct seq_file *m, 5147 struct binder_transaction_log_entry *e) 5148 { 5149 int debug_id = READ_ONCE(e->debug_id_done); 5150 /* 5151 * read barrier to guarantee debug_id_done read before 5152 * we print the log values 5153 */ 5154 smp_rmb(); 5155 seq_printf(m, 5156 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 5157 e->debug_id, (e->call_type == 2) ? "reply" : 5158 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 5159 e->from_thread, e->to_proc, e->to_thread, e->context_name, 5160 e->to_node, e->target_handle, e->data_size, e->offsets_size, 5161 e->return_error, e->return_error_param, 5162 e->return_error_line); 5163 /* 5164 * read-barrier to guarantee read of debug_id_done after 5165 * done printing the fields of the entry 5166 */ 5167 smp_rmb(); 5168 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 5169 "\n" : " (incomplete)\n"); 5170 } 5171 5172 static int binder_transaction_log_show(struct seq_file *m, void *unused) 5173 { 5174 struct binder_transaction_log *log = m->private; 5175 unsigned int log_cur = atomic_read(&log->cur); 5176 unsigned int count; 5177 unsigned int cur; 5178 int i; 5179 5180 count = log_cur + 1; 5181 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
5182 0 : count % ARRAY_SIZE(log->entry); 5183 if (count > ARRAY_SIZE(log->entry) || log->full) 5184 count = ARRAY_SIZE(log->entry); 5185 for (i = 0; i < count; i++) { 5186 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 5187 5188 print_binder_transaction_log_entry(m, &log->entry[index]); 5189 } 5190 return 0; 5191 } 5192 5193 static const struct file_operations binder_fops = { 5194 .owner = THIS_MODULE, 5195 .poll = binder_poll, 5196 .unlocked_ioctl = binder_ioctl, 5197 .compat_ioctl = binder_ioctl, 5198 .mmap = binder_mmap, 5199 .open = binder_open, 5200 .flush = binder_flush, 5201 .release = binder_release, 5202 }; 5203 5204 BINDER_DEBUG_ENTRY(state); 5205 BINDER_DEBUG_ENTRY(stats); 5206 BINDER_DEBUG_ENTRY(transactions); 5207 BINDER_DEBUG_ENTRY(transaction_log); 5208 5209 static int __init init_binder_device(const char *name) 5210 { 5211 int ret; 5212 struct binder_device *binder_device; 5213 5214 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 5215 if (!binder_device) 5216 return -ENOMEM; 5217 5218 binder_device->miscdev.fops = &binder_fops; 5219 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 5220 binder_device->miscdev.name = name; 5221 5222 binder_device->context.binder_context_mgr_uid = INVALID_UID; 5223 binder_device->context.name = name; 5224 mutex_init(&binder_device->context.context_mgr_node_lock); 5225 5226 ret = misc_register(&binder_device->miscdev); 5227 if (ret < 0) { 5228 kfree(binder_device); 5229 return ret; 5230 } 5231 5232 hlist_add_head(&binder_device->hlist, &binder_devices); 5233 5234 return ret; 5235 } 5236 5237 static int __init binder_init(void) 5238 { 5239 int ret; 5240 char *device_name, *device_names; 5241 struct binder_device *device; 5242 struct hlist_node *tmp; 5243 5244 atomic_set(&binder_transaction_log.cur, ~0U); 5245 atomic_set(&binder_transaction_log_failed.cur, ~0U); 5246 5247 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 5248 if (binder_debugfs_dir_entry_root) 5249 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 5250 binder_debugfs_dir_entry_root); 5251 5252 if (binder_debugfs_dir_entry_root) { 5253 debugfs_create_file("state", 5254 S_IRUGO, 5255 binder_debugfs_dir_entry_root, 5256 NULL, 5257 &binder_state_fops); 5258 debugfs_create_file("stats", 5259 S_IRUGO, 5260 binder_debugfs_dir_entry_root, 5261 NULL, 5262 &binder_stats_fops); 5263 debugfs_create_file("transactions", 5264 S_IRUGO, 5265 binder_debugfs_dir_entry_root, 5266 NULL, 5267 &binder_transactions_fops); 5268 debugfs_create_file("transaction_log", 5269 S_IRUGO, 5270 binder_debugfs_dir_entry_root, 5271 &binder_transaction_log, 5272 &binder_transaction_log_fops); 5273 debugfs_create_file("failed_transaction_log", 5274 S_IRUGO, 5275 binder_debugfs_dir_entry_root, 5276 &binder_transaction_log_failed, 5277 &binder_transaction_log_fops); 5278 } 5279 5280 /* 5281 * Copy the module_parameter string, because we don't want to 5282 * tokenize it in-place. 
5283 */ 5284 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 5285 if (!device_names) { 5286 ret = -ENOMEM; 5287 goto err_alloc_device_names_failed; 5288 } 5289 strcpy(device_names, binder_devices_param); 5290 5291 while ((device_name = strsep(&device_names, ","))) { 5292 ret = init_binder_device(device_name); 5293 if (ret) 5294 goto err_init_binder_device_failed; 5295 } 5296 5297 return ret; 5298 5299 err_init_binder_device_failed: 5300 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 5301 misc_deregister(&device->miscdev); 5302 hlist_del(&device->hlist); 5303 kfree(device); 5304 } 5305 err_alloc_device_names_failed: 5306 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 5307 5308 return ret; 5309 } 5310 5311 device_initcall(binder_init); 5312 5313 #define CREATE_TRACE_POINTS 5314 #include "binder_trace.h" 5315 5316 MODULE_LICENSE("GPL v2"); 5317