/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
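 *
 * For example (an illustrative sketch, not a sequence lifted from the
 * driver), code that needed all three locks would take them in the
 * order above and release them in reverse:
 *
 *   binder_proc_lock(proc);         // 1) proc->outer_lock
 *   binder_node_lock(node);         // 2) node->lock
 *   binder_inner_proc_lock(proc);   // 3) proc->inner_lock
 *   ...
 *   binder_inner_proc_unlock(proc);
 *   binder_node_unlock(node);
 *   binder_proc_unlock(proc);
 *
 * binder_node_inner_lock()/binder_node_inner_unlock() below combine
 * steps 2) and 3) for the common node-plus-inner case.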
50 */ 51 52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 53 54 #include <linux/fdtable.h> 55 #include <linux/file.h> 56 #include <linux/freezer.h> 57 #include <linux/fs.h> 58 #include <linux/list.h> 59 #include <linux/miscdevice.h> 60 #include <linux/module.h> 61 #include <linux/mutex.h> 62 #include <linux/nsproxy.h> 63 #include <linux/poll.h> 64 #include <linux/debugfs.h> 65 #include <linux/rbtree.h> 66 #include <linux/sched/signal.h> 67 #include <linux/sched/mm.h> 68 #include <linux/seq_file.h> 69 #include <linux/uaccess.h> 70 #include <linux/pid_namespace.h> 71 #include <linux/security.h> 72 #include <linux/spinlock.h> 73 #include <linux/ratelimit.h> 74 #include <linux/syscalls.h> 75 76 #include <uapi/linux/android/binder.h> 77 78 #include <asm/cacheflush.h> 79 80 #include "binder_alloc.h" 81 #include "binder_trace.h" 82 83 static HLIST_HEAD(binder_deferred_list); 84 static DEFINE_MUTEX(binder_deferred_lock); 85 86 static HLIST_HEAD(binder_devices); 87 static HLIST_HEAD(binder_procs); 88 static DEFINE_MUTEX(binder_procs_lock); 89 90 static HLIST_HEAD(binder_dead_nodes); 91 static DEFINE_SPINLOCK(binder_dead_nodes_lock); 92 93 static struct dentry *binder_debugfs_dir_entry_root; 94 static struct dentry *binder_debugfs_dir_entry_proc; 95 static atomic_t binder_last_id; 96 97 #define BINDER_DEBUG_ENTRY(name) \ 98 static int binder_##name##_open(struct inode *inode, struct file *file) \ 99 { \ 100 return single_open(file, binder_##name##_show, inode->i_private); \ 101 } \ 102 \ 103 static const struct file_operations binder_##name##_fops = { \ 104 .owner = THIS_MODULE, \ 105 .open = binder_##name##_open, \ 106 .read = seq_read, \ 107 .llseek = seq_lseek, \ 108 .release = single_release, \ 109 } 110 111 static int binder_proc_show(struct seq_file *m, void *unused); 112 BINDER_DEBUG_ENTRY(proc); 113 114 /* This is only defined in include/asm-arm/sizes.h */ 115 #ifndef SZ_1K 116 #define SZ_1K 0x400 117 #endif 118 119 #ifndef SZ_4M 120 #define SZ_4M 0x400000 121 #endif 122 123 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 124 125 enum { 126 BINDER_DEBUG_USER_ERROR = 1U << 0, 127 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 128 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 129 BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 130 BINDER_DEBUG_DEAD_BINDER = 1U << 4, 131 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 132 BINDER_DEBUG_READ_WRITE = 1U << 6, 133 BINDER_DEBUG_USER_REFS = 1U << 7, 134 BINDER_DEBUG_THREADS = 1U << 8, 135 BINDER_DEBUG_TRANSACTION = 1U << 9, 136 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 137 BINDER_DEBUG_FREE_BUFFER = 1U << 11, 138 BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 139 BINDER_DEBUG_PRIORITY_CAP = 1U << 13, 140 BINDER_DEBUG_SPINLOCKS = 1U << 14, 141 }; 142 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 143 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 144 module_param_named(debug_mask, binder_debug_mask, uint, 0644); 145 146 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; 147 module_param_named(devices, binder_devices_param, charp, 0444); 148 149 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 150 static int binder_stop_on_user_error; 151 152 static int binder_set_stop_on_user_error(const char *val, 153 const struct kernel_param *kp) 154 { 155 int ret; 156 157 ret = param_set_int(val, kp); 158 if (binder_stop_on_user_error < 2) 159 wake_up(&binder_user_error_wait); 160 return ret; 161 } 162 module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 163 param_get_int, &binder_stop_on_user_error, 0644); 164 165 #define 
binder_debug(mask, x...) \ 166 do { \ 167 if (binder_debug_mask & mask) \ 168 pr_info_ratelimited(x); \ 169 } while (0) 170 171 #define binder_user_error(x...) \ 172 do { \ 173 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 174 pr_info_ratelimited(x); \ 175 if (binder_stop_on_user_error) \ 176 binder_stop_on_user_error = 2; \ 177 } while (0) 178 179 #define to_flat_binder_object(hdr) \ 180 container_of(hdr, struct flat_binder_object, hdr) 181 182 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) 183 184 #define to_binder_buffer_object(hdr) \ 185 container_of(hdr, struct binder_buffer_object, hdr) 186 187 #define to_binder_fd_array_object(hdr) \ 188 container_of(hdr, struct binder_fd_array_object, hdr) 189 190 enum binder_stat_types { 191 BINDER_STAT_PROC, 192 BINDER_STAT_THREAD, 193 BINDER_STAT_NODE, 194 BINDER_STAT_REF, 195 BINDER_STAT_DEATH, 196 BINDER_STAT_TRANSACTION, 197 BINDER_STAT_TRANSACTION_COMPLETE, 198 BINDER_STAT_COUNT 199 }; 200 201 struct binder_stats { 202 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; 203 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; 204 atomic_t obj_created[BINDER_STAT_COUNT]; 205 atomic_t obj_deleted[BINDER_STAT_COUNT]; 206 }; 207 208 static struct binder_stats binder_stats; 209 210 static inline void binder_stats_deleted(enum binder_stat_types type) 211 { 212 atomic_inc(&binder_stats.obj_deleted[type]); 213 } 214 215 static inline void binder_stats_created(enum binder_stat_types type) 216 { 217 atomic_inc(&binder_stats.obj_created[type]); 218 } 219 220 struct binder_transaction_log_entry { 221 int debug_id; 222 int debug_id_done; 223 int call_type; 224 int from_proc; 225 int from_thread; 226 int target_handle; 227 int to_proc; 228 int to_thread; 229 int to_node; 230 int data_size; 231 int offsets_size; 232 int return_error_line; 233 uint32_t return_error; 234 uint32_t return_error_param; 235 const char *context_name; 236 }; 237 struct binder_transaction_log { 238 atomic_t cur; 239 bool full; 240 struct binder_transaction_log_entry entry[32]; 241 }; 242 static struct binder_transaction_log binder_transaction_log; 243 static struct binder_transaction_log binder_transaction_log_failed; 244 245 static struct binder_transaction_log_entry *binder_transaction_log_add( 246 struct binder_transaction_log *log) 247 { 248 struct binder_transaction_log_entry *e; 249 unsigned int cur = atomic_inc_return(&log->cur); 250 251 if (cur >= ARRAY_SIZE(log->entry)) 252 log->full = true; 253 e = &log->entry[cur % ARRAY_SIZE(log->entry)]; 254 WRITE_ONCE(e->debug_id_done, 0); 255 /* 256 * write-barrier to synchronize access to e->debug_id_done. 257 * We make sure the initialized 0 value is seen before 258 * memset() other fields are zeroed by memset. 259 */ 260 smp_wmb(); 261 memset(e, 0, sizeof(*e)); 262 return e; 263 } 264 265 struct binder_context { 266 struct binder_node *binder_context_mgr_node; 267 struct mutex context_mgr_node_lock; 268 269 kuid_t binder_context_mgr_uid; 270 const char *name; 271 }; 272 273 struct binder_device { 274 struct hlist_node hlist; 275 struct miscdevice miscdev; 276 struct binder_context context; 277 }; 278 279 /** 280 * struct binder_work - work enqueued on a worklist 281 * @entry: node enqueued on list 282 * @type: type of work to be performed 283 * 284 * There are separate work lists for proc, thread, and node (async). 
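 *
 * A binder_work is normally embedded in a containing object (for
 * example the struct binder_error or struct binder_transaction
 * defined below), so code that dequeues a work item typically
 * recovers the container with container_of(), along the lines of:
 *
 *   e = container_of(w, struct binder_error, work);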
285 */ 286 struct binder_work { 287 struct list_head entry; 288 289 enum { 290 BINDER_WORK_TRANSACTION = 1, 291 BINDER_WORK_TRANSACTION_COMPLETE, 292 BINDER_WORK_RETURN_ERROR, 293 BINDER_WORK_NODE, 294 BINDER_WORK_DEAD_BINDER, 295 BINDER_WORK_DEAD_BINDER_AND_CLEAR, 296 BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 297 } type; 298 }; 299 300 struct binder_error { 301 struct binder_work work; 302 uint32_t cmd; 303 }; 304 305 /** 306 * struct binder_node - binder node bookkeeping 307 * @debug_id: unique ID for debugging 308 * (invariant after initialized) 309 * @lock: lock for node fields 310 * @work: worklist element for node work 311 * (protected by @proc->inner_lock) 312 * @rb_node: element for proc->nodes tree 313 * (protected by @proc->inner_lock) 314 * @dead_node: element for binder_dead_nodes list 315 * (protected by binder_dead_nodes_lock) 316 * @proc: binder_proc that owns this node 317 * (invariant after initialized) 318 * @refs: list of references on this node 319 * (protected by @lock) 320 * @internal_strong_refs: used to take strong references when 321 * initiating a transaction 322 * (protected by @proc->inner_lock if @proc 323 * and by @lock) 324 * @local_weak_refs: weak user refs from local process 325 * (protected by @proc->inner_lock if @proc 326 * and by @lock) 327 * @local_strong_refs: strong user refs from local process 328 * (protected by @proc->inner_lock if @proc 329 * and by @lock) 330 * @tmp_refs: temporary kernel refs 331 * (protected by @proc->inner_lock while @proc 332 * is valid, and by binder_dead_nodes_lock 333 * if @proc is NULL. During inc/dec and node release 334 * it is also protected by @lock to provide safety 335 * as the node dies and @proc becomes NULL) 336 * @ptr: userspace pointer for node 337 * (invariant, no lock needed) 338 * @cookie: userspace cookie for node 339 * (invariant, no lock needed) 340 * @has_strong_ref: userspace notified of strong ref 341 * (protected by @proc->inner_lock if @proc 342 * and by @lock) 343 * @pending_strong_ref: userspace has acked notification of strong ref 344 * (protected by @proc->inner_lock if @proc 345 * and by @lock) 346 * @has_weak_ref: userspace notified of weak ref 347 * (protected by @proc->inner_lock if @proc 348 * and by @lock) 349 * @pending_weak_ref: userspace has acked notification of weak ref 350 * (protected by @proc->inner_lock if @proc 351 * and by @lock) 352 * @has_async_transaction: async transaction to node in progress 353 * (protected by @lock) 354 * @accept_fds: file descriptor operations supported for node 355 * (invariant after initialized) 356 * @min_priority: minimum scheduling priority 357 * (invariant after initialized) 358 * @async_todo: list of async work items 359 * (protected by @proc->inner_lock) 360 * 361 * Bookkeeping structure for binder nodes. 
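 *
 * The @tmp_refs count is what allows lookup helpers to hand out a
 * node safely: for instance, binder_get_node() below returns the
 * node with a temporary reference held, which the caller is expected
 * to drop again:
 *
 *   node = binder_get_node(proc, ptr);
 *   if (node) {
 *           ...
 *           binder_put_node(node);
 *   }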
362 */ 363 struct binder_node { 364 int debug_id; 365 spinlock_t lock; 366 struct binder_work work; 367 union { 368 struct rb_node rb_node; 369 struct hlist_node dead_node; 370 }; 371 struct binder_proc *proc; 372 struct hlist_head refs; 373 int internal_strong_refs; 374 int local_weak_refs; 375 int local_strong_refs; 376 int tmp_refs; 377 binder_uintptr_t ptr; 378 binder_uintptr_t cookie; 379 struct { 380 /* 381 * bitfield elements protected by 382 * proc inner_lock 383 */ 384 u8 has_strong_ref:1; 385 u8 pending_strong_ref:1; 386 u8 has_weak_ref:1; 387 u8 pending_weak_ref:1; 388 }; 389 struct { 390 /* 391 * invariant after initialization 392 */ 393 u8 accept_fds:1; 394 u8 min_priority; 395 }; 396 bool has_async_transaction; 397 struct list_head async_todo; 398 }; 399 400 struct binder_ref_death { 401 /** 402 * @work: worklist element for death notifications 403 * (protected by inner_lock of the proc that 404 * this ref belongs to) 405 */ 406 struct binder_work work; 407 binder_uintptr_t cookie; 408 }; 409 410 /** 411 * struct binder_ref_data - binder_ref counts and id 412 * @debug_id: unique ID for the ref 413 * @desc: unique userspace handle for ref 414 * @strong: strong ref count (debugging only if not locked) 415 * @weak: weak ref count (debugging only if not locked) 416 * 417 * Structure to hold ref count and ref id information. Since 418 * the actual ref can only be accessed with a lock, this structure 419 * is used to return information about the ref to callers of 420 * ref inc/dec functions. 421 */ 422 struct binder_ref_data { 423 int debug_id; 424 uint32_t desc; 425 int strong; 426 int weak; 427 }; 428 429 /** 430 * struct binder_ref - struct to track references on nodes 431 * @data: binder_ref_data containing id, handle, and current refcounts 432 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree 433 * @rb_node_node: node for lookup by @node in proc's rb_tree 434 * @node_entry: list entry for node->refs list in target node 435 * (protected by @node->lock) 436 * @proc: binder_proc containing ref 437 * @node: binder_node of target node. When cleaning up a 438 * ref for deletion in binder_cleanup_ref, a non-NULL 439 * @node indicates the node must be freed 440 * @death: pointer to death notification (ref_death) if requested 441 * (protected by @node->lock) 442 * 443 * Structure to track references from procA to target node (on procB). This 444 * structure is unsafe to access without holding @proc->outer_lock. 
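 *
 * A typical lookup therefore brackets the access with the outer
 * lock, as binder_update_ref_for_handle() later in this file does:
 *
 *   binder_proc_lock(proc);
 *   ref = binder_get_ref_olocked(proc, desc, strong);
 *   ...
 *   binder_proc_unlock(proc);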
445 */ 446 struct binder_ref { 447 /* Lookups needed: */ 448 /* node + proc => ref (transaction) */ 449 /* desc + proc => ref (transaction, inc/dec ref) */ 450 /* node => refs + procs (proc exit) */ 451 struct binder_ref_data data; 452 struct rb_node rb_node_desc; 453 struct rb_node rb_node_node; 454 struct hlist_node node_entry; 455 struct binder_proc *proc; 456 struct binder_node *node; 457 struct binder_ref_death *death; 458 }; 459 460 enum binder_deferred_state { 461 BINDER_DEFERRED_FLUSH = 0x01, 462 BINDER_DEFERRED_RELEASE = 0x02, 463 }; 464 465 /** 466 * struct binder_proc - binder process bookkeeping 467 * @proc_node: element for binder_procs list 468 * @threads: rbtree of binder_threads in this proc 469 * (protected by @inner_lock) 470 * @nodes: rbtree of binder nodes associated with 471 * this proc ordered by node->ptr 472 * (protected by @inner_lock) 473 * @refs_by_desc: rbtree of refs ordered by ref->desc 474 * (protected by @outer_lock) 475 * @refs_by_node: rbtree of refs ordered by ref->node 476 * (protected by @outer_lock) 477 * @waiting_threads: threads currently waiting for proc work 478 * (protected by @inner_lock) 479 * @pid PID of group_leader of process 480 * (invariant after initialized) 481 * @tsk task_struct for group_leader of process 482 * (invariant after initialized) 483 * @deferred_work_node: element for binder_deferred_list 484 * (protected by binder_deferred_lock) 485 * @deferred_work: bitmap of deferred work to perform 486 * (protected by binder_deferred_lock) 487 * @is_dead: process is dead and awaiting free 488 * when outstanding transactions are cleaned up 489 * (protected by @inner_lock) 490 * @todo: list of work for this process 491 * (protected by @inner_lock) 492 * @stats: per-process binder statistics 493 * (atomics, no lock needed) 494 * @delivered_death: list of delivered death notification 495 * (protected by @inner_lock) 496 * @max_threads: cap on number of binder threads 497 * (protected by @inner_lock) 498 * @requested_threads: number of binder threads requested but not 499 * yet started. In current implementation, can 500 * only be 0 or 1. 
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED = 0x01,
        BINDER_LOOPER_STATE_ENTERED = 0x02,
        BINDER_LOOPER_STATE_EXITED = 0x04,
        BINDER_LOOPER_STATE_INVALID = 0x08,
        BINDER_LOOPER_STATE_WAITING = 0x10,
        BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
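 *
 * The @tmp_ref count keeps a thread alive while a transaction still
 * refers to it; binder_get_txn_from() below takes the reference and
 * binder_thread_dec_tmpref() drops it again, roughly:
 *
 *   from = binder_get_txn_from(t);
 *   if (from) {
 *           ...
 *           binder_thread_dec_tmpref(from);
 *   }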
590 */ 591 struct binder_thread { 592 struct binder_proc *proc; 593 struct rb_node rb_node; 594 struct list_head waiting_thread_node; 595 int pid; 596 int looper; /* only modified by this thread */ 597 bool looper_need_return; /* can be written by other thread */ 598 struct binder_transaction *transaction_stack; 599 struct list_head todo; 600 bool process_todo; 601 struct binder_error return_error; 602 struct binder_error reply_error; 603 wait_queue_head_t wait; 604 struct binder_stats stats; 605 atomic_t tmp_ref; 606 bool is_dead; 607 }; 608 609 /** 610 * struct binder_txn_fd_fixup - transaction fd fixup list element 611 * @fixup_entry: list entry 612 * @file: struct file to be associated with new fd 613 * @offset: offset in buffer data to this fixup 614 * 615 * List element for fd fixups in a transaction. Since file 616 * descriptors need to be allocated in the context of the 617 * target process, we pass each fd to be processed in this 618 * struct. 619 */ 620 struct binder_txn_fd_fixup { 621 struct list_head fixup_entry; 622 struct file *file; 623 size_t offset; 624 }; 625 626 struct binder_transaction { 627 int debug_id; 628 struct binder_work work; 629 struct binder_thread *from; 630 struct binder_transaction *from_parent; 631 struct binder_proc *to_proc; 632 struct binder_thread *to_thread; 633 struct binder_transaction *to_parent; 634 unsigned need_reply:1; 635 /* unsigned is_dead:1; */ /* not used at the moment */ 636 637 struct binder_buffer *buffer; 638 unsigned int code; 639 unsigned int flags; 640 long priority; 641 long saved_priority; 642 kuid_t sender_euid; 643 struct list_head fd_fixups; 644 /** 645 * @lock: protects @from, @to_proc, and @to_thread 646 * 647 * @from, @to_proc, and @to_thread can be set to NULL 648 * during thread teardown 649 */ 650 spinlock_t lock; 651 }; 652 653 /** 654 * binder_proc_lock() - Acquire outer lock for given binder_proc 655 * @proc: struct binder_proc to acquire 656 * 657 * Acquires proc->outer_lock. Used to protect binder_ref 658 * structures associated with the given proc. 659 */ 660 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) 661 static void 662 _binder_proc_lock(struct binder_proc *proc, int line) 663 { 664 binder_debug(BINDER_DEBUG_SPINLOCKS, 665 "%s: line=%d\n", __func__, line); 666 spin_lock(&proc->outer_lock); 667 } 668 669 /** 670 * binder_proc_unlock() - Release spinlock for given binder_proc 671 * @proc: struct binder_proc to acquire 672 * 673 * Release lock acquired via binder_proc_lock() 674 */ 675 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) 676 static void 677 _binder_proc_unlock(struct binder_proc *proc, int line) 678 { 679 binder_debug(BINDER_DEBUG_SPINLOCKS, 680 "%s: line=%d\n", __func__, line); 681 spin_unlock(&proc->outer_lock); 682 } 683 684 /** 685 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc 686 * @proc: struct binder_proc to acquire 687 * 688 * Acquires proc->inner_lock. 
 * Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to acquire
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:        struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work:   struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work:   struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
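 *
 * Callers that do not already hold the lock use the
 * binder_enqueue_thread_work() wrapper that follows, which simply
 * does:
 *
 *   binder_inner_proc_lock(thread->proc);
 *   binder_enqueue_thread_work_ilocked(thread, work);
 *   binder_inner_proc_unlock(thread->proc);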
851 */ 852 static void 853 binder_enqueue_thread_work_ilocked(struct binder_thread *thread, 854 struct binder_work *work) 855 { 856 WARN_ON(!list_empty(&thread->waiting_thread_node)); 857 binder_enqueue_work_ilocked(work, &thread->todo); 858 thread->process_todo = true; 859 } 860 861 /** 862 * binder_enqueue_thread_work() - Add an item to the thread work list 863 * @thread: thread to queue work to 864 * @work: struct binder_work to add to list 865 * 866 * Adds the work to the todo list of the thread, and enables processing 867 * of the todo queue. 868 */ 869 static void 870 binder_enqueue_thread_work(struct binder_thread *thread, 871 struct binder_work *work) 872 { 873 binder_inner_proc_lock(thread->proc); 874 binder_enqueue_thread_work_ilocked(thread, work); 875 binder_inner_proc_unlock(thread->proc); 876 } 877 878 static void 879 binder_dequeue_work_ilocked(struct binder_work *work) 880 { 881 list_del_init(&work->entry); 882 } 883 884 /** 885 * binder_dequeue_work() - Removes an item from the work list 886 * @proc: binder_proc associated with list 887 * @work: struct binder_work to remove from list 888 * 889 * Removes the specified work item from whatever list it is on. 890 * Can safely be called if work is not on any list. 891 */ 892 static void 893 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work) 894 { 895 binder_inner_proc_lock(proc); 896 binder_dequeue_work_ilocked(work); 897 binder_inner_proc_unlock(proc); 898 } 899 900 static struct binder_work *binder_dequeue_work_head_ilocked( 901 struct list_head *list) 902 { 903 struct binder_work *w; 904 905 w = list_first_entry_or_null(list, struct binder_work, entry); 906 if (w) 907 list_del_init(&w->entry); 908 return w; 909 } 910 911 /** 912 * binder_dequeue_work_head() - Dequeues the item at head of list 913 * @proc: binder_proc associated with list 914 * @list: list to dequeue head 915 * 916 * Removes the head of the list if there are items on the list 917 * 918 * Return: pointer dequeued binder_work, NULL if list was empty 919 */ 920 static struct binder_work *binder_dequeue_work_head( 921 struct binder_proc *proc, 922 struct list_head *list) 923 { 924 struct binder_work *w; 925 926 binder_inner_proc_lock(proc); 927 w = binder_dequeue_work_head_ilocked(list); 928 binder_inner_proc_unlock(proc); 929 return w; 930 } 931 932 static void 933 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); 934 static void binder_free_thread(struct binder_thread *thread); 935 static void binder_free_proc(struct binder_proc *proc); 936 static void binder_inc_node_tmpref_ilocked(struct binder_node *node); 937 938 static bool binder_has_work_ilocked(struct binder_thread *thread, 939 bool do_proc_work) 940 { 941 return thread->process_todo || 942 thread->looper_need_return || 943 (do_proc_work && 944 !binder_worklist_empty_ilocked(&thread->proc->todo)); 945 } 946 947 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) 948 { 949 bool has_work; 950 951 binder_inner_proc_lock(thread->proc); 952 has_work = binder_has_work_ilocked(thread, do_proc_work); 953 binder_inner_proc_unlock(thread->proc); 954 955 return has_work; 956 } 957 958 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) 959 { 960 return !thread->transaction_stack && 961 binder_worklist_empty_ilocked(&thread->todo) && 962 (thread->looper & (BINDER_LOOPER_STATE_ENTERED | 963 BINDER_LOOPER_STATE_REGISTERED)); 964 } 965 966 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, 967 
bool sync) 968 { 969 struct rb_node *n; 970 struct binder_thread *thread; 971 972 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 973 thread = rb_entry(n, struct binder_thread, rb_node); 974 if (thread->looper & BINDER_LOOPER_STATE_POLL && 975 binder_available_for_proc_work_ilocked(thread)) { 976 if (sync) 977 wake_up_interruptible_sync(&thread->wait); 978 else 979 wake_up_interruptible(&thread->wait); 980 } 981 } 982 } 983 984 /** 985 * binder_select_thread_ilocked() - selects a thread for doing proc work. 986 * @proc: process to select a thread from 987 * 988 * Note that calling this function moves the thread off the waiting_threads 989 * list, so it can only be woken up by the caller of this function, or a 990 * signal. Therefore, callers *should* always wake up the thread this function 991 * returns. 992 * 993 * Return: If there's a thread currently waiting for process work, 994 * returns that thread. Otherwise returns NULL. 995 */ 996 static struct binder_thread * 997 binder_select_thread_ilocked(struct binder_proc *proc) 998 { 999 struct binder_thread *thread; 1000 1001 assert_spin_locked(&proc->inner_lock); 1002 thread = list_first_entry_or_null(&proc->waiting_threads, 1003 struct binder_thread, 1004 waiting_thread_node); 1005 1006 if (thread) 1007 list_del_init(&thread->waiting_thread_node); 1008 1009 return thread; 1010 } 1011 1012 /** 1013 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work. 1014 * @proc: process to wake up a thread in 1015 * @thread: specific thread to wake-up (may be NULL) 1016 * @sync: whether to do a synchronous wake-up 1017 * 1018 * This function wakes up a thread in the @proc process. 1019 * The caller may provide a specific thread to wake-up in 1020 * the @thread parameter. If @thread is NULL, this function 1021 * will wake up threads that have called poll(). 1022 * 1023 * Note that for this function to work as expected, callers 1024 * should first call binder_select_thread() to find a thread 1025 * to handle the work (if they don't have a thread already), 1026 * and pass the result into the @thread parameter. 1027 */ 1028 static void binder_wakeup_thread_ilocked(struct binder_proc *proc, 1029 struct binder_thread *thread, 1030 bool sync) 1031 { 1032 assert_spin_locked(&proc->inner_lock); 1033 1034 if (thread) { 1035 if (sync) 1036 wake_up_interruptible_sync(&thread->wait); 1037 else 1038 wake_up_interruptible(&thread->wait); 1039 return; 1040 } 1041 1042 /* Didn't find a thread waiting for proc work; this can happen 1043 * in two scenarios: 1044 * 1. All threads are busy handling transactions 1045 * In that case, one of those threads should call back into 1046 * the kernel driver soon and pick up this work. 1047 * 2. Threads are using the (e)poll interface, in which case 1048 * they may be blocked on the waitqueue without having been 1049 * added to waiting_threads. For this case, we just iterate 1050 * over all threads not handling transaction work, and 1051 * wake them all up. We wake all because we don't know whether 1052 * a thread that called into (e)poll is handling non-binder 1053 * work currently. 
1054 */ 1055 binder_wakeup_poll_threads_ilocked(proc, sync); 1056 } 1057 1058 static void binder_wakeup_proc_ilocked(struct binder_proc *proc) 1059 { 1060 struct binder_thread *thread = binder_select_thread_ilocked(proc); 1061 1062 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); 1063 } 1064 1065 static void binder_set_nice(long nice) 1066 { 1067 long min_nice; 1068 1069 if (can_nice(current, nice)) { 1070 set_user_nice(current, nice); 1071 return; 1072 } 1073 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); 1074 binder_debug(BINDER_DEBUG_PRIORITY_CAP, 1075 "%d: nice value %ld not allowed use %ld instead\n", 1076 current->pid, nice, min_nice); 1077 set_user_nice(current, min_nice); 1078 if (min_nice <= MAX_NICE) 1079 return; 1080 binder_user_error("%d RLIMIT_NICE not set\n", current->pid); 1081 } 1082 1083 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, 1084 binder_uintptr_t ptr) 1085 { 1086 struct rb_node *n = proc->nodes.rb_node; 1087 struct binder_node *node; 1088 1089 assert_spin_locked(&proc->inner_lock); 1090 1091 while (n) { 1092 node = rb_entry(n, struct binder_node, rb_node); 1093 1094 if (ptr < node->ptr) 1095 n = n->rb_left; 1096 else if (ptr > node->ptr) 1097 n = n->rb_right; 1098 else { 1099 /* 1100 * take an implicit weak reference 1101 * to ensure node stays alive until 1102 * call to binder_put_node() 1103 */ 1104 binder_inc_node_tmpref_ilocked(node); 1105 return node; 1106 } 1107 } 1108 return NULL; 1109 } 1110 1111 static struct binder_node *binder_get_node(struct binder_proc *proc, 1112 binder_uintptr_t ptr) 1113 { 1114 struct binder_node *node; 1115 1116 binder_inner_proc_lock(proc); 1117 node = binder_get_node_ilocked(proc, ptr); 1118 binder_inner_proc_unlock(proc); 1119 return node; 1120 } 1121 1122 static struct binder_node *binder_init_node_ilocked( 1123 struct binder_proc *proc, 1124 struct binder_node *new_node, 1125 struct flat_binder_object *fp) 1126 { 1127 struct rb_node **p = &proc->nodes.rb_node; 1128 struct rb_node *parent = NULL; 1129 struct binder_node *node; 1130 binder_uintptr_t ptr = fp ? fp->binder : 0; 1131 binder_uintptr_t cookie = fp ? fp->cookie : 0; 1132 __u32 flags = fp ? fp->flags : 0; 1133 1134 assert_spin_locked(&proc->inner_lock); 1135 1136 while (*p) { 1137 1138 parent = *p; 1139 node = rb_entry(parent, struct binder_node, rb_node); 1140 1141 if (ptr < node->ptr) 1142 p = &(*p)->rb_left; 1143 else if (ptr > node->ptr) 1144 p = &(*p)->rb_right; 1145 else { 1146 /* 1147 * A matching node is already in 1148 * the rb tree. Abandon the init 1149 * and return it. 
1150 */ 1151 binder_inc_node_tmpref_ilocked(node); 1152 return node; 1153 } 1154 } 1155 node = new_node; 1156 binder_stats_created(BINDER_STAT_NODE); 1157 node->tmp_refs++; 1158 rb_link_node(&node->rb_node, parent, p); 1159 rb_insert_color(&node->rb_node, &proc->nodes); 1160 node->debug_id = atomic_inc_return(&binder_last_id); 1161 node->proc = proc; 1162 node->ptr = ptr; 1163 node->cookie = cookie; 1164 node->work.type = BINDER_WORK_NODE; 1165 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1166 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1167 spin_lock_init(&node->lock); 1168 INIT_LIST_HEAD(&node->work.entry); 1169 INIT_LIST_HEAD(&node->async_todo); 1170 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1171 "%d:%d node %d u%016llx c%016llx created\n", 1172 proc->pid, current->pid, node->debug_id, 1173 (u64)node->ptr, (u64)node->cookie); 1174 1175 return node; 1176 } 1177 1178 static struct binder_node *binder_new_node(struct binder_proc *proc, 1179 struct flat_binder_object *fp) 1180 { 1181 struct binder_node *node; 1182 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); 1183 1184 if (!new_node) 1185 return NULL; 1186 binder_inner_proc_lock(proc); 1187 node = binder_init_node_ilocked(proc, new_node, fp); 1188 binder_inner_proc_unlock(proc); 1189 if (node != new_node) 1190 /* 1191 * The node was already added by another thread 1192 */ 1193 kfree(new_node); 1194 1195 return node; 1196 } 1197 1198 static void binder_free_node(struct binder_node *node) 1199 { 1200 kfree(node); 1201 binder_stats_deleted(BINDER_STAT_NODE); 1202 } 1203 1204 static int binder_inc_node_nilocked(struct binder_node *node, int strong, 1205 int internal, 1206 struct list_head *target_list) 1207 { 1208 struct binder_proc *proc = node->proc; 1209 1210 assert_spin_locked(&node->lock); 1211 if (proc) 1212 assert_spin_locked(&proc->inner_lock); 1213 if (strong) { 1214 if (internal) { 1215 if (target_list == NULL && 1216 node->internal_strong_refs == 0 && 1217 !(node->proc && 1218 node == node->proc->context->binder_context_mgr_node && 1219 node->has_strong_ref)) { 1220 pr_err("invalid inc strong node for %d\n", 1221 node->debug_id); 1222 return -EINVAL; 1223 } 1224 node->internal_strong_refs++; 1225 } else 1226 node->local_strong_refs++; 1227 if (!node->has_strong_ref && target_list) { 1228 struct binder_thread *thread = container_of(target_list, 1229 struct binder_thread, todo); 1230 binder_dequeue_work_ilocked(&node->work); 1231 BUG_ON(&thread->todo != target_list); 1232 binder_enqueue_deferred_thread_work_ilocked(thread, 1233 &node->work); 1234 } 1235 } else { 1236 if (!internal) 1237 node->local_weak_refs++; 1238 if (!node->has_weak_ref && list_empty(&node->work.entry)) { 1239 if (target_list == NULL) { 1240 pr_err("invalid inc weak node for %d\n", 1241 node->debug_id); 1242 return -EINVAL; 1243 } 1244 /* 1245 * See comment above 1246 */ 1247 binder_enqueue_work_ilocked(&node->work, target_list); 1248 } 1249 } 1250 return 0; 1251 } 1252 1253 static int binder_inc_node(struct binder_node *node, int strong, int internal, 1254 struct list_head *target_list) 1255 { 1256 int ret; 1257 1258 binder_node_inner_lock(node); 1259 ret = binder_inc_node_nilocked(node, strong, internal, target_list); 1260 binder_node_inner_unlock(node); 1261 1262 return ret; 1263 } 1264 1265 static bool binder_dec_node_nilocked(struct binder_node *node, 1266 int strong, int internal) 1267 { 1268 struct binder_proc *proc = node->proc; 1269 1270 assert_spin_locked(&node->lock); 1271 if (proc) 1272 
assert_spin_locked(&proc->inner_lock); 1273 if (strong) { 1274 if (internal) 1275 node->internal_strong_refs--; 1276 else 1277 node->local_strong_refs--; 1278 if (node->local_strong_refs || node->internal_strong_refs) 1279 return false; 1280 } else { 1281 if (!internal) 1282 node->local_weak_refs--; 1283 if (node->local_weak_refs || node->tmp_refs || 1284 !hlist_empty(&node->refs)) 1285 return false; 1286 } 1287 1288 if (proc && (node->has_strong_ref || node->has_weak_ref)) { 1289 if (list_empty(&node->work.entry)) { 1290 binder_enqueue_work_ilocked(&node->work, &proc->todo); 1291 binder_wakeup_proc_ilocked(proc); 1292 } 1293 } else { 1294 if (hlist_empty(&node->refs) && !node->local_strong_refs && 1295 !node->local_weak_refs && !node->tmp_refs) { 1296 if (proc) { 1297 binder_dequeue_work_ilocked(&node->work); 1298 rb_erase(&node->rb_node, &proc->nodes); 1299 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1300 "refless node %d deleted\n", 1301 node->debug_id); 1302 } else { 1303 BUG_ON(!list_empty(&node->work.entry)); 1304 spin_lock(&binder_dead_nodes_lock); 1305 /* 1306 * tmp_refs could have changed so 1307 * check it again 1308 */ 1309 if (node->tmp_refs) { 1310 spin_unlock(&binder_dead_nodes_lock); 1311 return false; 1312 } 1313 hlist_del(&node->dead_node); 1314 spin_unlock(&binder_dead_nodes_lock); 1315 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1316 "dead node %d deleted\n", 1317 node->debug_id); 1318 } 1319 return true; 1320 } 1321 } 1322 return false; 1323 } 1324 1325 static void binder_dec_node(struct binder_node *node, int strong, int internal) 1326 { 1327 bool free_node; 1328 1329 binder_node_inner_lock(node); 1330 free_node = binder_dec_node_nilocked(node, strong, internal); 1331 binder_node_inner_unlock(node); 1332 if (free_node) 1333 binder_free_node(node); 1334 } 1335 1336 static void binder_inc_node_tmpref_ilocked(struct binder_node *node) 1337 { 1338 /* 1339 * No call to binder_inc_node() is needed since we 1340 * don't need to inform userspace of any changes to 1341 * tmp_refs 1342 */ 1343 node->tmp_refs++; 1344 } 1345 1346 /** 1347 * binder_inc_node_tmpref() - take a temporary reference on node 1348 * @node: node to reference 1349 * 1350 * Take reference on node to prevent the node from being freed 1351 * while referenced only by a local variable. The inner lock is 1352 * needed to serialize with the node work on the queue (which 1353 * isn't needed after the node is dead). 
If the node is dead 1354 * (node->proc is NULL), use binder_dead_nodes_lock to protect 1355 * node->tmp_refs against dead-node-only cases where the node 1356 * lock cannot be acquired (eg traversing the dead node list to 1357 * print nodes) 1358 */ 1359 static void binder_inc_node_tmpref(struct binder_node *node) 1360 { 1361 binder_node_lock(node); 1362 if (node->proc) 1363 binder_inner_proc_lock(node->proc); 1364 else 1365 spin_lock(&binder_dead_nodes_lock); 1366 binder_inc_node_tmpref_ilocked(node); 1367 if (node->proc) 1368 binder_inner_proc_unlock(node->proc); 1369 else 1370 spin_unlock(&binder_dead_nodes_lock); 1371 binder_node_unlock(node); 1372 } 1373 1374 /** 1375 * binder_dec_node_tmpref() - remove a temporary reference on node 1376 * @node: node to reference 1377 * 1378 * Release temporary reference on node taken via binder_inc_node_tmpref() 1379 */ 1380 static void binder_dec_node_tmpref(struct binder_node *node) 1381 { 1382 bool free_node; 1383 1384 binder_node_inner_lock(node); 1385 if (!node->proc) 1386 spin_lock(&binder_dead_nodes_lock); 1387 node->tmp_refs--; 1388 BUG_ON(node->tmp_refs < 0); 1389 if (!node->proc) 1390 spin_unlock(&binder_dead_nodes_lock); 1391 /* 1392 * Call binder_dec_node() to check if all refcounts are 0 1393 * and cleanup is needed. Calling with strong=0 and internal=1 1394 * causes no actual reference to be released in binder_dec_node(). 1395 * If that changes, a change is needed here too. 1396 */ 1397 free_node = binder_dec_node_nilocked(node, 0, 1); 1398 binder_node_inner_unlock(node); 1399 if (free_node) 1400 binder_free_node(node); 1401 } 1402 1403 static void binder_put_node(struct binder_node *node) 1404 { 1405 binder_dec_node_tmpref(node); 1406 } 1407 1408 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, 1409 u32 desc, bool need_strong_ref) 1410 { 1411 struct rb_node *n = proc->refs_by_desc.rb_node; 1412 struct binder_ref *ref; 1413 1414 while (n) { 1415 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1416 1417 if (desc < ref->data.desc) { 1418 n = n->rb_left; 1419 } else if (desc > ref->data.desc) { 1420 n = n->rb_right; 1421 } else if (need_strong_ref && !ref->data.strong) { 1422 binder_user_error("tried to use weak ref as strong ref\n"); 1423 return NULL; 1424 } else { 1425 return ref; 1426 } 1427 } 1428 return NULL; 1429 } 1430 1431 /** 1432 * binder_get_ref_for_node_olocked() - get the ref associated with given node 1433 * @proc: binder_proc that owns the ref 1434 * @node: binder_node of target 1435 * @new_ref: newly allocated binder_ref to be initialized or %NULL 1436 * 1437 * Look up the ref for the given node and return it if it exists 1438 * 1439 * If it doesn't exist and the caller provides a newly allocated 1440 * ref, initialize the fields of the newly allocated ref and insert 1441 * into the given proc rb_trees and node refs list. 1442 * 1443 * Return: the ref for node. It is possible that another thread 1444 * allocated/initialized the ref first in which case the 1445 * returned ref would be different than the passed-in 1446 * new_ref. new_ref must be kfree'd by the caller in 1447 * this case. 
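 *
 * binder_inc_ref_for_node() later in this file is the typical
 * caller; it retries with a freshly allocated ref when the first
 * lookup misses, along these lines:
 *
 *   ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *   if (!ref) {
 *           binder_proc_unlock(proc);
 *           new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 *           ...
 *           binder_proc_lock(proc);
 *           ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *   }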
1448 */ 1449 static struct binder_ref *binder_get_ref_for_node_olocked( 1450 struct binder_proc *proc, 1451 struct binder_node *node, 1452 struct binder_ref *new_ref) 1453 { 1454 struct binder_context *context = proc->context; 1455 struct rb_node **p = &proc->refs_by_node.rb_node; 1456 struct rb_node *parent = NULL; 1457 struct binder_ref *ref; 1458 struct rb_node *n; 1459 1460 while (*p) { 1461 parent = *p; 1462 ref = rb_entry(parent, struct binder_ref, rb_node_node); 1463 1464 if (node < ref->node) 1465 p = &(*p)->rb_left; 1466 else if (node > ref->node) 1467 p = &(*p)->rb_right; 1468 else 1469 return ref; 1470 } 1471 if (!new_ref) 1472 return NULL; 1473 1474 binder_stats_created(BINDER_STAT_REF); 1475 new_ref->data.debug_id = atomic_inc_return(&binder_last_id); 1476 new_ref->proc = proc; 1477 new_ref->node = node; 1478 rb_link_node(&new_ref->rb_node_node, parent, p); 1479 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1480 1481 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1; 1482 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1483 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1484 if (ref->data.desc > new_ref->data.desc) 1485 break; 1486 new_ref->data.desc = ref->data.desc + 1; 1487 } 1488 1489 p = &proc->refs_by_desc.rb_node; 1490 while (*p) { 1491 parent = *p; 1492 ref = rb_entry(parent, struct binder_ref, rb_node_desc); 1493 1494 if (new_ref->data.desc < ref->data.desc) 1495 p = &(*p)->rb_left; 1496 else if (new_ref->data.desc > ref->data.desc) 1497 p = &(*p)->rb_right; 1498 else 1499 BUG(); 1500 } 1501 rb_link_node(&new_ref->rb_node_desc, parent, p); 1502 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); 1503 1504 binder_node_lock(node); 1505 hlist_add_head(&new_ref->node_entry, &node->refs); 1506 1507 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1508 "%d new ref %d desc %d for node %d\n", 1509 proc->pid, new_ref->data.debug_id, new_ref->data.desc, 1510 node->debug_id); 1511 binder_node_unlock(node); 1512 return new_ref; 1513 } 1514 1515 static void binder_cleanup_ref_olocked(struct binder_ref *ref) 1516 { 1517 bool delete_node = false; 1518 1519 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1520 "%d delete ref %d desc %d for node %d\n", 1521 ref->proc->pid, ref->data.debug_id, ref->data.desc, 1522 ref->node->debug_id); 1523 1524 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); 1525 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); 1526 1527 binder_node_inner_lock(ref->node); 1528 if (ref->data.strong) 1529 binder_dec_node_nilocked(ref->node, 1, 1); 1530 1531 hlist_del(&ref->node_entry); 1532 delete_node = binder_dec_node_nilocked(ref->node, 0, 1); 1533 binder_node_inner_unlock(ref->node); 1534 /* 1535 * Clear ref->node unless we want the caller to free the node 1536 */ 1537 if (!delete_node) { 1538 /* 1539 * The caller uses ref->node to determine 1540 * whether the node needs to be freed. Clear 1541 * it since the node is still alive. 
                 */
                ref->node = NULL;
        }

        if (ref->death) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%d delete ref %d desc %d has death notification\n",
                             ref->proc->pid, ref->data.debug_id,
                             ref->data.desc);
                binder_dequeue_work(ref->proc, &ref->death->work);
                binder_stats_deleted(BINDER_STAT_DEATH);
        }
        binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
                                  struct list_head *target_list)
{
        int ret;

        if (strong) {
                if (ref->data.strong == 0) {
                        ret = binder_inc_node(ref->node, 1, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.strong++;
        } else {
                if (ref->data.weak == 0) {
                        ret = binder_inc_node(ref->node, 0, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.weak++;
        }
        return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:    ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
        if (strong) {
                if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.strong--;
                if (ref->data.strong == 0)
                        binder_dec_node(ref->node, strong, 1);
        } else {
                if (ref->data.weak == 0) {
                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.weak--;
        }
        if (ref->data.strong == 0 && ref->data.weak == 0) {
                binder_cleanup_ref_olocked(ref);
                return true;
        }
        return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:            proc containing the ref
 * @desc:            the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:           the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
                struct binder_proc *proc,
                u32 desc, bool need_strong_ref,
                struct binder_ref_data *rdata)
{
        struct binder_node *node;
        struct binder_ref *ref;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
        if (!ref)
                goto err_no_ref;
        node = ref->node;
        /*
         * Take an implicit reference on the node to ensure
         * it stays alive until the call to binder_put_node()
         */
        binder_inc_node_tmpref(node);
        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        return node;

err_no_ref:
        binder_proc_unlock(proc);
1666 return NULL; 1667 } 1668 1669 /** 1670 * binder_free_ref() - free the binder_ref 1671 * @ref: ref to free 1672 * 1673 * Free the binder_ref. Free the binder_node indicated by ref->node 1674 * (if non-NULL) and the binder_ref_death indicated by ref->death. 1675 */ 1676 static void binder_free_ref(struct binder_ref *ref) 1677 { 1678 if (ref->node) 1679 binder_free_node(ref->node); 1680 kfree(ref->death); 1681 kfree(ref); 1682 } 1683 1684 /** 1685 * binder_update_ref_for_handle() - inc/dec the ref for given handle 1686 * @proc: proc containing the ref 1687 * @desc: the handle associated with the ref 1688 * @increment: true=inc reference, false=dec reference 1689 * @strong: true=strong reference, false=weak reference 1690 * @rdata: the id/refcount data for the ref 1691 * 1692 * Given a proc and ref handle, increment or decrement the ref 1693 * according to "increment" arg. 1694 * 1695 * Return: 0 if successful, else errno 1696 */ 1697 static int binder_update_ref_for_handle(struct binder_proc *proc, 1698 uint32_t desc, bool increment, bool strong, 1699 struct binder_ref_data *rdata) 1700 { 1701 int ret = 0; 1702 struct binder_ref *ref; 1703 bool delete_ref = false; 1704 1705 binder_proc_lock(proc); 1706 ref = binder_get_ref_olocked(proc, desc, strong); 1707 if (!ref) { 1708 ret = -EINVAL; 1709 goto err_no_ref; 1710 } 1711 if (increment) 1712 ret = binder_inc_ref_olocked(ref, strong, NULL); 1713 else 1714 delete_ref = binder_dec_ref_olocked(ref, strong); 1715 1716 if (rdata) 1717 *rdata = ref->data; 1718 binder_proc_unlock(proc); 1719 1720 if (delete_ref) 1721 binder_free_ref(ref); 1722 return ret; 1723 1724 err_no_ref: 1725 binder_proc_unlock(proc); 1726 return ret; 1727 } 1728 1729 /** 1730 * binder_dec_ref_for_handle() - dec the ref for given handle 1731 * @proc: proc containing the ref 1732 * @desc: the handle associated with the ref 1733 * @strong: true=strong reference, false=weak reference 1734 * @rdata: the id/refcount data for the ref 1735 * 1736 * Just calls binder_update_ref_for_handle() to decrement the ref. 1737 * 1738 * Return: 0 if successful, else errno 1739 */ 1740 static int binder_dec_ref_for_handle(struct binder_proc *proc, 1741 uint32_t desc, bool strong, struct binder_ref_data *rdata) 1742 { 1743 return binder_update_ref_for_handle(proc, desc, false, strong, rdata); 1744 } 1745 1746 1747 /** 1748 * binder_inc_ref_for_node() - increment the ref for given proc/node 1749 * @proc: proc containing the ref 1750 * @node: target node 1751 * @strong: true=strong reference, false=weak reference 1752 * @target_list: worklist to use if node is incremented 1753 * @rdata: the id/refcount data for the ref 1754 * 1755 * Given a proc and node, increment the ref. 
 * Create the ref if it doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
                                   struct binder_node *node,
                                   bool strong,
                                   struct list_head *target_list,
                                   struct binder_ref_data *rdata)
{
        struct binder_ref *ref;
        struct binder_ref *new_ref = NULL;
        int ret = 0;

        binder_proc_lock(proc);
        ref = binder_get_ref_for_node_olocked(proc, node, NULL);
        if (!ref) {
                binder_proc_unlock(proc);
                new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!new_ref)
                        return -ENOMEM;
                binder_proc_lock(proc);
                ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
        }
        ret = binder_inc_ref_olocked(ref, strong, target_list);
        *rdata = ref->data;
        binder_proc_unlock(proc);
        if (new_ref && ref != new_ref)
                /*
                 * Another thread created the ref first so
                 * free the one we allocated
                 */
                kfree(new_ref);
        return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
                                           struct binder_transaction *t)
{
        BUG_ON(!target_thread);
        assert_spin_locked(&target_thread->proc->inner_lock);
        BUG_ON(target_thread->transaction_stack != t);
        BUG_ON(target_thread->transaction_stack->from != target_thread);
        target_thread->transaction_stack =
                target_thread->transaction_stack->from_parent;
        t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction is being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
        /*
         * atomic is used to protect the counter value while
         * it cannot reach zero or thread->is_dead is false
         */
        binder_inner_proc_lock(thread->proc);
        atomic_dec(&thread->tmp_ref);
        if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
                binder_inner_proc_unlock(thread->proc);
                binder_free_thread(thread);
                return;
        }
        binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and it is not currently in use to process a transaction).
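 *
 * The usual pattern (illustrative; the increments happen at the
 * various call sites elsewhere in the driver, not here) is to take
 * the reference under the inner lock and drop it with this function
 * once the proc is no longer needed:
 *
 *   binder_inner_proc_lock(proc);
 *   proc->tmp_ref++;
 *   binder_inner_proc_unlock(proc);
 *   ...
 *   binder_proc_dec_tmpref(proc);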
1843 */ 1844 static void binder_proc_dec_tmpref(struct binder_proc *proc) 1845 { 1846 binder_inner_proc_lock(proc); 1847 proc->tmp_ref--; 1848 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && 1849 !proc->tmp_ref) { 1850 binder_inner_proc_unlock(proc); 1851 binder_free_proc(proc); 1852 return; 1853 } 1854 binder_inner_proc_unlock(proc); 1855 } 1856 1857 /** 1858 * binder_get_txn_from() - safely extract the "from" thread in transaction 1859 * @t: binder transaction for t->from 1860 * 1861 * Atomically return the "from" thread and increment the tmp_ref 1862 * count for the thread to ensure it stays alive until 1863 * binder_thread_dec_tmpref() is called. 1864 * 1865 * Return: the value of t->from 1866 */ 1867 static struct binder_thread *binder_get_txn_from( 1868 struct binder_transaction *t) 1869 { 1870 struct binder_thread *from; 1871 1872 spin_lock(&t->lock); 1873 from = t->from; 1874 if (from) 1875 atomic_inc(&from->tmp_ref); 1876 spin_unlock(&t->lock); 1877 return from; 1878 } 1879 1880 /** 1881 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock 1882 * @t: binder transaction for t->from 1883 * 1884 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock 1885 * to guarantee that the thread cannot be released while operating on it. 1886 * The caller must call binder_inner_proc_unlock() to release the inner lock 1887 * as well as call binder_thread_dec_tmpref() to release the reference. 1888 * 1889 * Return: the value of t->from 1890 */ 1891 static struct binder_thread *binder_get_txn_from_and_acq_inner( 1892 struct binder_transaction *t) 1893 { 1894 struct binder_thread *from; 1895 1896 from = binder_get_txn_from(t); 1897 if (!from) 1898 return NULL; 1899 binder_inner_proc_lock(from->proc); 1900 if (t->from) { 1901 BUG_ON(from != t->from); 1902 return from; 1903 } 1904 binder_inner_proc_unlock(from->proc); 1905 binder_thread_dec_tmpref(from); 1906 return NULL; 1907 } 1908 1909 /** 1910 * binder_free_txn_fixups() - free unprocessed fd fixups 1911 * @t: binder transaction whose fd fixups should be freed 1912 * 1913 * If the transaction is being torn down prior to being 1914 * processed by the target process, free all of the 1915 * fd fixups and fput the file structs. It is safe to 1916 * call this function after the fixups have been 1917 * processed -- in that case, the list will be empty.
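 *
 * Each entry on @t->fd_fixups was queued by binder_translate_fd(), which
 * pins the sender's struct file and records where in the buffer the new
 * fd must be written. Roughly (a simplified sketch of that producer
 * path, see binder_translate_fd() for the real code):
 *
 *	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
 *	fixup->file = file;		// from fget(fd), dropped here on teardown
 *	fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
 *	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);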
1918 */ 1919 static void binder_free_txn_fixups(struct binder_transaction *t) 1920 { 1921 struct binder_txn_fd_fixup *fixup, *tmp; 1922 1923 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 1924 fput(fixup->file); 1925 list_del(&fixup->fixup_entry); 1926 kfree(fixup); 1927 } 1928 } 1929 1930 static void binder_free_transaction(struct binder_transaction *t) 1931 { 1932 if (t->buffer) 1933 t->buffer->transaction = NULL; 1934 binder_free_txn_fixups(t); 1935 kfree(t); 1936 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1937 } 1938 1939 static void binder_send_failed_reply(struct binder_transaction *t, 1940 uint32_t error_code) 1941 { 1942 struct binder_thread *target_thread; 1943 struct binder_transaction *next; 1944 1945 BUG_ON(t->flags & TF_ONE_WAY); 1946 while (1) { 1947 target_thread = binder_get_txn_from_and_acq_inner(t); 1948 if (target_thread) { 1949 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1950 "send failed reply for transaction %d to %d:%d\n", 1951 t->debug_id, 1952 target_thread->proc->pid, 1953 target_thread->pid); 1954 1955 binder_pop_transaction_ilocked(target_thread, t); 1956 if (target_thread->reply_error.cmd == BR_OK) { 1957 target_thread->reply_error.cmd = error_code; 1958 binder_enqueue_thread_work_ilocked( 1959 target_thread, 1960 &target_thread->reply_error.work); 1961 wake_up_interruptible(&target_thread->wait); 1962 } else { 1963 /* 1964 * Cannot get here for normal operation, but 1965 * we can if multiple synchronous transactions 1966 * are sent without blocking for responses. 1967 * Just ignore the 2nd error in this case. 1968 */ 1969 pr_warn("Unexpected reply error: %u\n", 1970 target_thread->reply_error.cmd); 1971 } 1972 binder_inner_proc_unlock(target_thread->proc); 1973 binder_thread_dec_tmpref(target_thread); 1974 binder_free_transaction(t); 1975 return; 1976 } 1977 next = t->from_parent; 1978 1979 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1980 "send failed reply for transaction %d, target dead\n", 1981 t->debug_id); 1982 1983 binder_free_transaction(t); 1984 if (next == NULL) { 1985 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1986 "reply failed, no target thread at root\n"); 1987 return; 1988 } 1989 t = next; 1990 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1991 "reply failed, no target thread -- retry %d\n", 1992 t->debug_id); 1993 } 1994 } 1995 1996 /** 1997 * binder_cleanup_transaction() - cleans up undelivered transaction 1998 * @t: transaction that needs to be cleaned up 1999 * @reason: reason the transaction wasn't delivered 2000 * @error_code: error to return to caller (if synchronous call) 2001 */ 2002 static void binder_cleanup_transaction(struct binder_transaction *t, 2003 const char *reason, 2004 uint32_t error_code) 2005 { 2006 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 2007 binder_send_failed_reply(t, error_code); 2008 } else { 2009 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2010 "undelivered transaction %d, %s\n", 2011 t->debug_id, reason); 2012 binder_free_transaction(t); 2013 } 2014 } 2015 2016 /** 2017 * binder_validate_object() - checks for a valid metadata object in a buffer. 2018 * @buffer: binder_buffer that we're parsing. 2019 * @offset: offset in the buffer at which to validate an object. 2020 * 2021 * Return: If there's a valid metadata object at @offset in @buffer, the 2022 * size of that object. Otherwise, it returns zero. 
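 *
 * Typical caller pattern (a sketch modelled on the offset walks later in
 * this file, shown here only to illustrate the contract):
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(buffer, *offp);
 *
 *		if (object_size == 0)
 *			goto reject;	// malformed or truncated object
 *		hdr = (struct binder_object_header *)(buffer->data + *offp);
 *		// dispatch on hdr->type ...
 *	}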
2023 */ 2024 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 2025 { 2026 /* Check if we can read a header first */ 2027 struct binder_object_header *hdr; 2028 size_t object_size = 0; 2029 2030 if (buffer->data_size < sizeof(*hdr) || 2031 offset > buffer->data_size - sizeof(*hdr) || 2032 !IS_ALIGNED(offset, sizeof(u32))) 2033 return 0; 2034 2035 /* Ok, now see if we can read a complete object. */ 2036 hdr = (struct binder_object_header *)(buffer->data + offset); 2037 switch (hdr->type) { 2038 case BINDER_TYPE_BINDER: 2039 case BINDER_TYPE_WEAK_BINDER: 2040 case BINDER_TYPE_HANDLE: 2041 case BINDER_TYPE_WEAK_HANDLE: 2042 object_size = sizeof(struct flat_binder_object); 2043 break; 2044 case BINDER_TYPE_FD: 2045 object_size = sizeof(struct binder_fd_object); 2046 break; 2047 case BINDER_TYPE_PTR: 2048 object_size = sizeof(struct binder_buffer_object); 2049 break; 2050 case BINDER_TYPE_FDA: 2051 object_size = sizeof(struct binder_fd_array_object); 2052 break; 2053 default: 2054 return 0; 2055 } 2056 if (offset <= buffer->data_size - object_size && 2057 buffer->data_size >= object_size) 2058 return object_size; 2059 else 2060 return 0; 2061 } 2062 2063 /** 2064 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 2065 * @b: binder_buffer containing the object 2066 * @index: index in offset array at which the binder_buffer_object is 2067 * located 2068 * @start: points to the start of the offset array 2069 * @num_valid: the number of valid offsets in the offset array 2070 * 2071 * Return: If @index is within the valid range of the offset array 2072 * described by @start and @num_valid, and if there's a valid 2073 * binder_buffer_object at the offset found in index @index 2074 * of the offset array, that object is returned. Otherwise, 2075 * %NULL is returned. 2076 * Note that the offset found in index @index itself is not 2077 * verified; this function assumes that @num_valid elements 2078 * from @start were previously verified to have valid offsets. 2079 */ 2080 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, 2081 binder_size_t index, 2082 binder_size_t *start, 2083 binder_size_t num_valid) 2084 { 2085 struct binder_buffer_object *buffer_obj; 2086 binder_size_t *offp; 2087 2088 if (index >= num_valid) 2089 return NULL; 2090 2091 offp = start + index; 2092 buffer_obj = (struct binder_buffer_object *)(b->data + *offp); 2093 if (buffer_obj->hdr.type != BINDER_TYPE_PTR) 2094 return NULL; 2095 2096 return buffer_obj; 2097 } 2098 2099 /** 2100 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 2101 * @b: transaction buffer 2102 * @objects_start start of objects buffer 2103 * @buffer: binder_buffer_object in which to fix up 2104 * @offset: start offset in @buffer to fix up 2105 * @last_obj: last binder_buffer_object that we fixed up in 2106 * @last_min_offset: minimum fixup offset in @last_obj 2107 * 2108 * Return: %true if a fixup in buffer @buffer at offset @offset is 2109 * allowed. 2110 * 2111 * For safety reasons, we only allow fixups inside a buffer to happen 2112 * at increasing offsets; additionally, we only allow fixup on the last 2113 * buffer object that was verified, or one of its parents. 
2114 * 2115 * Example of what is allowed: 2116 * 2117 * A 2118 * B (parent = A, offset = 0) 2119 * C (parent = A, offset = 16) 2120 * D (parent = C, offset = 0) 2121 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 2122 * 2123 * Examples of what is not allowed: 2124 * 2125 * Decreasing offsets within the same parent: 2126 * A 2127 * C (parent = A, offset = 16) 2128 * B (parent = A, offset = 0) // decreasing offset within A 2129 * 2130 * Referring to a parent that wasn't the last object or any of its parents: 2131 * A 2132 * B (parent = A, offset = 0) 2133 * C (parent = A, offset = 0) 2134 * C (parent = A, offset = 16) 2135 * D (parent = B, offset = 0) // B is not A or any of A's parents 2136 */ 2137 static bool binder_validate_fixup(struct binder_buffer *b, 2138 binder_size_t *objects_start, 2139 struct binder_buffer_object *buffer, 2140 binder_size_t fixup_offset, 2141 struct binder_buffer_object *last_obj, 2142 binder_size_t last_min_offset) 2143 { 2144 if (!last_obj) { 2145 /* Nothing to fix up in */ 2146 return false; 2147 } 2148 2149 while (last_obj != buffer) { 2150 /* 2151 * Safe to retrieve the parent of last_obj, since it 2152 * was already previously verified by the driver. 2153 */ 2154 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2155 return false; 2156 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 2157 last_obj = (struct binder_buffer_object *) 2158 (b->data + *(objects_start + last_obj->parent)); 2159 } 2160 return (fixup_offset >= last_min_offset); 2161 } 2162 2163 static void binder_transaction_buffer_release(struct binder_proc *proc, 2164 struct binder_buffer *buffer, 2165 binder_size_t *failed_at) 2166 { 2167 binder_size_t *offp, *off_start, *off_end; 2168 int debug_id = buffer->debug_id; 2169 2170 binder_debug(BINDER_DEBUG_TRANSACTION, 2171 "%d buffer release %d, size %zd-%zd, failed at %pK\n", 2172 proc->pid, buffer->debug_id, 2173 buffer->data_size, buffer->offsets_size, failed_at); 2174 2175 if (buffer->target_node) 2176 binder_dec_node(buffer->target_node, 1, 0); 2177 2178 off_start = (binder_size_t *)(buffer->data + 2179 ALIGN(buffer->data_size, sizeof(void *))); 2180 if (failed_at) 2181 off_end = failed_at; 2182 else 2183 off_end = (void *)off_start + buffer->offsets_size; 2184 for (offp = off_start; offp < off_end; offp++) { 2185 struct binder_object_header *hdr; 2186 size_t object_size = binder_validate_object(buffer, *offp); 2187 2188 if (object_size == 0) { 2189 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2190 debug_id, (u64)*offp, buffer->data_size); 2191 continue; 2192 } 2193 hdr = (struct binder_object_header *)(buffer->data + *offp); 2194 switch (hdr->type) { 2195 case BINDER_TYPE_BINDER: 2196 case BINDER_TYPE_WEAK_BINDER: { 2197 struct flat_binder_object *fp; 2198 struct binder_node *node; 2199 2200 fp = to_flat_binder_object(hdr); 2201 node = binder_get_node(proc, fp->binder); 2202 if (node == NULL) { 2203 pr_err("transaction release %d bad node %016llx\n", 2204 debug_id, (u64)fp->binder); 2205 break; 2206 } 2207 binder_debug(BINDER_DEBUG_TRANSACTION, 2208 " node %d u%016llx\n", 2209 node->debug_id, (u64)node->ptr); 2210 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2211 0); 2212 binder_put_node(node); 2213 } break; 2214 case BINDER_TYPE_HANDLE: 2215 case BINDER_TYPE_WEAK_HANDLE: { 2216 struct flat_binder_object *fp; 2217 struct binder_ref_data rdata; 2218 int ret; 2219 2220 fp = to_flat_binder_object(hdr); 2221 ret = binder_dec_ref_for_handle(proc, fp->handle, 2222 
hdr->type == BINDER_TYPE_HANDLE, &rdata); 2223 2224 if (ret) { 2225 pr_err("transaction release %d bad handle %d, ret = %d\n", 2226 debug_id, fp->handle, ret); 2227 break; 2228 } 2229 binder_debug(BINDER_DEBUG_TRANSACTION, 2230 " ref %d desc %d\n", 2231 rdata.debug_id, rdata.desc); 2232 } break; 2233 2234 case BINDER_TYPE_FD: { 2235 /* 2236 * No need to close the file here since user-space 2237 * closes it for successfully delivered 2238 * transactions. For transactions that weren't 2239 * delivered, the new fd was never allocated so 2240 * there is no need to close and the fput on the 2241 * file is done when the transaction is torn 2242 * down. 2243 */ 2244 WARN_ON(failed_at && 2245 proc->tsk == current->group_leader); 2246 } break; 2247 case BINDER_TYPE_PTR: 2248 /* 2249 * Nothing to do here, this will get cleaned up when the 2250 * transaction buffer gets freed 2251 */ 2252 break; 2253 case BINDER_TYPE_FDA: { 2254 struct binder_fd_array_object *fda; 2255 struct binder_buffer_object *parent; 2256 uintptr_t parent_buffer; 2257 u32 *fd_array; 2258 size_t fd_index; 2259 binder_size_t fd_buf_size; 2260 2261 if (proc->tsk != current->group_leader) { 2262 /* 2263 * Nothing to do if running in sender context. 2264 * The fd fixups have not been applied so no 2265 * fds need to be closed. 2266 */ 2267 continue; 2268 } 2269 2270 fda = to_binder_fd_array_object(hdr); 2271 parent = binder_validate_ptr(buffer, fda->parent, 2272 off_start, 2273 offp - off_start); 2274 if (!parent) { 2275 pr_err("transaction release %d bad parent offset\n", 2276 debug_id); 2277 continue; 2278 } 2279 /* 2280 * Since the parent was already fixed up, convert it 2281 * back to kernel address space to access it 2282 */ 2283 parent_buffer = parent->buffer - 2284 binder_alloc_get_user_buffer_offset( 2285 &proc->alloc); 2286 2287 fd_buf_size = sizeof(u32) * fda->num_fds; 2288 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2289 pr_err("transaction release %d invalid number of fds (%lld)\n", 2290 debug_id, (u64)fda->num_fds); 2291 continue; 2292 } 2293 if (fd_buf_size > parent->length || 2294 fda->parent_offset > parent->length - fd_buf_size) { 2295 /* No space for all file descriptors here.
*/ 2296 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2297 debug_id, (u64)fda->num_fds); 2298 continue; 2299 } 2300 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2301 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2302 ksys_close(fd_array[fd_index]); 2303 } break; 2304 default: 2305 pr_err("transaction release %d bad object type %x\n", 2306 debug_id, hdr->type); 2307 break; 2308 } 2309 } 2310 } 2311 2312 static int binder_translate_binder(struct flat_binder_object *fp, 2313 struct binder_transaction *t, 2314 struct binder_thread *thread) 2315 { 2316 struct binder_node *node; 2317 struct binder_proc *proc = thread->proc; 2318 struct binder_proc *target_proc = t->to_proc; 2319 struct binder_ref_data rdata; 2320 int ret = 0; 2321 2322 node = binder_get_node(proc, fp->binder); 2323 if (!node) { 2324 node = binder_new_node(proc, fp); 2325 if (!node) 2326 return -ENOMEM; 2327 } 2328 if (fp->cookie != node->cookie) { 2329 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2330 proc->pid, thread->pid, (u64)fp->binder, 2331 node->debug_id, (u64)fp->cookie, 2332 (u64)node->cookie); 2333 ret = -EINVAL; 2334 goto done; 2335 } 2336 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2337 ret = -EPERM; 2338 goto done; 2339 } 2340 2341 ret = binder_inc_ref_for_node(target_proc, node, 2342 fp->hdr.type == BINDER_TYPE_BINDER, 2343 &thread->todo, &rdata); 2344 if (ret) 2345 goto done; 2346 2347 if (fp->hdr.type == BINDER_TYPE_BINDER) 2348 fp->hdr.type = BINDER_TYPE_HANDLE; 2349 else 2350 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2351 fp->binder = 0; 2352 fp->handle = rdata.desc; 2353 fp->cookie = 0; 2354 2355 trace_binder_transaction_node_to_ref(t, node, &rdata); 2356 binder_debug(BINDER_DEBUG_TRANSACTION, 2357 " node %d u%016llx -> ref %d desc %d\n", 2358 node->debug_id, (u64)node->ptr, 2359 rdata.debug_id, rdata.desc); 2360 done: 2361 binder_put_node(node); 2362 return ret; 2363 } 2364 2365 static int binder_translate_handle(struct flat_binder_object *fp, 2366 struct binder_transaction *t, 2367 struct binder_thread *thread) 2368 { 2369 struct binder_proc *proc = thread->proc; 2370 struct binder_proc *target_proc = t->to_proc; 2371 struct binder_node *node; 2372 struct binder_ref_data src_rdata; 2373 int ret = 0; 2374 2375 node = binder_get_node_from_ref(proc, fp->handle, 2376 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2377 if (!node) { 2378 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2379 proc->pid, thread->pid, fp->handle); 2380 return -EINVAL; 2381 } 2382 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2383 ret = -EPERM; 2384 goto done; 2385 } 2386 2387 binder_node_lock(node); 2388 if (node->proc == target_proc) { 2389 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2390 fp->hdr.type = BINDER_TYPE_BINDER; 2391 else 2392 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2393 fp->binder = node->ptr; 2394 fp->cookie = node->cookie; 2395 if (node->proc) 2396 binder_inner_proc_lock(node->proc); 2397 binder_inc_node_nilocked(node, 2398 fp->hdr.type == BINDER_TYPE_BINDER, 2399 0, NULL); 2400 if (node->proc) 2401 binder_inner_proc_unlock(node->proc); 2402 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2403 binder_debug(BINDER_DEBUG_TRANSACTION, 2404 " ref %d desc %d -> node %d u%016llx\n", 2405 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2406 (u64)node->ptr); 2407 binder_node_unlock(node); 2408 } else { 2409 struct binder_ref_data dest_rdata; 2410 
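		/*
		 * The node lives in a process other than the target
		 * (typically the sender or a third process), so the object
		 * stays a handle: drop node->lock first (binder_inc_ref_for_node()
		 * takes the target's proc lock), then take or create the
		 * target's own ref on the node and rewrite the descriptor so
		 * it is valid in the target's ref tree.
		 */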
2411 binder_node_unlock(node); 2412 ret = binder_inc_ref_for_node(target_proc, node, 2413 fp->hdr.type == BINDER_TYPE_HANDLE, 2414 NULL, &dest_rdata); 2415 if (ret) 2416 goto done; 2417 2418 fp->binder = 0; 2419 fp->handle = dest_rdata.desc; 2420 fp->cookie = 0; 2421 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2422 &dest_rdata); 2423 binder_debug(BINDER_DEBUG_TRANSACTION, 2424 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2425 src_rdata.debug_id, src_rdata.desc, 2426 dest_rdata.debug_id, dest_rdata.desc, 2427 node->debug_id); 2428 } 2429 done: 2430 binder_put_node(node); 2431 return ret; 2432 } 2433 2434 static int binder_translate_fd(u32 *fdp, 2435 struct binder_transaction *t, 2436 struct binder_thread *thread, 2437 struct binder_transaction *in_reply_to) 2438 { 2439 struct binder_proc *proc = thread->proc; 2440 struct binder_proc *target_proc = t->to_proc; 2441 struct binder_txn_fd_fixup *fixup; 2442 struct file *file; 2443 int ret = 0; 2444 bool target_allows_fd; 2445 int fd = *fdp; 2446 2447 if (in_reply_to) 2448 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2449 else 2450 target_allows_fd = t->buffer->target_node->accept_fds; 2451 if (!target_allows_fd) { 2452 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2453 proc->pid, thread->pid, 2454 in_reply_to ? "reply" : "transaction", 2455 fd); 2456 ret = -EPERM; 2457 goto err_fd_not_accepted; 2458 } 2459 2460 file = fget(fd); 2461 if (!file) { 2462 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2463 proc->pid, thread->pid, fd); 2464 ret = -EBADF; 2465 goto err_fget; 2466 } 2467 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2468 if (ret < 0) { 2469 ret = -EPERM; 2470 goto err_security; 2471 } 2472 2473 /* 2474 * Add fixup record for this transaction. The allocation 2475 * of the fd in the target needs to be done from a 2476 * target thread. 2477 */ 2478 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); 2479 if (!fixup) { 2480 ret = -ENOMEM; 2481 goto err_alloc; 2482 } 2483 fixup->file = file; 2484 fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data; 2485 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2486 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2487 2488 return ret; 2489 2490 err_alloc: 2491 err_security: 2492 fput(file); 2493 err_fget: 2494 err_fd_not_accepted: 2495 return ret; 2496 } 2497 2498 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2499 struct binder_buffer_object *parent, 2500 struct binder_transaction *t, 2501 struct binder_thread *thread, 2502 struct binder_transaction *in_reply_to) 2503 { 2504 binder_size_t fdi, fd_buf_size; 2505 uintptr_t parent_buffer; 2506 u32 *fd_array; 2507 struct binder_proc *proc = thread->proc; 2508 struct binder_proc *target_proc = t->to_proc; 2509 2510 fd_buf_size = sizeof(u32) * fda->num_fds; 2511 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2512 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2513 proc->pid, thread->pid, (u64)fda->num_fds); 2514 return -EINVAL; 2515 } 2516 if (fd_buf_size > parent->length || 2517 fda->parent_offset > parent->length - fd_buf_size) { 2518 /* No space for all file descriptors here. 
*/ 2519 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2520 proc->pid, thread->pid, (u64)fda->num_fds); 2521 return -EINVAL; 2522 } 2523 /* 2524 * Since the parent was already fixed up, convert it 2525 * back to the kernel address space to access it 2526 */ 2527 parent_buffer = parent->buffer - 2528 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 2529 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2530 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 2531 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2532 proc->pid, thread->pid); 2533 return -EINVAL; 2534 } 2535 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2536 int ret = binder_translate_fd(&fd_array[fdi], t, thread, 2537 in_reply_to); 2538 if (ret < 0) 2539 return ret; 2540 } 2541 return 0; 2542 } 2543 2544 static int binder_fixup_parent(struct binder_transaction *t, 2545 struct binder_thread *thread, 2546 struct binder_buffer_object *bp, 2547 binder_size_t *off_start, 2548 binder_size_t num_valid, 2549 struct binder_buffer_object *last_fixup_obj, 2550 binder_size_t last_fixup_min_off) 2551 { 2552 struct binder_buffer_object *parent; 2553 u8 *parent_buffer; 2554 struct binder_buffer *b = t->buffer; 2555 struct binder_proc *proc = thread->proc; 2556 struct binder_proc *target_proc = t->to_proc; 2557 2558 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2559 return 0; 2560 2561 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 2562 if (!parent) { 2563 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2564 proc->pid, thread->pid); 2565 return -EINVAL; 2566 } 2567 2568 if (!binder_validate_fixup(b, off_start, 2569 parent, bp->parent_offset, 2570 last_fixup_obj, 2571 last_fixup_min_off)) { 2572 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2573 proc->pid, thread->pid); 2574 return -EINVAL; 2575 } 2576 2577 if (parent->length < sizeof(binder_uintptr_t) || 2578 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2579 /* No space for a pointer here! */ 2580 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2581 proc->pid, thread->pid); 2582 return -EINVAL; 2583 } 2584 parent_buffer = (u8 *)((uintptr_t)parent->buffer - 2585 binder_alloc_get_user_buffer_offset( 2586 &target_proc->alloc)); 2587 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 2588 2589 return 0; 2590 } 2591 2592 /** 2593 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2594 * @t: transaction to send 2595 * @proc: process to send the transaction to 2596 * @thread: thread in @proc to send the transaction to (may be NULL) 2597 * 2598 * This function queues a transaction to the specified process. It will try 2599 * to find a thread in the target process to handle the transaction and 2600 * wake it up. If no thread is found, the work is queued to the proc 2601 * waitqueue. 2602 * 2603 * If the @thread parameter is not NULL, the transaction is always queued 2604 * to the waitlist of that specific thread. 
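 *
 * Summary of the queueing decision made below (informational only):
 *
 *	oneway and the node already has an async transaction pending
 *		-> queue on node->async_todo, defer the wakeup
 *	a target thread was supplied or a waiting thread can be selected
 *		-> queue on that thread's todo list and wake it
 *	otherwise
 *		-> queue on proc->todo and wake a thread waiting for proc work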
2605 * 2606 * Return: true if the transaction was successfully queued, 2607 * false if the target process or thread is dead 2608 */ 2609 static bool binder_proc_transaction(struct binder_transaction *t, 2610 struct binder_proc *proc, 2611 struct binder_thread *thread) 2612 { 2613 struct binder_node *node = t->buffer->target_node; 2614 bool oneway = !!(t->flags & TF_ONE_WAY); 2615 bool pending_async = false; 2616 2617 BUG_ON(!node); 2618 binder_node_lock(node); 2619 if (oneway) { 2620 BUG_ON(thread); 2621 if (node->has_async_transaction) { 2622 pending_async = true; 2623 } else { 2624 node->has_async_transaction = true; 2625 } 2626 } 2627 2628 binder_inner_proc_lock(proc); 2629 2630 if (proc->is_dead || (thread && thread->is_dead)) { 2631 binder_inner_proc_unlock(proc); 2632 binder_node_unlock(node); 2633 return false; 2634 } 2635 2636 if (!thread && !pending_async) 2637 thread = binder_select_thread_ilocked(proc); 2638 2639 if (thread) 2640 binder_enqueue_thread_work_ilocked(thread, &t->work); 2641 else if (!pending_async) 2642 binder_enqueue_work_ilocked(&t->work, &proc->todo); 2643 else 2644 binder_enqueue_work_ilocked(&t->work, &node->async_todo); 2645 2646 if (!pending_async) 2647 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); 2648 2649 binder_inner_proc_unlock(proc); 2650 binder_node_unlock(node); 2651 2652 return true; 2653 } 2654 2655 /** 2656 * binder_get_node_refs_for_txn() - Get required refs on node for txn 2657 * @node: struct binder_node for which to get refs 2658 * @procp: returns @node->proc if valid 2659 * @error: set to BR_DEAD_REPLY if @node->proc is NULL 2660 * 2661 * User-space normally keeps the node alive when creating a transaction 2662 * since it has a reference to the target. The local strong ref keeps it 2663 * alive if the sending process dies before the target process processes 2664 * the transaction. If the source process is malicious or has a reference 2665 * counting bug, relying on the local strong ref can fail. 2666 * 2667 * Since user-space can cause the local strong ref to go away, we also take 2668 * a tmpref on the node to ensure it survives while we are constructing 2669 * the transaction. We also need a tmpref on the proc while we are 2670 * constructing the transaction, so we take that here as well. 2671 * 2672 * Return: The target_node with refs taken, or NULL if @node->proc is NULL. 2673 * Also sets @procp if valid.
If the @node->proc is NULL indicating that the 2674 * target proc has died, @error is set to BR_DEAD_REPLY 2675 */ 2676 static struct binder_node *binder_get_node_refs_for_txn( 2677 struct binder_node *node, 2678 struct binder_proc **procp, 2679 uint32_t *error) 2680 { 2681 struct binder_node *target_node = NULL; 2682 2683 binder_node_inner_lock(node); 2684 if (node->proc) { 2685 target_node = node; 2686 binder_inc_node_nilocked(node, 1, 0, NULL); 2687 binder_inc_node_tmpref_ilocked(node); 2688 node->proc->tmp_ref++; 2689 *procp = node->proc; 2690 } else 2691 *error = BR_DEAD_REPLY; 2692 binder_node_inner_unlock(node); 2693 2694 return target_node; 2695 } 2696 2697 static void binder_transaction(struct binder_proc *proc, 2698 struct binder_thread *thread, 2699 struct binder_transaction_data *tr, int reply, 2700 binder_size_t extra_buffers_size) 2701 { 2702 int ret; 2703 struct binder_transaction *t; 2704 struct binder_work *w; 2705 struct binder_work *tcomplete; 2706 binder_size_t *offp, *off_end, *off_start; 2707 binder_size_t off_min; 2708 u8 *sg_bufp, *sg_buf_end; 2709 struct binder_proc *target_proc = NULL; 2710 struct binder_thread *target_thread = NULL; 2711 struct binder_node *target_node = NULL; 2712 struct binder_transaction *in_reply_to = NULL; 2713 struct binder_transaction_log_entry *e; 2714 uint32_t return_error = 0; 2715 uint32_t return_error_param = 0; 2716 uint32_t return_error_line = 0; 2717 struct binder_buffer_object *last_fixup_obj = NULL; 2718 binder_size_t last_fixup_min_off = 0; 2719 struct binder_context *context = proc->context; 2720 int t_debug_id = atomic_inc_return(&binder_last_id); 2721 2722 e = binder_transaction_log_add(&binder_transaction_log); 2723 e->debug_id = t_debug_id; 2724 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2725 e->from_proc = proc->pid; 2726 e->from_thread = thread->pid; 2727 e->target_handle = tr->target.handle; 2728 e->data_size = tr->data_size; 2729 e->offsets_size = tr->offsets_size; 2730 e->context_name = proc->context->name; 2731 2732 if (reply) { 2733 binder_inner_proc_lock(proc); 2734 in_reply_to = thread->transaction_stack; 2735 if (in_reply_to == NULL) { 2736 binder_inner_proc_unlock(proc); 2737 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2738 proc->pid, thread->pid); 2739 return_error = BR_FAILED_REPLY; 2740 return_error_param = -EPROTO; 2741 return_error_line = __LINE__; 2742 goto err_empty_call_stack; 2743 } 2744 if (in_reply_to->to_thread != thread) { 2745 spin_lock(&in_reply_to->lock); 2746 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2747 proc->pid, thread->pid, in_reply_to->debug_id, 2748 in_reply_to->to_proc ? 2749 in_reply_to->to_proc->pid : 0, 2750 in_reply_to->to_thread ? 
2751 in_reply_to->to_thread->pid : 0); 2752 spin_unlock(&in_reply_to->lock); 2753 binder_inner_proc_unlock(proc); 2754 return_error = BR_FAILED_REPLY; 2755 return_error_param = -EPROTO; 2756 return_error_line = __LINE__; 2757 in_reply_to = NULL; 2758 goto err_bad_call_stack; 2759 } 2760 thread->transaction_stack = in_reply_to->to_parent; 2761 binder_inner_proc_unlock(proc); 2762 binder_set_nice(in_reply_to->saved_priority); 2763 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2764 if (target_thread == NULL) { 2765 return_error = BR_DEAD_REPLY; 2766 return_error_line = __LINE__; 2767 goto err_dead_binder; 2768 } 2769 if (target_thread->transaction_stack != in_reply_to) { 2770 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2771 proc->pid, thread->pid, 2772 target_thread->transaction_stack ? 2773 target_thread->transaction_stack->debug_id : 0, 2774 in_reply_to->debug_id); 2775 binder_inner_proc_unlock(target_thread->proc); 2776 return_error = BR_FAILED_REPLY; 2777 return_error_param = -EPROTO; 2778 return_error_line = __LINE__; 2779 in_reply_to = NULL; 2780 target_thread = NULL; 2781 goto err_dead_binder; 2782 } 2783 target_proc = target_thread->proc; 2784 target_proc->tmp_ref++; 2785 binder_inner_proc_unlock(target_thread->proc); 2786 } else { 2787 if (tr->target.handle) { 2788 struct binder_ref *ref; 2789 2790 /* 2791 * There must already be a strong ref 2792 * on this node. If so, do a strong 2793 * increment on the node to ensure it 2794 * stays alive until the transaction is 2795 * done. 2796 */ 2797 binder_proc_lock(proc); 2798 ref = binder_get_ref_olocked(proc, tr->target.handle, 2799 true); 2800 if (ref) { 2801 target_node = binder_get_node_refs_for_txn( 2802 ref->node, &target_proc, 2803 &return_error); 2804 } else { 2805 binder_user_error("%d:%d got transaction to invalid handle\n", 2806 proc->pid, thread->pid); 2807 return_error = BR_FAILED_REPLY; 2808 } 2809 binder_proc_unlock(proc); 2810 } else { 2811 mutex_lock(&context->context_mgr_node_lock); 2812 target_node = context->binder_context_mgr_node; 2813 if (target_node) 2814 target_node = binder_get_node_refs_for_txn( 2815 target_node, &target_proc, 2816 &return_error); 2817 else 2818 return_error = BR_DEAD_REPLY; 2819 mutex_unlock(&context->context_mgr_node_lock); 2820 if (target_node && target_proc == proc) { 2821 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2822 proc->pid, thread->pid); 2823 return_error = BR_FAILED_REPLY; 2824 return_error_param = -EINVAL; 2825 return_error_line = __LINE__; 2826 goto err_invalid_target_handle; 2827 } 2828 } 2829 if (!target_node) { 2830 /* 2831 * return_error is set above 2832 */ 2833 return_error_param = -EINVAL; 2834 return_error_line = __LINE__; 2835 goto err_dead_binder; 2836 } 2837 e->to_node = target_node->debug_id; 2838 if (security_binder_transaction(proc->tsk, 2839 target_proc->tsk) < 0) { 2840 return_error = BR_FAILED_REPLY; 2841 return_error_param = -EPERM; 2842 return_error_line = __LINE__; 2843 goto err_invalid_target_handle; 2844 } 2845 binder_inner_proc_lock(proc); 2846 2847 w = list_first_entry_or_null(&thread->todo, 2848 struct binder_work, entry); 2849 if (!(tr->flags & TF_ONE_WAY) && w && 2850 w->type == BINDER_WORK_TRANSACTION) { 2851 /* 2852 * Do not allow new outgoing transaction from a 2853 * thread that has a transaction at the head of 2854 * its todo list. 
Only need to check the head 2855 * because binder_select_thread_ilocked picks a 2856 * thread from proc->waiting_threads to enqueue 2857 * the transaction, and nothing is queued to the 2858 * todo list while the thread is on waiting_threads. 2859 */ 2860 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 2861 proc->pid, thread->pid); 2862 binder_inner_proc_unlock(proc); 2863 return_error = BR_FAILED_REPLY; 2864 return_error_param = -EPROTO; 2865 return_error_line = __LINE__; 2866 goto err_bad_todo_list; 2867 } 2868 2869 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2870 struct binder_transaction *tmp; 2871 2872 tmp = thread->transaction_stack; 2873 if (tmp->to_thread != thread) { 2874 spin_lock(&tmp->lock); 2875 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 2876 proc->pid, thread->pid, tmp->debug_id, 2877 tmp->to_proc ? tmp->to_proc->pid : 0, 2878 tmp->to_thread ? 2879 tmp->to_thread->pid : 0); 2880 spin_unlock(&tmp->lock); 2881 binder_inner_proc_unlock(proc); 2882 return_error = BR_FAILED_REPLY; 2883 return_error_param = -EPROTO; 2884 return_error_line = __LINE__; 2885 goto err_bad_call_stack; 2886 } 2887 while (tmp) { 2888 struct binder_thread *from; 2889 2890 spin_lock(&tmp->lock); 2891 from = tmp->from; 2892 if (from && from->proc == target_proc) { 2893 atomic_inc(&from->tmp_ref); 2894 target_thread = from; 2895 spin_unlock(&tmp->lock); 2896 break; 2897 } 2898 spin_unlock(&tmp->lock); 2899 tmp = tmp->from_parent; 2900 } 2901 } 2902 binder_inner_proc_unlock(proc); 2903 } 2904 if (target_thread) 2905 e->to_thread = target_thread->pid; 2906 e->to_proc = target_proc->pid; 2907 2908 /* TODO: reuse incoming transaction for reply */ 2909 t = kzalloc(sizeof(*t), GFP_KERNEL); 2910 if (t == NULL) { 2911 return_error = BR_FAILED_REPLY; 2912 return_error_param = -ENOMEM; 2913 return_error_line = __LINE__; 2914 goto err_alloc_t_failed; 2915 } 2916 INIT_LIST_HEAD(&t->fd_fixups); 2917 binder_stats_created(BINDER_STAT_TRANSACTION); 2918 spin_lock_init(&t->lock); 2919 2920 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 2921 if (tcomplete == NULL) { 2922 return_error = BR_FAILED_REPLY; 2923 return_error_param = -ENOMEM; 2924 return_error_line = __LINE__; 2925 goto err_alloc_tcomplete_failed; 2926 } 2927 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 2928 2929 t->debug_id = t_debug_id; 2930 2931 if (reply) 2932 binder_debug(BINDER_DEBUG_TRANSACTION, 2933 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 2934 proc->pid, thread->pid, t->debug_id, 2935 target_proc->pid, target_thread->pid, 2936 (u64)tr->data.ptr.buffer, 2937 (u64)tr->data.ptr.offsets, 2938 (u64)tr->data_size, (u64)tr->offsets_size, 2939 (u64)extra_buffers_size); 2940 else 2941 binder_debug(BINDER_DEBUG_TRANSACTION, 2942 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 2943 proc->pid, thread->pid, t->debug_id, 2944 target_proc->pid, target_node->debug_id, 2945 (u64)tr->data.ptr.buffer, 2946 (u64)tr->data.ptr.offsets, 2947 (u64)tr->data_size, (u64)tr->offsets_size, 2948 (u64)extra_buffers_size); 2949 2950 if (!reply && !(tr->flags & TF_ONE_WAY)) 2951 t->from = thread; 2952 else 2953 t->from = NULL; 2954 t->sender_euid = task_euid(proc->tsk); 2955 t->to_proc = target_proc; 2956 t->to_thread = target_thread; 2957 t->code = tr->code; 2958 t->flags = tr->flags; 2959 t->priority = task_nice(current); 2960 2961 trace_binder_transaction(reply, t, 
target_node); 2962 2963 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 2964 tr->offsets_size, extra_buffers_size, 2965 !reply && (t->flags & TF_ONE_WAY)); 2966 if (IS_ERR(t->buffer)) { 2967 /* 2968 * -ESRCH indicates VMA cleared. The target is dying. 2969 */ 2970 return_error_param = PTR_ERR(t->buffer); 2971 return_error = return_error_param == -ESRCH ? 2972 BR_DEAD_REPLY : BR_FAILED_REPLY; 2973 return_error_line = __LINE__; 2974 t->buffer = NULL; 2975 goto err_binder_alloc_buf_failed; 2976 } 2977 t->buffer->debug_id = t->debug_id; 2978 t->buffer->transaction = t; 2979 t->buffer->target_node = target_node; 2980 trace_binder_transaction_alloc_buf(t->buffer); 2981 off_start = (binder_size_t *)(t->buffer->data + 2982 ALIGN(tr->data_size, sizeof(void *))); 2983 offp = off_start; 2984 2985 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2986 tr->data.ptr.buffer, tr->data_size)) { 2987 binder_user_error("%d:%d got transaction with invalid data ptr\n", 2988 proc->pid, thread->pid); 2989 return_error = BR_FAILED_REPLY; 2990 return_error_param = -EFAULT; 2991 return_error_line = __LINE__; 2992 goto err_copy_data_failed; 2993 } 2994 if (copy_from_user(offp, (const void __user *)(uintptr_t) 2995 tr->data.ptr.offsets, tr->offsets_size)) { 2996 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2997 proc->pid, thread->pid); 2998 return_error = BR_FAILED_REPLY; 2999 return_error_param = -EFAULT; 3000 return_error_line = __LINE__; 3001 goto err_copy_data_failed; 3002 } 3003 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3004 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3005 proc->pid, thread->pid, (u64)tr->offsets_size); 3006 return_error = BR_FAILED_REPLY; 3007 return_error_param = -EINVAL; 3008 return_error_line = __LINE__; 3009 goto err_bad_offset; 3010 } 3011 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3012 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3013 proc->pid, thread->pid, 3014 (u64)extra_buffers_size); 3015 return_error = BR_FAILED_REPLY; 3016 return_error_param = -EINVAL; 3017 return_error_line = __LINE__; 3018 goto err_bad_offset; 3019 } 3020 off_end = (void *)off_start + tr->offsets_size; 3021 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 3022 sg_buf_end = sg_bufp + extra_buffers_size; 3023 off_min = 0; 3024 for (; offp < off_end; offp++) { 3025 struct binder_object_header *hdr; 3026 size_t object_size = binder_validate_object(t->buffer, *offp); 3027 3028 if (object_size == 0 || *offp < off_min) { 3029 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3030 proc->pid, thread->pid, (u64)*offp, 3031 (u64)off_min, 3032 (u64)t->buffer->data_size); 3033 return_error = BR_FAILED_REPLY; 3034 return_error_param = -EINVAL; 3035 return_error_line = __LINE__; 3036 goto err_bad_offset; 3037 } 3038 3039 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 3040 off_min = *offp + object_size; 3041 switch (hdr->type) { 3042 case BINDER_TYPE_BINDER: 3043 case BINDER_TYPE_WEAK_BINDER: { 3044 struct flat_binder_object *fp; 3045 3046 fp = to_flat_binder_object(hdr); 3047 ret = binder_translate_binder(fp, t, thread); 3048 if (ret < 0) { 3049 return_error = BR_FAILED_REPLY; 3050 return_error_param = ret; 3051 return_error_line = __LINE__; 3052 goto err_translate_failed; 3053 } 3054 } break; 3055 case BINDER_TYPE_HANDLE: 3056 case BINDER_TYPE_WEAK_HANDLE: { 3057 struct flat_binder_object *fp; 3058 
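			/*
			 * A handle is only meaningful in the sender's ref
			 * table; binder_translate_handle() rewrites it either
			 * as a local binder object (if the node lives in the
			 * target) or as a handle valid in the target's own
			 * ref table.
			 */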
3059 fp = to_flat_binder_object(hdr); 3060 ret = binder_translate_handle(fp, t, thread); 3061 if (ret < 0) { 3062 return_error = BR_FAILED_REPLY; 3063 return_error_param = ret; 3064 return_error_line = __LINE__; 3065 goto err_translate_failed; 3066 } 3067 } break; 3068 3069 case BINDER_TYPE_FD: { 3070 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3071 int ret = binder_translate_fd(&fp->fd, t, thread, 3072 in_reply_to); 3073 3074 if (ret < 0) { 3075 return_error = BR_FAILED_REPLY; 3076 return_error_param = ret; 3077 return_error_line = __LINE__; 3078 goto err_translate_failed; 3079 } 3080 fp->pad_binder = 0; 3081 } break; 3082 case BINDER_TYPE_FDA: { 3083 struct binder_fd_array_object *fda = 3084 to_binder_fd_array_object(hdr); 3085 struct binder_buffer_object *parent = 3086 binder_validate_ptr(t->buffer, fda->parent, 3087 off_start, 3088 offp - off_start); 3089 if (!parent) { 3090 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3091 proc->pid, thread->pid); 3092 return_error = BR_FAILED_REPLY; 3093 return_error_param = -EINVAL; 3094 return_error_line = __LINE__; 3095 goto err_bad_parent; 3096 } 3097 if (!binder_validate_fixup(t->buffer, off_start, 3098 parent, fda->parent_offset, 3099 last_fixup_obj, 3100 last_fixup_min_off)) { 3101 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3102 proc->pid, thread->pid); 3103 return_error = BR_FAILED_REPLY; 3104 return_error_param = -EINVAL; 3105 return_error_line = __LINE__; 3106 goto err_bad_parent; 3107 } 3108 ret = binder_translate_fd_array(fda, parent, t, thread, 3109 in_reply_to); 3110 if (ret < 0) { 3111 return_error = BR_FAILED_REPLY; 3112 return_error_param = ret; 3113 return_error_line = __LINE__; 3114 goto err_translate_failed; 3115 } 3116 last_fixup_obj = parent; 3117 last_fixup_min_off = 3118 fda->parent_offset + sizeof(u32) * fda->num_fds; 3119 } break; 3120 case BINDER_TYPE_PTR: { 3121 struct binder_buffer_object *bp = 3122 to_binder_buffer_object(hdr); 3123 size_t buf_left = sg_buf_end - sg_bufp; 3124 3125 if (bp->length > buf_left) { 3126 binder_user_error("%d:%d got transaction with too large buffer\n", 3127 proc->pid, thread->pid); 3128 return_error = BR_FAILED_REPLY; 3129 return_error_param = -EINVAL; 3130 return_error_line = __LINE__; 3131 goto err_bad_offset; 3132 } 3133 if (copy_from_user(sg_bufp, 3134 (const void __user *)(uintptr_t) 3135 bp->buffer, bp->length)) { 3136 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3137 proc->pid, thread->pid); 3138 return_error_param = -EFAULT; 3139 return_error = BR_FAILED_REPLY; 3140 return_error_line = __LINE__; 3141 goto err_copy_data_failed; 3142 } 3143 /* Fixup buffer pointer to target proc address space */ 3144 bp->buffer = (uintptr_t)sg_bufp + 3145 binder_alloc_get_user_buffer_offset( 3146 &target_proc->alloc); 3147 sg_bufp += ALIGN(bp->length, sizeof(u64)); 3148 3149 ret = binder_fixup_parent(t, thread, bp, off_start, 3150 offp - off_start, 3151 last_fixup_obj, 3152 last_fixup_min_off); 3153 if (ret < 0) { 3154 return_error = BR_FAILED_REPLY; 3155 return_error_param = ret; 3156 return_error_line = __LINE__; 3157 goto err_translate_failed; 3158 } 3159 last_fixup_obj = bp; 3160 last_fixup_min_off = 0; 3161 } break; 3162 default: 3163 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3164 proc->pid, thread->pid, hdr->type); 3165 return_error = BR_FAILED_REPLY; 3166 return_error_param = -EINVAL; 3167 return_error_line = __LINE__; 3168 goto err_bad_object_type; 3169 } 3170 
} 3171 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3172 t->work.type = BINDER_WORK_TRANSACTION; 3173 3174 if (reply) { 3175 binder_enqueue_thread_work(thread, tcomplete); 3176 binder_inner_proc_lock(target_proc); 3177 if (target_thread->is_dead) { 3178 binder_inner_proc_unlock(target_proc); 3179 goto err_dead_proc_or_thread; 3180 } 3181 BUG_ON(t->buffer->async_transaction != 0); 3182 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3183 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3184 binder_inner_proc_unlock(target_proc); 3185 wake_up_interruptible_sync(&target_thread->wait); 3186 binder_free_transaction(in_reply_to); 3187 } else if (!(t->flags & TF_ONE_WAY)) { 3188 BUG_ON(t->buffer->async_transaction != 0); 3189 binder_inner_proc_lock(proc); 3190 /* 3191 * Defer the TRANSACTION_COMPLETE, so we don't return to 3192 * userspace immediately; this allows the target process to 3193 * immediately start processing this transaction, reducing 3194 * latency. We will then return the TRANSACTION_COMPLETE when 3195 * the target replies (or there is an error). 3196 */ 3197 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3198 t->need_reply = 1; 3199 t->from_parent = thread->transaction_stack; 3200 thread->transaction_stack = t; 3201 binder_inner_proc_unlock(proc); 3202 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3203 binder_inner_proc_lock(proc); 3204 binder_pop_transaction_ilocked(thread, t); 3205 binder_inner_proc_unlock(proc); 3206 goto err_dead_proc_or_thread; 3207 } 3208 } else { 3209 BUG_ON(target_node == NULL); 3210 BUG_ON(t->buffer->async_transaction != 1); 3211 binder_enqueue_thread_work(thread, tcomplete); 3212 if (!binder_proc_transaction(t, target_proc, NULL)) 3213 goto err_dead_proc_or_thread; 3214 } 3215 if (target_thread) 3216 binder_thread_dec_tmpref(target_thread); 3217 binder_proc_dec_tmpref(target_proc); 3218 if (target_node) 3219 binder_dec_node_tmpref(target_node); 3220 /* 3221 * write barrier to synchronize with initialization 3222 * of log entry 3223 */ 3224 smp_wmb(); 3225 WRITE_ONCE(e->debug_id_done, t_debug_id); 3226 return; 3227 3228 err_dead_proc_or_thread: 3229 return_error = BR_DEAD_REPLY; 3230 return_error_line = __LINE__; 3231 binder_dequeue_work(proc, tcomplete); 3232 err_translate_failed: 3233 err_bad_object_type: 3234 err_bad_offset: 3235 err_bad_parent: 3236 err_copy_data_failed: 3237 binder_free_txn_fixups(t); 3238 trace_binder_transaction_failed_buffer_release(t->buffer); 3239 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3240 if (target_node) 3241 binder_dec_node_tmpref(target_node); 3242 target_node = NULL; 3243 t->buffer->transaction = NULL; 3244 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3245 err_binder_alloc_buf_failed: 3246 kfree(tcomplete); 3247 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3248 err_alloc_tcomplete_failed: 3249 kfree(t); 3250 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3251 err_alloc_t_failed: 3252 err_bad_todo_list: 3253 err_bad_call_stack: 3254 err_empty_call_stack: 3255 err_dead_binder: 3256 err_invalid_target_handle: 3257 if (target_thread) 3258 binder_thread_dec_tmpref(target_thread); 3259 if (target_proc) 3260 binder_proc_dec_tmpref(target_proc); 3261 if (target_node) { 3262 binder_dec_node(target_node, 1, 0); 3263 binder_dec_node_tmpref(target_node); 3264 } 3265 3266 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3267 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3268 proc->pid, thread->pid, return_error, 
return_error_param, 3269 (u64)tr->data_size, (u64)tr->offsets_size, 3270 return_error_line); 3271 3272 { 3273 struct binder_transaction_log_entry *fe; 3274 3275 e->return_error = return_error; 3276 e->return_error_param = return_error_param; 3277 e->return_error_line = return_error_line; 3278 fe = binder_transaction_log_add(&binder_transaction_log_failed); 3279 *fe = *e; 3280 /* 3281 * write barrier to synchronize with initialization 3282 * of log entry 3283 */ 3284 smp_wmb(); 3285 WRITE_ONCE(e->debug_id_done, t_debug_id); 3286 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3287 } 3288 3289 BUG_ON(thread->return_error.cmd != BR_OK); 3290 if (in_reply_to) { 3291 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3292 binder_enqueue_thread_work(thread, &thread->return_error.work); 3293 binder_send_failed_reply(in_reply_to, return_error); 3294 } else { 3295 thread->return_error.cmd = return_error; 3296 binder_enqueue_thread_work(thread, &thread->return_error.work); 3297 } 3298 } 3299 3300 /** 3301 * binder_free_buf() - free the specified buffer 3302 * @proc: binder proc that owns buffer 3303 * @buffer: buffer to be freed 3304 * 3305 * If buffer for an async transaction, enqueue the next async 3306 * transaction from the node. 3307 * 3308 * Cleanup buffer and free it. 3309 */ 3310 static void 3311 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) 3312 { 3313 if (buffer->transaction) { 3314 buffer->transaction->buffer = NULL; 3315 buffer->transaction = NULL; 3316 } 3317 if (buffer->async_transaction && buffer->target_node) { 3318 struct binder_node *buf_node; 3319 struct binder_work *w; 3320 3321 buf_node = buffer->target_node; 3322 binder_node_inner_lock(buf_node); 3323 BUG_ON(!buf_node->has_async_transaction); 3324 BUG_ON(buf_node->proc != proc); 3325 w = binder_dequeue_work_head_ilocked( 3326 &buf_node->async_todo); 3327 if (!w) { 3328 buf_node->has_async_transaction = false; 3329 } else { 3330 binder_enqueue_work_ilocked( 3331 w, &proc->todo); 3332 binder_wakeup_proc_ilocked(proc); 3333 } 3334 binder_node_inner_unlock(buf_node); 3335 } 3336 trace_binder_transaction_buffer_release(buffer); 3337 binder_transaction_buffer_release(proc, buffer, NULL); 3338 binder_alloc_free_buf(&proc->alloc, buffer); 3339 } 3340 3341 static int binder_thread_write(struct binder_proc *proc, 3342 struct binder_thread *thread, 3343 binder_uintptr_t binder_buffer, size_t size, 3344 binder_size_t *consumed) 3345 { 3346 uint32_t cmd; 3347 struct binder_context *context = proc->context; 3348 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3349 void __user *ptr = buffer + *consumed; 3350 void __user *end = buffer + size; 3351 3352 while (ptr < end && thread->return_error.cmd == BR_OK) { 3353 int ret; 3354 3355 if (get_user(cmd, (uint32_t __user *)ptr)) 3356 return -EFAULT; 3357 ptr += sizeof(uint32_t); 3358 trace_binder_command(cmd); 3359 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3360 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3361 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3362 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3363 } 3364 switch (cmd) { 3365 case BC_INCREFS: 3366 case BC_ACQUIRE: 3367 case BC_RELEASE: 3368 case BC_DECREFS: { 3369 uint32_t target; 3370 const char *debug_string; 3371 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3372 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3373 struct binder_ref_data rdata; 3374 3375 if (get_user(target, (uint32_t __user *)ptr)) 3376 return -EFAULT; 3377 3378 ptr += sizeof(uint32_t); 3379 ret = -1; 3380 if 
(increment && !target) { 3381 struct binder_node *ctx_mgr_node; 3382 mutex_lock(&context->context_mgr_node_lock); 3383 ctx_mgr_node = context->binder_context_mgr_node; 3384 if (ctx_mgr_node) 3385 ret = binder_inc_ref_for_node( 3386 proc, ctx_mgr_node, 3387 strong, NULL, &rdata); 3388 mutex_unlock(&context->context_mgr_node_lock); 3389 } 3390 if (ret) 3391 ret = binder_update_ref_for_handle( 3392 proc, target, increment, strong, 3393 &rdata); 3394 if (!ret && rdata.desc != target) { 3395 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3396 proc->pid, thread->pid, 3397 target, rdata.desc); 3398 } 3399 switch (cmd) { 3400 case BC_INCREFS: 3401 debug_string = "IncRefs"; 3402 break; 3403 case BC_ACQUIRE: 3404 debug_string = "Acquire"; 3405 break; 3406 case BC_RELEASE: 3407 debug_string = "Release"; 3408 break; 3409 case BC_DECREFS: 3410 default: 3411 debug_string = "DecRefs"; 3412 break; 3413 } 3414 if (ret) { 3415 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3416 proc->pid, thread->pid, debug_string, 3417 strong, target, ret); 3418 break; 3419 } 3420 binder_debug(BINDER_DEBUG_USER_REFS, 3421 "%d:%d %s ref %d desc %d s %d w %d\n", 3422 proc->pid, thread->pid, debug_string, 3423 rdata.debug_id, rdata.desc, rdata.strong, 3424 rdata.weak); 3425 break; 3426 } 3427 case BC_INCREFS_DONE: 3428 case BC_ACQUIRE_DONE: { 3429 binder_uintptr_t node_ptr; 3430 binder_uintptr_t cookie; 3431 struct binder_node *node; 3432 bool free_node; 3433 3434 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3435 return -EFAULT; 3436 ptr += sizeof(binder_uintptr_t); 3437 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3438 return -EFAULT; 3439 ptr += sizeof(binder_uintptr_t); 3440 node = binder_get_node(proc, node_ptr); 3441 if (node == NULL) { 3442 binder_user_error("%d:%d %s u%016llx no match\n", 3443 proc->pid, thread->pid, 3444 cmd == BC_INCREFS_DONE ? 3445 "BC_INCREFS_DONE" : 3446 "BC_ACQUIRE_DONE", 3447 (u64)node_ptr); 3448 break; 3449 } 3450 if (cookie != node->cookie) { 3451 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3452 proc->pid, thread->pid, 3453 cmd == BC_INCREFS_DONE ? 3454 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3455 (u64)node_ptr, node->debug_id, 3456 (u64)cookie, (u64)node->cookie); 3457 binder_put_node(node); 3458 break; 3459 } 3460 binder_node_inner_lock(node); 3461 if (cmd == BC_ACQUIRE_DONE) { 3462 if (node->pending_strong_ref == 0) { 3463 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3464 proc->pid, thread->pid, 3465 node->debug_id); 3466 binder_node_inner_unlock(node); 3467 binder_put_node(node); 3468 break; 3469 } 3470 node->pending_strong_ref = 0; 3471 } else { 3472 if (node->pending_weak_ref == 0) { 3473 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3474 proc->pid, thread->pid, 3475 node->debug_id); 3476 binder_node_inner_unlock(node); 3477 binder_put_node(node); 3478 break; 3479 } 3480 node->pending_weak_ref = 0; 3481 } 3482 free_node = binder_dec_node_nilocked(node, 3483 cmd == BC_ACQUIRE_DONE, 0); 3484 WARN_ON(free_node); 3485 binder_debug(BINDER_DEBUG_USER_REFS, 3486 "%d:%d %s node %d ls %d lw %d tr %d\n", 3487 proc->pid, thread->pid, 3488 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3489 node->debug_id, node->local_strong_refs, 3490 node->local_weak_refs, node->tmp_refs); 3491 binder_node_inner_unlock(node); 3492 binder_put_node(node); 3493 break; 3494 } 3495 case BC_ATTEMPT_ACQUIRE: 3496 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3497 return -EINVAL; 3498 case BC_ACQUIRE_RESULT: 3499 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3500 return -EINVAL; 3501 3502 case BC_FREE_BUFFER: { 3503 binder_uintptr_t data_ptr; 3504 struct binder_buffer *buffer; 3505 3506 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3507 return -EFAULT; 3508 ptr += sizeof(binder_uintptr_t); 3509 3510 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3511 data_ptr); 3512 if (IS_ERR_OR_NULL(buffer)) { 3513 if (PTR_ERR(buffer) == -EPERM) { 3514 binder_user_error( 3515 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 3516 proc->pid, thread->pid, 3517 (u64)data_ptr); 3518 } else { 3519 binder_user_error( 3520 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 3521 proc->pid, thread->pid, 3522 (u64)data_ptr); 3523 } 3524 break; 3525 } 3526 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3527 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3528 proc->pid, thread->pid, (u64)data_ptr, 3529 buffer->debug_id, 3530 buffer->transaction ? "active" : "finished"); 3531 binder_free_buf(proc, buffer); 3532 break; 3533 } 3534 3535 case BC_TRANSACTION_SG: 3536 case BC_REPLY_SG: { 3537 struct binder_transaction_data_sg tr; 3538 3539 if (copy_from_user(&tr, ptr, sizeof(tr))) 3540 return -EFAULT; 3541 ptr += sizeof(tr); 3542 binder_transaction(proc, thread, &tr.transaction_data, 3543 cmd == BC_REPLY_SG, tr.buffers_size); 3544 break; 3545 } 3546 case BC_TRANSACTION: 3547 case BC_REPLY: { 3548 struct binder_transaction_data tr; 3549 3550 if (copy_from_user(&tr, ptr, sizeof(tr))) 3551 return -EFAULT; 3552 ptr += sizeof(tr); 3553 binder_transaction(proc, thread, &tr, 3554 cmd == BC_REPLY, 0); 3555 break; 3556 } 3557 3558 case BC_REGISTER_LOOPER: 3559 binder_debug(BINDER_DEBUG_THREADS, 3560 "%d:%d BC_REGISTER_LOOPER\n", 3561 proc->pid, thread->pid); 3562 binder_inner_proc_lock(proc); 3563 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3564 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3565 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3566 proc->pid, thread->pid); 3567 } else if (proc->requested_threads == 0) { 3568 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3569 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3570 proc->pid, thread->pid); 3571 } else { 3572 proc->requested_threads--; 3573 proc->requested_threads_started++; 3574 } 3575 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3576 binder_inner_proc_unlock(proc); 3577 break; 3578 case BC_ENTER_LOOPER: 3579 binder_debug(BINDER_DEBUG_THREADS, 3580 "%d:%d BC_ENTER_LOOPER\n", 3581 proc->pid, thread->pid); 3582 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3583 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3584 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3585 proc->pid, thread->pid); 3586 } 3587 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3588 break; 3589 case BC_EXIT_LOOPER: 3590 binder_debug(BINDER_DEBUG_THREADS, 3591 "%d:%d BC_EXIT_LOOPER\n", 3592 proc->pid, thread->pid); 3593 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3594 break; 3595 3596 case BC_REQUEST_DEATH_NOTIFICATION: 3597 case BC_CLEAR_DEATH_NOTIFICATION: { 3598 uint32_t target; 3599 
binder_uintptr_t cookie; 3600 struct binder_ref *ref; 3601 struct binder_ref_death *death = NULL; 3602 3603 if (get_user(target, (uint32_t __user *)ptr)) 3604 return -EFAULT; 3605 ptr += sizeof(uint32_t); 3606 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3607 return -EFAULT; 3608 ptr += sizeof(binder_uintptr_t); 3609 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3610 /* 3611 * Allocate memory for death notification 3612 * before taking lock 3613 */ 3614 death = kzalloc(sizeof(*death), GFP_KERNEL); 3615 if (death == NULL) { 3616 WARN_ON(thread->return_error.cmd != 3617 BR_OK); 3618 thread->return_error.cmd = BR_ERROR; 3619 binder_enqueue_thread_work( 3620 thread, 3621 &thread->return_error.work); 3622 binder_debug( 3623 BINDER_DEBUG_FAILED_TRANSACTION, 3624 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3625 proc->pid, thread->pid); 3626 break; 3627 } 3628 } 3629 binder_proc_lock(proc); 3630 ref = binder_get_ref_olocked(proc, target, false); 3631 if (ref == NULL) { 3632 binder_user_error("%d:%d %s invalid ref %d\n", 3633 proc->pid, thread->pid, 3634 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3635 "BC_REQUEST_DEATH_NOTIFICATION" : 3636 "BC_CLEAR_DEATH_NOTIFICATION", 3637 target); 3638 binder_proc_unlock(proc); 3639 kfree(death); 3640 break; 3641 } 3642 3643 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3644 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3645 proc->pid, thread->pid, 3646 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3647 "BC_REQUEST_DEATH_NOTIFICATION" : 3648 "BC_CLEAR_DEATH_NOTIFICATION", 3649 (u64)cookie, ref->data.debug_id, 3650 ref->data.desc, ref->data.strong, 3651 ref->data.weak, ref->node->debug_id); 3652 3653 binder_node_lock(ref->node); 3654 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3655 if (ref->death) { 3656 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3657 proc->pid, thread->pid); 3658 binder_node_unlock(ref->node); 3659 binder_proc_unlock(proc); 3660 kfree(death); 3661 break; 3662 } 3663 binder_stats_created(BINDER_STAT_DEATH); 3664 INIT_LIST_HEAD(&death->work.entry); 3665 death->cookie = cookie; 3666 ref->death = death; 3667 if (ref->node->proc == NULL) { 3668 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3669 3670 binder_inner_proc_lock(proc); 3671 binder_enqueue_work_ilocked( 3672 &ref->death->work, &proc->todo); 3673 binder_wakeup_proc_ilocked(proc); 3674 binder_inner_proc_unlock(proc); 3675 } 3676 } else { 3677 if (ref->death == NULL) { 3678 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3679 proc->pid, thread->pid); 3680 binder_node_unlock(ref->node); 3681 binder_proc_unlock(proc); 3682 break; 3683 } 3684 death = ref->death; 3685 if (death->cookie != cookie) { 3686 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3687 proc->pid, thread->pid, 3688 (u64)death->cookie, 3689 (u64)cookie); 3690 binder_node_unlock(ref->node); 3691 binder_proc_unlock(proc); 3692 break; 3693 } 3694 ref->death = NULL; 3695 binder_inner_proc_lock(proc); 3696 if (list_empty(&death->work.entry)) { 3697 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3698 if (thread->looper & 3699 (BINDER_LOOPER_STATE_REGISTERED | 3700 BINDER_LOOPER_STATE_ENTERED)) 3701 binder_enqueue_thread_work_ilocked( 3702 thread, 3703 &death->work); 3704 else { 3705 binder_enqueue_work_ilocked( 3706 &death->work, 3707 &proc->todo); 3708 binder_wakeup_proc_ilocked( 3709 proc); 3710 } 3711 } else { 3712 BUG_ON(death->work.type != 
BINDER_WORK_DEAD_BINDER); 3713 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3714 } 3715 binder_inner_proc_unlock(proc); 3716 } 3717 binder_node_unlock(ref->node); 3718 binder_proc_unlock(proc); 3719 } break; 3720 case BC_DEAD_BINDER_DONE: { 3721 struct binder_work *w; 3722 binder_uintptr_t cookie; 3723 struct binder_ref_death *death = NULL; 3724 3725 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3726 return -EFAULT; 3727 3728 ptr += sizeof(cookie); 3729 binder_inner_proc_lock(proc); 3730 list_for_each_entry(w, &proc->delivered_death, 3731 entry) { 3732 struct binder_ref_death *tmp_death = 3733 container_of(w, 3734 struct binder_ref_death, 3735 work); 3736 3737 if (tmp_death->cookie == cookie) { 3738 death = tmp_death; 3739 break; 3740 } 3741 } 3742 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3743 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 3744 proc->pid, thread->pid, (u64)cookie, 3745 death); 3746 if (death == NULL) { 3747 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3748 proc->pid, thread->pid, (u64)cookie); 3749 binder_inner_proc_unlock(proc); 3750 break; 3751 } 3752 binder_dequeue_work_ilocked(&death->work); 3753 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3754 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3755 if (thread->looper & 3756 (BINDER_LOOPER_STATE_REGISTERED | 3757 BINDER_LOOPER_STATE_ENTERED)) 3758 binder_enqueue_thread_work_ilocked( 3759 thread, &death->work); 3760 else { 3761 binder_enqueue_work_ilocked( 3762 &death->work, 3763 &proc->todo); 3764 binder_wakeup_proc_ilocked(proc); 3765 } 3766 } 3767 binder_inner_proc_unlock(proc); 3768 } break; 3769 3770 default: 3771 pr_err("%d:%d unknown command %d\n", 3772 proc->pid, thread->pid, cmd); 3773 return -EINVAL; 3774 } 3775 *consumed = ptr - buffer; 3776 } 3777 return 0; 3778 } 3779 3780 static void binder_stat_br(struct binder_proc *proc, 3781 struct binder_thread *thread, uint32_t cmd) 3782 { 3783 trace_binder_return(cmd); 3784 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 3785 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 3786 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 3787 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 3788 } 3789 } 3790 3791 static int binder_put_node_cmd(struct binder_proc *proc, 3792 struct binder_thread *thread, 3793 void __user **ptrp, 3794 binder_uintptr_t node_ptr, 3795 binder_uintptr_t node_cookie, 3796 int node_debug_id, 3797 uint32_t cmd, const char *cmd_name) 3798 { 3799 void __user *ptr = *ptrp; 3800 3801 if (put_user(cmd, (uint32_t __user *)ptr)) 3802 return -EFAULT; 3803 ptr += sizeof(uint32_t); 3804 3805 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3806 return -EFAULT; 3807 ptr += sizeof(binder_uintptr_t); 3808 3809 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 3810 return -EFAULT; 3811 ptr += sizeof(binder_uintptr_t); 3812 3813 binder_stat_br(proc, thread, cmd); 3814 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 3815 proc->pid, thread->pid, cmd_name, node_debug_id, 3816 (u64)node_ptr, (u64)node_cookie); 3817 3818 *ptrp = ptr; 3819 return 0; 3820 } 3821 3822 static int binder_wait_for_work(struct binder_thread *thread, 3823 bool do_proc_work) 3824 { 3825 DEFINE_WAIT(wait); 3826 struct binder_proc *proc = thread->proc; 3827 int ret = 0; 3828 3829 freezer_do_not_count(); 3830 binder_inner_proc_lock(proc); 3831 for (;;) { 3832 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 3833 if (binder_has_work_ilocked(thread, do_proc_work)) 3834 break; 3835 if (do_proc_work) 3836 
list_add(&thread->waiting_thread_node, 3837 &proc->waiting_threads); 3838 binder_inner_proc_unlock(proc); 3839 schedule(); 3840 binder_inner_proc_lock(proc); 3841 list_del_init(&thread->waiting_thread_node); 3842 if (signal_pending(current)) { 3843 ret = -ERESTARTSYS; 3844 break; 3845 } 3846 } 3847 finish_wait(&thread->wait, &wait); 3848 binder_inner_proc_unlock(proc); 3849 freezer_count(); 3850 3851 return ret; 3852 } 3853 3854 /** 3855 * binder_apply_fd_fixups() - finish fd translation 3856 * @t: binder transaction with list of fd fixups 3857 * 3858 * Now that we are in the context of the transaction target 3859 * process, we can allocate and install fds. Process the 3860 * list of fds to translate and fixup the buffer with the 3861 * new fds. 3862 * 3863 * If we fail to allocate an fd, then free the resources by 3864 * fput'ing files that have not been processed and ksys_close'ing 3865 * any fds that have already been allocated. 3866 */ 3867 static int binder_apply_fd_fixups(struct binder_transaction *t) 3868 { 3869 struct binder_txn_fd_fixup *fixup, *tmp; 3870 int ret = 0; 3871 3872 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 3873 int fd = get_unused_fd_flags(O_CLOEXEC); 3874 u32 *fdp; 3875 3876 if (fd < 0) { 3877 binder_debug(BINDER_DEBUG_TRANSACTION, 3878 "failed fd fixup txn %d fd %d\n", 3879 t->debug_id, fd); 3880 ret = -ENOMEM; 3881 break; 3882 } 3883 binder_debug(BINDER_DEBUG_TRANSACTION, 3884 "fd fixup txn %d fd %d\n", 3885 t->debug_id, fd); 3886 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 3887 fd_install(fd, fixup->file); 3888 fixup->file = NULL; 3889 fdp = (u32 *)(t->buffer->data + fixup->offset); 3890 /* 3891 * This store can cause problems for CPUs with a 3892 * VIVT cache (eg ARMv5) since the cache cannot 3893 * detect virtual aliases to the same physical cacheline. 3894 * To support VIVT, this address and the user-space VA 3895 * would both need to be flushed. Since this kernel 3896 * VA is not constructed via page_to_virt(), we can't 3897 * use flush_dcache_page() on it, so we'd have to use 3898 * an internal function. If devices with VIVT ever 3899 * need to run Android, we'll either need to go back 3900 * to patching the translated fd from the sender side 3901 * (using the non-standard kernel functions), or rework 3902 * how the kernel uses the buffer to use page_to_virt() 3903 * addresses instead of allocating in our own vm area. 3904 * 3905 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT. 
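 * (Illustrative note, expanding on the above rather than changing any
 * behaviour: on a VIVT D-cache this store through the kernel mapping
 * and a later userspace read through the mmap'ed alias of the same
 * buffer can land in different cache lines for the same physical
 * memory, so userspace could observe the stale, pre-fixup value unless
 * both aliases were flushed.)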
3906 */ 3907 *fdp = fd; 3908 } 3909 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 3910 if (fixup->file) { 3911 fput(fixup->file); 3912 } else if (ret) { 3913 u32 *fdp = (u32 *)(t->buffer->data + fixup->offset); 3914 3915 ksys_close(*fdp); 3916 } 3917 list_del(&fixup->fixup_entry); 3918 kfree(fixup); 3919 } 3920 3921 return ret; 3922 } 3923 3924 static int binder_thread_read(struct binder_proc *proc, 3925 struct binder_thread *thread, 3926 binder_uintptr_t binder_buffer, size_t size, 3927 binder_size_t *consumed, int non_block) 3928 { 3929 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3930 void __user *ptr = buffer + *consumed; 3931 void __user *end = buffer + size; 3932 3933 int ret = 0; 3934 int wait_for_proc_work; 3935 3936 if (*consumed == 0) { 3937 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 3938 return -EFAULT; 3939 ptr += sizeof(uint32_t); 3940 } 3941 3942 retry: 3943 binder_inner_proc_lock(proc); 3944 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 3945 binder_inner_proc_unlock(proc); 3946 3947 thread->looper |= BINDER_LOOPER_STATE_WAITING; 3948 3949 trace_binder_wait_for_work(wait_for_proc_work, 3950 !!thread->transaction_stack, 3951 !binder_worklist_empty(proc, &thread->todo)); 3952 if (wait_for_proc_work) { 3953 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 3954 BINDER_LOOPER_STATE_ENTERED))) { 3955 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 3956 proc->pid, thread->pid, thread->looper); 3957 wait_event_interruptible(binder_user_error_wait, 3958 binder_stop_on_user_error < 2); 3959 } 3960 binder_set_nice(proc->default_priority); 3961 } 3962 3963 if (non_block) { 3964 if (!binder_has_work(thread, wait_for_proc_work)) 3965 ret = -EAGAIN; 3966 } else { 3967 ret = binder_wait_for_work(thread, wait_for_proc_work); 3968 } 3969 3970 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 3971 3972 if (ret) 3973 return ret; 3974 3975 while (1) { 3976 uint32_t cmd; 3977 struct binder_transaction_data tr; 3978 struct binder_work *w = NULL; 3979 struct list_head *list = NULL; 3980 struct binder_transaction *t = NULL; 3981 struct binder_thread *t_from; 3982 3983 binder_inner_proc_lock(proc); 3984 if (!binder_worklist_empty_ilocked(&thread->todo)) 3985 list = &thread->todo; 3986 else if (!binder_worklist_empty_ilocked(&proc->todo) && 3987 wait_for_proc_work) 3988 list = &proc->todo; 3989 else { 3990 binder_inner_proc_unlock(proc); 3991 3992 /* no data added */ 3993 if (ptr - buffer == 4 && !thread->looper_need_return) 3994 goto retry; 3995 break; 3996 } 3997 3998 if (end - ptr < sizeof(tr) + 4) { 3999 binder_inner_proc_unlock(proc); 4000 break; 4001 } 4002 w = binder_dequeue_work_head_ilocked(list); 4003 if (binder_worklist_empty_ilocked(&thread->todo)) 4004 thread->process_todo = false; 4005 4006 switch (w->type) { 4007 case BINDER_WORK_TRANSACTION: { 4008 binder_inner_proc_unlock(proc); 4009 t = container_of(w, struct binder_transaction, work); 4010 } break; 4011 case BINDER_WORK_RETURN_ERROR: { 4012 struct binder_error *e = container_of( 4013 w, struct binder_error, work); 4014 4015 WARN_ON(e->cmd == BR_OK); 4016 binder_inner_proc_unlock(proc); 4017 if (put_user(e->cmd, (uint32_t __user *)ptr)) 4018 return -EFAULT; 4019 cmd = e->cmd; 4020 e->cmd = BR_OK; 4021 ptr += sizeof(uint32_t); 4022 4023 binder_stat_br(proc, thread, cmd); 4024 } break; 4025 case BINDER_WORK_TRANSACTION_COMPLETE: { 4026 binder_inner_proc_unlock(proc); 4027 cmd = 
BR_TRANSACTION_COMPLETE; 4028 if (put_user(cmd, (uint32_t __user *)ptr)) 4029 return -EFAULT; 4030 ptr += sizeof(uint32_t); 4031 4032 binder_stat_br(proc, thread, cmd); 4033 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 4034 "%d:%d BR_TRANSACTION_COMPLETE\n", 4035 proc->pid, thread->pid); 4036 kfree(w); 4037 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4038 } break; 4039 case BINDER_WORK_NODE: { 4040 struct binder_node *node = container_of(w, struct binder_node, work); 4041 int strong, weak; 4042 binder_uintptr_t node_ptr = node->ptr; 4043 binder_uintptr_t node_cookie = node->cookie; 4044 int node_debug_id = node->debug_id; 4045 int has_weak_ref; 4046 int has_strong_ref; 4047 void __user *orig_ptr = ptr; 4048 4049 BUG_ON(proc != node->proc); 4050 strong = node->internal_strong_refs || 4051 node->local_strong_refs; 4052 weak = !hlist_empty(&node->refs) || 4053 node->local_weak_refs || 4054 node->tmp_refs || strong; 4055 has_strong_ref = node->has_strong_ref; 4056 has_weak_ref = node->has_weak_ref; 4057 4058 if (weak && !has_weak_ref) { 4059 node->has_weak_ref = 1; 4060 node->pending_weak_ref = 1; 4061 node->local_weak_refs++; 4062 } 4063 if (strong && !has_strong_ref) { 4064 node->has_strong_ref = 1; 4065 node->pending_strong_ref = 1; 4066 node->local_strong_refs++; 4067 } 4068 if (!strong && has_strong_ref) 4069 node->has_strong_ref = 0; 4070 if (!weak && has_weak_ref) 4071 node->has_weak_ref = 0; 4072 if (!weak && !strong) { 4073 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4074 "%d:%d node %d u%016llx c%016llx deleted\n", 4075 proc->pid, thread->pid, 4076 node_debug_id, 4077 (u64)node_ptr, 4078 (u64)node_cookie); 4079 rb_erase(&node->rb_node, &proc->nodes); 4080 binder_inner_proc_unlock(proc); 4081 binder_node_lock(node); 4082 /* 4083 * Acquire the node lock before freeing the 4084 * node to serialize with other threads that 4085 * may have been holding the node lock while 4086 * decrementing this node (avoids race where 4087 * this thread frees while the other thread 4088 * is unlocking the node after the final 4089 * decrement) 4090 */ 4091 binder_node_unlock(node); 4092 binder_free_node(node); 4093 } else 4094 binder_inner_proc_unlock(proc); 4095 4096 if (weak && !has_weak_ref) 4097 ret = binder_put_node_cmd( 4098 proc, thread, &ptr, node_ptr, 4099 node_cookie, node_debug_id, 4100 BR_INCREFS, "BR_INCREFS"); 4101 if (!ret && strong && !has_strong_ref) 4102 ret = binder_put_node_cmd( 4103 proc, thread, &ptr, node_ptr, 4104 node_cookie, node_debug_id, 4105 BR_ACQUIRE, "BR_ACQUIRE"); 4106 if (!ret && !strong && has_strong_ref) 4107 ret = binder_put_node_cmd( 4108 proc, thread, &ptr, node_ptr, 4109 node_cookie, node_debug_id, 4110 BR_RELEASE, "BR_RELEASE"); 4111 if (!ret && !weak && has_weak_ref) 4112 ret = binder_put_node_cmd( 4113 proc, thread, &ptr, node_ptr, 4114 node_cookie, node_debug_id, 4115 BR_DECREFS, "BR_DECREFS"); 4116 if (orig_ptr == ptr) 4117 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4118 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4119 proc->pid, thread->pid, 4120 node_debug_id, 4121 (u64)node_ptr, 4122 (u64)node_cookie); 4123 if (ret) 4124 return ret; 4125 } break; 4126 case BINDER_WORK_DEAD_BINDER: 4127 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4128 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4129 struct binder_ref_death *death; 4130 uint32_t cmd; 4131 binder_uintptr_t cookie; 4132 4133 death = container_of(w, struct binder_ref_death, work); 4134 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4135 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4136 else 4137 
cmd = BR_DEAD_BINDER; 4138 cookie = death->cookie; 4139 4140 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4141 "%d:%d %s %016llx\n", 4142 proc->pid, thread->pid, 4143 cmd == BR_DEAD_BINDER ? 4144 "BR_DEAD_BINDER" : 4145 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4146 (u64)cookie); 4147 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4148 binder_inner_proc_unlock(proc); 4149 kfree(death); 4150 binder_stats_deleted(BINDER_STAT_DEATH); 4151 } else { 4152 binder_enqueue_work_ilocked( 4153 w, &proc->delivered_death); 4154 binder_inner_proc_unlock(proc); 4155 } 4156 if (put_user(cmd, (uint32_t __user *)ptr)) 4157 return -EFAULT; 4158 ptr += sizeof(uint32_t); 4159 if (put_user(cookie, 4160 (binder_uintptr_t __user *)ptr)) 4161 return -EFAULT; 4162 ptr += sizeof(binder_uintptr_t); 4163 binder_stat_br(proc, thread, cmd); 4164 if (cmd == BR_DEAD_BINDER) 4165 goto done; /* DEAD_BINDER notifications can cause transactions */ 4166 } break; 4167 } 4168 4169 if (!t) 4170 continue; 4171 4172 BUG_ON(t->buffer == NULL); 4173 if (t->buffer->target_node) { 4174 struct binder_node *target_node = t->buffer->target_node; 4175 4176 tr.target.ptr = target_node->ptr; 4177 tr.cookie = target_node->cookie; 4178 t->saved_priority = task_nice(current); 4179 if (t->priority < target_node->min_priority && 4180 !(t->flags & TF_ONE_WAY)) 4181 binder_set_nice(t->priority); 4182 else if (!(t->flags & TF_ONE_WAY) || 4183 t->saved_priority > target_node->min_priority) 4184 binder_set_nice(target_node->min_priority); 4185 cmd = BR_TRANSACTION; 4186 } else { 4187 tr.target.ptr = 0; 4188 tr.cookie = 0; 4189 cmd = BR_REPLY; 4190 } 4191 tr.code = t->code; 4192 tr.flags = t->flags; 4193 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4194 4195 t_from = binder_get_txn_from(t); 4196 if (t_from) { 4197 struct task_struct *sender = t_from->proc->tsk; 4198 4199 tr.sender_pid = task_tgid_nr_ns(sender, 4200 task_active_pid_ns(current)); 4201 } else { 4202 tr.sender_pid = 0; 4203 } 4204 4205 ret = binder_apply_fd_fixups(t); 4206 if (ret) { 4207 struct binder_buffer *buffer = t->buffer; 4208 bool oneway = !!(t->flags & TF_ONE_WAY); 4209 int tid = t->debug_id; 4210 4211 if (t_from) 4212 binder_thread_dec_tmpref(t_from); 4213 buffer->transaction = NULL; 4214 binder_cleanup_transaction(t, "fd fixups failed", 4215 BR_FAILED_REPLY); 4216 binder_free_buf(proc, buffer); 4217 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4218 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4219 proc->pid, thread->pid, 4220 oneway ? "async " : 4221 (cmd == BR_REPLY ? 
"reply " : ""), 4222 tid, BR_FAILED_REPLY, ret, __LINE__); 4223 if (cmd == BR_REPLY) { 4224 cmd = BR_FAILED_REPLY; 4225 if (put_user(cmd, (uint32_t __user *)ptr)) 4226 return -EFAULT; 4227 ptr += sizeof(uint32_t); 4228 binder_stat_br(proc, thread, cmd); 4229 break; 4230 } 4231 continue; 4232 } 4233 tr.data_size = t->buffer->data_size; 4234 tr.offsets_size = t->buffer->offsets_size; 4235 tr.data.ptr.buffer = (binder_uintptr_t) 4236 ((uintptr_t)t->buffer->data + 4237 binder_alloc_get_user_buffer_offset(&proc->alloc)); 4238 tr.data.ptr.offsets = tr.data.ptr.buffer + 4239 ALIGN(t->buffer->data_size, 4240 sizeof(void *)); 4241 4242 if (put_user(cmd, (uint32_t __user *)ptr)) { 4243 if (t_from) 4244 binder_thread_dec_tmpref(t_from); 4245 4246 binder_cleanup_transaction(t, "put_user failed", 4247 BR_FAILED_REPLY); 4248 4249 return -EFAULT; 4250 } 4251 ptr += sizeof(uint32_t); 4252 if (copy_to_user(ptr, &tr, sizeof(tr))) { 4253 if (t_from) 4254 binder_thread_dec_tmpref(t_from); 4255 4256 binder_cleanup_transaction(t, "copy_to_user failed", 4257 BR_FAILED_REPLY); 4258 4259 return -EFAULT; 4260 } 4261 ptr += sizeof(tr); 4262 4263 trace_binder_transaction_received(t); 4264 binder_stat_br(proc, thread, cmd); 4265 binder_debug(BINDER_DEBUG_TRANSACTION, 4266 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4267 proc->pid, thread->pid, 4268 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4269 "BR_REPLY", 4270 t->debug_id, t_from ? t_from->proc->pid : 0, 4271 t_from ? t_from->pid : 0, cmd, 4272 t->buffer->data_size, t->buffer->offsets_size, 4273 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 4274 4275 if (t_from) 4276 binder_thread_dec_tmpref(t_from); 4277 t->buffer->allow_user_free = 1; 4278 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 4279 binder_inner_proc_lock(thread->proc); 4280 t->to_parent = thread->transaction_stack; 4281 t->to_thread = thread; 4282 thread->transaction_stack = t; 4283 binder_inner_proc_unlock(thread->proc); 4284 } else { 4285 binder_free_transaction(t); 4286 } 4287 break; 4288 } 4289 4290 done: 4291 4292 *consumed = ptr - buffer; 4293 binder_inner_proc_lock(proc); 4294 if (proc->requested_threads == 0 && 4295 list_empty(&thread->proc->waiting_threads) && 4296 proc->requested_threads_started < proc->max_threads && 4297 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4298 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 4299 /*spawn a new thread if we leave this out */) { 4300 proc->requested_threads++; 4301 binder_inner_proc_unlock(proc); 4302 binder_debug(BINDER_DEBUG_THREADS, 4303 "%d:%d BR_SPAWN_LOOPER\n", 4304 proc->pid, thread->pid); 4305 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4306 return -EFAULT; 4307 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4308 } else 4309 binder_inner_proc_unlock(proc); 4310 return 0; 4311 } 4312 4313 static void binder_release_work(struct binder_proc *proc, 4314 struct list_head *list) 4315 { 4316 struct binder_work *w; 4317 4318 while (1) { 4319 w = binder_dequeue_work_head(proc, list); 4320 if (!w) 4321 return; 4322 4323 switch (w->type) { 4324 case BINDER_WORK_TRANSACTION: { 4325 struct binder_transaction *t; 4326 4327 t = container_of(w, struct binder_transaction, work); 4328 4329 binder_cleanup_transaction(t, "process died.", 4330 BR_DEAD_REPLY); 4331 } break; 4332 case BINDER_WORK_RETURN_ERROR: { 4333 struct binder_error *e = container_of( 4334 w, struct binder_error, work); 4335 4336 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4337 "undelivered TRANSACTION_ERROR: %u\n", 4338 
e->cmd); 4339 } break; 4340 case BINDER_WORK_TRANSACTION_COMPLETE: { 4341 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4342 "undelivered TRANSACTION_COMPLETE\n"); 4343 kfree(w); 4344 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4345 } break; 4346 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4347 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4348 struct binder_ref_death *death; 4349 4350 death = container_of(w, struct binder_ref_death, work); 4351 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4352 "undelivered death notification, %016llx\n", 4353 (u64)death->cookie); 4354 kfree(death); 4355 binder_stats_deleted(BINDER_STAT_DEATH); 4356 } break; 4357 default: 4358 pr_err("unexpected work type, %d, not freed\n", 4359 w->type); 4360 break; 4361 } 4362 } 4363 4364 } 4365 4366 static struct binder_thread *binder_get_thread_ilocked( 4367 struct binder_proc *proc, struct binder_thread *new_thread) 4368 { 4369 struct binder_thread *thread = NULL; 4370 struct rb_node *parent = NULL; 4371 struct rb_node **p = &proc->threads.rb_node; 4372 4373 while (*p) { 4374 parent = *p; 4375 thread = rb_entry(parent, struct binder_thread, rb_node); 4376 4377 if (current->pid < thread->pid) 4378 p = &(*p)->rb_left; 4379 else if (current->pid > thread->pid) 4380 p = &(*p)->rb_right; 4381 else 4382 return thread; 4383 } 4384 if (!new_thread) 4385 return NULL; 4386 thread = new_thread; 4387 binder_stats_created(BINDER_STAT_THREAD); 4388 thread->proc = proc; 4389 thread->pid = current->pid; 4390 atomic_set(&thread->tmp_ref, 0); 4391 init_waitqueue_head(&thread->wait); 4392 INIT_LIST_HEAD(&thread->todo); 4393 rb_link_node(&thread->rb_node, parent, p); 4394 rb_insert_color(&thread->rb_node, &proc->threads); 4395 thread->looper_need_return = true; 4396 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4397 thread->return_error.cmd = BR_OK; 4398 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4399 thread->reply_error.cmd = BR_OK; 4400 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4401 return thread; 4402 } 4403 4404 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4405 { 4406 struct binder_thread *thread; 4407 struct binder_thread *new_thread; 4408 4409 binder_inner_proc_lock(proc); 4410 thread = binder_get_thread_ilocked(proc, NULL); 4411 binder_inner_proc_unlock(proc); 4412 if (!thread) { 4413 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4414 if (new_thread == NULL) 4415 return NULL; 4416 binder_inner_proc_lock(proc); 4417 thread = binder_get_thread_ilocked(proc, new_thread); 4418 binder_inner_proc_unlock(proc); 4419 if (thread != new_thread) 4420 kfree(new_thread); 4421 } 4422 return thread; 4423 } 4424 4425 static void binder_free_proc(struct binder_proc *proc) 4426 { 4427 BUG_ON(!list_empty(&proc->todo)); 4428 BUG_ON(!list_empty(&proc->delivered_death)); 4429 binder_alloc_deferred_release(&proc->alloc); 4430 put_task_struct(proc->tsk); 4431 binder_stats_deleted(BINDER_STAT_PROC); 4432 kfree(proc); 4433 } 4434 4435 static void binder_free_thread(struct binder_thread *thread) 4436 { 4437 BUG_ON(!list_empty(&thread->todo)); 4438 binder_stats_deleted(BINDER_STAT_THREAD); 4439 binder_proc_dec_tmpref(thread->proc); 4440 kfree(thread); 4441 } 4442 4443 static int binder_thread_release(struct binder_proc *proc, 4444 struct binder_thread *thread) 4445 { 4446 struct binder_transaction *t; 4447 struct binder_transaction *send_reply = NULL; 4448 int active_transactions = 0; 4449 struct binder_transaction *last_t = NULL; 4450 4451 binder_inner_proc_lock(thread->proc); 4452 /* 
4453 * take a ref on the proc so it survives 4454 * after we remove this thread from proc->threads. 4455 * The corresponding dec is when we actually 4456 * free the thread in binder_free_thread() 4457 */ 4458 proc->tmp_ref++; 4459 /* 4460 * take a ref on this thread to ensure it 4461 * survives while we are releasing it 4462 */ 4463 atomic_inc(&thread->tmp_ref); 4464 rb_erase(&thread->rb_node, &proc->threads); 4465 t = thread->transaction_stack; 4466 if (t) { 4467 spin_lock(&t->lock); 4468 if (t->to_thread == thread) 4469 send_reply = t; 4470 } 4471 thread->is_dead = true; 4472 4473 while (t) { 4474 last_t = t; 4475 active_transactions++; 4476 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4477 "release %d:%d transaction %d %s, still active\n", 4478 proc->pid, thread->pid, 4479 t->debug_id, 4480 (t->to_thread == thread) ? "in" : "out"); 4481 4482 if (t->to_thread == thread) { 4483 t->to_proc = NULL; 4484 t->to_thread = NULL; 4485 if (t->buffer) { 4486 t->buffer->transaction = NULL; 4487 t->buffer = NULL; 4488 } 4489 t = t->to_parent; 4490 } else if (t->from == thread) { 4491 t->from = NULL; 4492 t = t->from_parent; 4493 } else 4494 BUG(); 4495 spin_unlock(&last_t->lock); 4496 if (t) 4497 spin_lock(&t->lock); 4498 } 4499 4500 /* 4501 * If this thread used poll, make sure we remove the waitqueue 4502 * from any epoll data structures holding it with POLLFREE. 4503 * waitqueue_active() is safe to use here because we're holding 4504 * the inner lock. 4505 */ 4506 if ((thread->looper & BINDER_LOOPER_STATE_POLL) && 4507 waitqueue_active(&thread->wait)) { 4508 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); 4509 } 4510 4511 binder_inner_proc_unlock(thread->proc); 4512 4513 /* 4514 * This is needed to avoid races between wake_up_poll() above and 4515 * ep_remove_waitqueue() called for other reasons (eg the epoll file 4516 * descriptor being closed); ep_remove_waitqueue() holds an RCU read 4517 * lock, so we can be sure it's done after calling synchronize_rcu().
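 * (Background note: POLLFREE is the hint that tells eventpoll this
 * waitqueue head is about to go away; the synchronize_rcu() below then
 * waits out any teardown path still walking the queue under its RCU
 * read lock before the binder_thread that embeds thread->wait is freed.)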
4518 */ 4519 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4520 synchronize_rcu(); 4521 4522 if (send_reply) 4523 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4524 binder_release_work(proc, &thread->todo); 4525 binder_thread_dec_tmpref(thread); 4526 return active_transactions; 4527 } 4528 4529 static __poll_t binder_poll(struct file *filp, 4530 struct poll_table_struct *wait) 4531 { 4532 struct binder_proc *proc = filp->private_data; 4533 struct binder_thread *thread = NULL; 4534 bool wait_for_proc_work; 4535 4536 thread = binder_get_thread(proc); 4537 if (!thread) 4538 return POLLERR; 4539 4540 binder_inner_proc_lock(thread->proc); 4541 thread->looper |= BINDER_LOOPER_STATE_POLL; 4542 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4543 4544 binder_inner_proc_unlock(thread->proc); 4545 4546 poll_wait(filp, &thread->wait, wait); 4547 4548 if (binder_has_work(thread, wait_for_proc_work)) 4549 return EPOLLIN; 4550 4551 return 0; 4552 } 4553 4554 static int binder_ioctl_write_read(struct file *filp, 4555 unsigned int cmd, unsigned long arg, 4556 struct binder_thread *thread) 4557 { 4558 int ret = 0; 4559 struct binder_proc *proc = filp->private_data; 4560 unsigned int size = _IOC_SIZE(cmd); 4561 void __user *ubuf = (void __user *)arg; 4562 struct binder_write_read bwr; 4563 4564 if (size != sizeof(struct binder_write_read)) { 4565 ret = -EINVAL; 4566 goto out; 4567 } 4568 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4569 ret = -EFAULT; 4570 goto out; 4571 } 4572 binder_debug(BINDER_DEBUG_READ_WRITE, 4573 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4574 proc->pid, thread->pid, 4575 (u64)bwr.write_size, (u64)bwr.write_buffer, 4576 (u64)bwr.read_size, (u64)bwr.read_buffer); 4577 4578 if (bwr.write_size > 0) { 4579 ret = binder_thread_write(proc, thread, 4580 bwr.write_buffer, 4581 bwr.write_size, 4582 &bwr.write_consumed); 4583 trace_binder_write_done(ret); 4584 if (ret < 0) { 4585 bwr.read_consumed = 0; 4586 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4587 ret = -EFAULT; 4588 goto out; 4589 } 4590 } 4591 if (bwr.read_size > 0) { 4592 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4593 bwr.read_size, 4594 &bwr.read_consumed, 4595 filp->f_flags & O_NONBLOCK); 4596 trace_binder_read_done(ret); 4597 binder_inner_proc_lock(proc); 4598 if (!binder_worklist_empty_ilocked(&proc->todo)) 4599 binder_wakeup_proc_ilocked(proc); 4600 binder_inner_proc_unlock(proc); 4601 if (ret < 0) { 4602 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4603 ret = -EFAULT; 4604 goto out; 4605 } 4606 } 4607 binder_debug(BINDER_DEBUG_READ_WRITE, 4608 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4609 proc->pid, thread->pid, 4610 (u64)bwr.write_consumed, (u64)bwr.write_size, 4611 (u64)bwr.read_consumed, (u64)bwr.read_size); 4612 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4613 ret = -EFAULT; 4614 goto out; 4615 } 4616 out: 4617 return ret; 4618 } 4619 4620 static int binder_ioctl_set_ctx_mgr(struct file *filp) 4621 { 4622 int ret = 0; 4623 struct binder_proc *proc = filp->private_data; 4624 struct binder_context *context = proc->context; 4625 struct binder_node *new_node; 4626 kuid_t curr_euid = current_euid(); 4627 4628 mutex_lock(&context->context_mgr_node_lock); 4629 if (context->binder_context_mgr_node) { 4630 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4631 ret = -EBUSY; 4632 goto out; 4633 } 4634 ret = security_binder_set_context_mgr(proc->tsk); 4635 if (ret < 0) 4636 goto out; 4637 if (uid_valid(context->binder_context_mgr_uid)) { 4638 if 
(!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4639 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4640 from_kuid(&init_user_ns, curr_euid), 4641 from_kuid(&init_user_ns, 4642 context->binder_context_mgr_uid)); 4643 ret = -EPERM; 4644 goto out; 4645 } 4646 } else { 4647 context->binder_context_mgr_uid = curr_euid; 4648 } 4649 new_node = binder_new_node(proc, NULL); 4650 if (!new_node) { 4651 ret = -ENOMEM; 4652 goto out; 4653 } 4654 binder_node_lock(new_node); 4655 new_node->local_weak_refs++; 4656 new_node->local_strong_refs++; 4657 new_node->has_strong_ref = 1; 4658 new_node->has_weak_ref = 1; 4659 context->binder_context_mgr_node = new_node; 4660 binder_node_unlock(new_node); 4661 binder_put_node(new_node); 4662 out: 4663 mutex_unlock(&context->context_mgr_node_lock); 4664 return ret; 4665 } 4666 4667 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, 4668 struct binder_node_info_for_ref *info) 4669 { 4670 struct binder_node *node; 4671 struct binder_context *context = proc->context; 4672 __u32 handle = info->handle; 4673 4674 if (info->strong_count || info->weak_count || info->reserved1 || 4675 info->reserved2 || info->reserved3) { 4676 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", 4677 proc->pid); 4678 return -EINVAL; 4679 } 4680 4681 /* This ioctl may only be used by the context manager */ 4682 mutex_lock(&context->context_mgr_node_lock); 4683 if (!context->binder_context_mgr_node || 4684 context->binder_context_mgr_node->proc != proc) { 4685 mutex_unlock(&context->context_mgr_node_lock); 4686 return -EPERM; 4687 } 4688 mutex_unlock(&context->context_mgr_node_lock); 4689 4690 node = binder_get_node_from_ref(proc, handle, true, NULL); 4691 if (!node) 4692 return -EINVAL; 4693 4694 info->strong_count = node->local_strong_refs + 4695 node->internal_strong_refs; 4696 info->weak_count = node->local_weak_refs; 4697 4698 binder_put_node(node); 4699 4700 return 0; 4701 } 4702 4703 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4704 struct binder_node_debug_info *info) 4705 { 4706 struct rb_node *n; 4707 binder_uintptr_t ptr = info->ptr; 4708 4709 memset(info, 0, sizeof(*info)); 4710 4711 binder_inner_proc_lock(proc); 4712 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4713 struct binder_node *node = rb_entry(n, struct binder_node, 4714 rb_node); 4715 if (node->ptr > ptr) { 4716 info->ptr = node->ptr; 4717 info->cookie = node->cookie; 4718 info->has_strong_ref = node->has_strong_ref; 4719 info->has_weak_ref = node->has_weak_ref; 4720 break; 4721 } 4722 } 4723 binder_inner_proc_unlock(proc); 4724 4725 return 0; 4726 } 4727 4728 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4729 { 4730 int ret; 4731 struct binder_proc *proc = filp->private_data; 4732 struct binder_thread *thread; 4733 unsigned int size = _IOC_SIZE(cmd); 4734 void __user *ubuf = (void __user *)arg; 4735 4736 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 4737 proc->pid, current->pid, cmd, arg);*/ 4738 4739 binder_selftest_alloc(&proc->alloc); 4740 4741 trace_binder_ioctl(cmd, arg); 4742 4743 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4744 if (ret) 4745 goto err_unlocked; 4746 4747 thread = binder_get_thread(proc); 4748 if (thread == NULL) { 4749 ret = -ENOMEM; 4750 goto err; 4751 } 4752 4753 switch (cmd) { 4754 case BINDER_WRITE_READ: 4755 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 4756 if (ret) 4757 goto err; 4758 break; 4759 case 
BINDER_SET_MAX_THREADS: { 4760 int max_threads; 4761 4762 if (copy_from_user(&max_threads, ubuf, 4763 sizeof(max_threads))) { 4764 ret = -EINVAL; 4765 goto err; 4766 } 4767 binder_inner_proc_lock(proc); 4768 proc->max_threads = max_threads; 4769 binder_inner_proc_unlock(proc); 4770 break; 4771 } 4772 case BINDER_SET_CONTEXT_MGR: 4773 ret = binder_ioctl_set_ctx_mgr(filp); 4774 if (ret) 4775 goto err; 4776 break; 4777 case BINDER_THREAD_EXIT: 4778 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 4779 proc->pid, thread->pid); 4780 binder_thread_release(proc, thread); 4781 thread = NULL; 4782 break; 4783 case BINDER_VERSION: { 4784 struct binder_version __user *ver = ubuf; 4785 4786 if (size != sizeof(struct binder_version)) { 4787 ret = -EINVAL; 4788 goto err; 4789 } 4790 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 4791 &ver->protocol_version)) { 4792 ret = -EINVAL; 4793 goto err; 4794 } 4795 break; 4796 } 4797 case BINDER_GET_NODE_INFO_FOR_REF: { 4798 struct binder_node_info_for_ref info; 4799 4800 if (copy_from_user(&info, ubuf, sizeof(info))) { 4801 ret = -EFAULT; 4802 goto err; 4803 } 4804 4805 ret = binder_ioctl_get_node_info_for_ref(proc, &info); 4806 if (ret < 0) 4807 goto err; 4808 4809 if (copy_to_user(ubuf, &info, sizeof(info))) { 4810 ret = -EFAULT; 4811 goto err; 4812 } 4813 4814 break; 4815 } 4816 case BINDER_GET_NODE_DEBUG_INFO: { 4817 struct binder_node_debug_info info; 4818 4819 if (copy_from_user(&info, ubuf, sizeof(info))) { 4820 ret = -EFAULT; 4821 goto err; 4822 } 4823 4824 ret = binder_ioctl_get_node_debug_info(proc, &info); 4825 if (ret < 0) 4826 goto err; 4827 4828 if (copy_to_user(ubuf, &info, sizeof(info))) { 4829 ret = -EFAULT; 4830 goto err; 4831 } 4832 break; 4833 } 4834 default: 4835 ret = -EINVAL; 4836 goto err; 4837 } 4838 ret = 0; 4839 err: 4840 if (thread) 4841 thread->looper_need_return = false; 4842 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4843 if (ret && ret != -ERESTARTSYS) 4844 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 4845 err_unlocked: 4846 trace_binder_ioctl_done(ret); 4847 return ret; 4848 } 4849 4850 static void binder_vma_open(struct vm_area_struct *vma) 4851 { 4852 struct binder_proc *proc = vma->vm_private_data; 4853 4854 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4855 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4856 proc->pid, vma->vm_start, vma->vm_end, 4857 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4858 (unsigned long)pgprot_val(vma->vm_page_prot)); 4859 } 4860 4861 static void binder_vma_close(struct vm_area_struct *vma) 4862 { 4863 struct binder_proc *proc = vma->vm_private_data; 4864 4865 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4866 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4867 proc->pid, vma->vm_start, vma->vm_end, 4868 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4869 (unsigned long)pgprot_val(vma->vm_page_prot)); 4870 binder_alloc_vma_close(&proc->alloc); 4871 } 4872 4873 static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 4874 { 4875 return VM_FAULT_SIGBUS; 4876 } 4877 4878 static const struct vm_operations_struct binder_vm_ops = { 4879 .open = binder_vma_open, 4880 .close = binder_vma_close, 4881 .fault = binder_vm_fault, 4882 }; 4883 4884 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 4885 { 4886 int ret; 4887 struct binder_proc *proc = filp->private_data; 4888 const char *failure_string; 4889 4890 if (proc->tsk != current->group_leader) 4891 return -EINVAL; 4892 4893 if ((vma->vm_end 
- vma->vm_start) > SZ_4M) 4894 vma->vm_end = vma->vm_start + SZ_4M; 4895 4896 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4897 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 4898 __func__, proc->pid, vma->vm_start, vma->vm_end, 4899 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4900 (unsigned long)pgprot_val(vma->vm_page_prot)); 4901 4902 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 4903 ret = -EPERM; 4904 failure_string = "bad vm_flags"; 4905 goto err_bad_arg; 4906 } 4907 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; 4908 vma->vm_flags &= ~VM_MAYWRITE; 4909 4910 vma->vm_ops = &binder_vm_ops; 4911 vma->vm_private_data = proc; 4912 4913 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 4914 if (ret) 4915 return ret; 4916 return 0; 4917 4918 err_bad_arg: 4919 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 4920 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 4921 return ret; 4922 } 4923 4924 static int binder_open(struct inode *nodp, struct file *filp) 4925 { 4926 struct binder_proc *proc; 4927 struct binder_device *binder_dev; 4928 4929 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 4930 current->group_leader->pid, current->pid); 4931 4932 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 4933 if (proc == NULL) 4934 return -ENOMEM; 4935 spin_lock_init(&proc->inner_lock); 4936 spin_lock_init(&proc->outer_lock); 4937 get_task_struct(current->group_leader); 4938 proc->tsk = current->group_leader; 4939 INIT_LIST_HEAD(&proc->todo); 4940 proc->default_priority = task_nice(current); 4941 binder_dev = container_of(filp->private_data, struct binder_device, 4942 miscdev); 4943 proc->context = &binder_dev->context; 4944 binder_alloc_init(&proc->alloc); 4945 4946 binder_stats_created(BINDER_STAT_PROC); 4947 proc->pid = current->group_leader->pid; 4948 INIT_LIST_HEAD(&proc->delivered_death); 4949 INIT_LIST_HEAD(&proc->waiting_threads); 4950 filp->private_data = proc; 4951 4952 mutex_lock(&binder_procs_lock); 4953 hlist_add_head(&proc->proc_node, &binder_procs); 4954 mutex_unlock(&binder_procs_lock); 4955 4956 if (binder_debugfs_dir_entry_proc) { 4957 char strbuf[11]; 4958 4959 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 4960 /* 4961 * proc debug entries are shared between contexts, so 4962 * this will fail if the process tries to open the driver 4963 * again with a different context. The printing code will 4964 * anyway print all contexts that a given PID has, so this 4965 * is not a problem.
4966 */ 4967 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 4968 binder_debugfs_dir_entry_proc, 4969 (void *)(unsigned long)proc->pid, 4970 &binder_proc_fops); 4971 } 4972 4973 return 0; 4974 } 4975 4976 static int binder_flush(struct file *filp, fl_owner_t id) 4977 { 4978 struct binder_proc *proc = filp->private_data; 4979 4980 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 4981 4982 return 0; 4983 } 4984 4985 static void binder_deferred_flush(struct binder_proc *proc) 4986 { 4987 struct rb_node *n; 4988 int wake_count = 0; 4989 4990 binder_inner_proc_lock(proc); 4991 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 4992 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 4993 4994 thread->looper_need_return = true; 4995 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 4996 wake_up_interruptible(&thread->wait); 4997 wake_count++; 4998 } 4999 } 5000 binder_inner_proc_unlock(proc); 5001 5002 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5003 "binder_flush: %d woke %d threads\n", proc->pid, 5004 wake_count); 5005 } 5006 5007 static int binder_release(struct inode *nodp, struct file *filp) 5008 { 5009 struct binder_proc *proc = filp->private_data; 5010 5011 debugfs_remove(proc->debugfs_entry); 5012 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 5013 5014 return 0; 5015 } 5016 5017 static int binder_node_release(struct binder_node *node, int refs) 5018 { 5019 struct binder_ref *ref; 5020 int death = 0; 5021 struct binder_proc *proc = node->proc; 5022 5023 binder_release_work(proc, &node->async_todo); 5024 5025 binder_node_lock(node); 5026 binder_inner_proc_lock(proc); 5027 binder_dequeue_work_ilocked(&node->work); 5028 /* 5029 * The caller must have taken a temporary ref on the node, 5030 */ 5031 BUG_ON(!node->tmp_refs); 5032 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 5033 binder_inner_proc_unlock(proc); 5034 binder_node_unlock(node); 5035 binder_free_node(node); 5036 5037 return refs; 5038 } 5039 5040 node->proc = NULL; 5041 node->local_strong_refs = 0; 5042 node->local_weak_refs = 0; 5043 binder_inner_proc_unlock(proc); 5044 5045 spin_lock(&binder_dead_nodes_lock); 5046 hlist_add_head(&node->dead_node, &binder_dead_nodes); 5047 spin_unlock(&binder_dead_nodes_lock); 5048 5049 hlist_for_each_entry(ref, &node->refs, node_entry) { 5050 refs++; 5051 /* 5052 * Need the node lock to synchronize 5053 * with new notification requests and the 5054 * inner lock to synchronize with queued 5055 * death notifications. 
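 * (The BUG_ON() a few lines below only asserts that this death work
 * item has not already been queued; it is placed on ref->proc->todo
 * at most once here.)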
5056 */ 5057 binder_inner_proc_lock(ref->proc); 5058 if (!ref->death) { 5059 binder_inner_proc_unlock(ref->proc); 5060 continue; 5061 } 5062 5063 death++; 5064 5065 BUG_ON(!list_empty(&ref->death->work.entry)); 5066 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 5067 binder_enqueue_work_ilocked(&ref->death->work, 5068 &ref->proc->todo); 5069 binder_wakeup_proc_ilocked(ref->proc); 5070 binder_inner_proc_unlock(ref->proc); 5071 } 5072 5073 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5074 "node %d now dead, refs %d, death %d\n", 5075 node->debug_id, refs, death); 5076 binder_node_unlock(node); 5077 binder_put_node(node); 5078 5079 return refs; 5080 } 5081 5082 static void binder_deferred_release(struct binder_proc *proc) 5083 { 5084 struct binder_context *context = proc->context; 5085 struct rb_node *n; 5086 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5087 5088 mutex_lock(&binder_procs_lock); 5089 hlist_del(&proc->proc_node); 5090 mutex_unlock(&binder_procs_lock); 5091 5092 mutex_lock(&context->context_mgr_node_lock); 5093 if (context->binder_context_mgr_node && 5094 context->binder_context_mgr_node->proc == proc) { 5095 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5096 "%s: %d context_mgr_node gone\n", 5097 __func__, proc->pid); 5098 context->binder_context_mgr_node = NULL; 5099 } 5100 mutex_unlock(&context->context_mgr_node_lock); 5101 binder_inner_proc_lock(proc); 5102 /* 5103 * Make sure proc stays alive after we 5104 * remove all the threads 5105 */ 5106 proc->tmp_ref++; 5107 5108 proc->is_dead = true; 5109 threads = 0; 5110 active_transactions = 0; 5111 while ((n = rb_first(&proc->threads))) { 5112 struct binder_thread *thread; 5113 5114 thread = rb_entry(n, struct binder_thread, rb_node); 5115 binder_inner_proc_unlock(proc); 5116 threads++; 5117 active_transactions += binder_thread_release(proc, thread); 5118 binder_inner_proc_lock(proc); 5119 } 5120 5121 nodes = 0; 5122 incoming_refs = 0; 5123 while ((n = rb_first(&proc->nodes))) { 5124 struct binder_node *node; 5125 5126 node = rb_entry(n, struct binder_node, rb_node); 5127 nodes++; 5128 /* 5129 * take a temporary ref on the node before 5130 * calling binder_node_release() which will either 5131 * kfree() the node or call binder_put_node() 5132 */ 5133 binder_inc_node_tmpref_ilocked(node); 5134 rb_erase(&node->rb_node, &proc->nodes); 5135 binder_inner_proc_unlock(proc); 5136 incoming_refs = binder_node_release(node, incoming_refs); 5137 binder_inner_proc_lock(proc); 5138 } 5139 binder_inner_proc_unlock(proc); 5140 5141 outgoing_refs = 0; 5142 binder_proc_lock(proc); 5143 while ((n = rb_first(&proc->refs_by_desc))) { 5144 struct binder_ref *ref; 5145 5146 ref = rb_entry(n, struct binder_ref, rb_node_desc); 5147 outgoing_refs++; 5148 binder_cleanup_ref_olocked(ref); 5149 binder_proc_unlock(proc); 5150 binder_free_ref(ref); 5151 binder_proc_lock(proc); 5152 } 5153 binder_proc_unlock(proc); 5154 5155 binder_release_work(proc, &proc->todo); 5156 binder_release_work(proc, &proc->delivered_death); 5157 5158 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5159 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 5160 __func__, proc->pid, threads, nodes, incoming_refs, 5161 outgoing_refs, active_transactions); 5162 5163 binder_proc_dec_tmpref(proc); 5164 } 5165 5166 static void binder_deferred_func(struct work_struct *work) 5167 { 5168 struct binder_proc *proc; 5169 5170 int defer; 5171 5172 do { 5173 mutex_lock(&binder_deferred_lock); 5174 if (!hlist_empty(&binder_deferred_list)) { 5175 proc = 
hlist_entry(binder_deferred_list.first, 5176 struct binder_proc, deferred_work_node); 5177 hlist_del_init(&proc->deferred_work_node); 5178 defer = proc->deferred_work; 5179 proc->deferred_work = 0; 5180 } else { 5181 proc = NULL; 5182 defer = 0; 5183 } 5184 mutex_unlock(&binder_deferred_lock); 5185 5186 if (defer & BINDER_DEFERRED_FLUSH) 5187 binder_deferred_flush(proc); 5188 5189 if (defer & BINDER_DEFERRED_RELEASE) 5190 binder_deferred_release(proc); /* frees proc */ 5191 } while (proc); 5192 } 5193 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5194 5195 static void 5196 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5197 { 5198 mutex_lock(&binder_deferred_lock); 5199 proc->deferred_work |= defer; 5200 if (hlist_unhashed(&proc->deferred_work_node)) { 5201 hlist_add_head(&proc->deferred_work_node, 5202 &binder_deferred_list); 5203 schedule_work(&binder_deferred_work); 5204 } 5205 mutex_unlock(&binder_deferred_lock); 5206 } 5207 5208 static void print_binder_transaction_ilocked(struct seq_file *m, 5209 struct binder_proc *proc, 5210 const char *prefix, 5211 struct binder_transaction *t) 5212 { 5213 struct binder_proc *to_proc; 5214 struct binder_buffer *buffer = t->buffer; 5215 5216 spin_lock(&t->lock); 5217 to_proc = t->to_proc; 5218 seq_printf(m, 5219 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5220 prefix, t->debug_id, t, 5221 t->from ? t->from->proc->pid : 0, 5222 t->from ? t->from->pid : 0, 5223 to_proc ? to_proc->pid : 0, 5224 t->to_thread ? t->to_thread->pid : 0, 5225 t->code, t->flags, t->priority, t->need_reply); 5226 spin_unlock(&t->lock); 5227 5228 if (proc != to_proc) { 5229 /* 5230 * Can only safely deref buffer if we are holding the 5231 * correct proc inner lock for this node 5232 */ 5233 seq_puts(m, "\n"); 5234 return; 5235 } 5236 5237 if (buffer == NULL) { 5238 seq_puts(m, " buffer free\n"); 5239 return; 5240 } 5241 if (buffer->target_node) 5242 seq_printf(m, " node %d", buffer->target_node->debug_id); 5243 seq_printf(m, " size %zd:%zd data %pK\n", 5244 buffer->data_size, buffer->offsets_size, 5245 buffer->data); 5246 } 5247 5248 static void print_binder_work_ilocked(struct seq_file *m, 5249 struct binder_proc *proc, 5250 const char *prefix, 5251 const char *transaction_prefix, 5252 struct binder_work *w) 5253 { 5254 struct binder_node *node; 5255 struct binder_transaction *t; 5256 5257 switch (w->type) { 5258 case BINDER_WORK_TRANSACTION: 5259 t = container_of(w, struct binder_transaction, work); 5260 print_binder_transaction_ilocked( 5261 m, proc, transaction_prefix, t); 5262 break; 5263 case BINDER_WORK_RETURN_ERROR: { 5264 struct binder_error *e = container_of( 5265 w, struct binder_error, work); 5266 5267 seq_printf(m, "%stransaction error: %u\n", 5268 prefix, e->cmd); 5269 } break; 5270 case BINDER_WORK_TRANSACTION_COMPLETE: 5271 seq_printf(m, "%stransaction complete\n", prefix); 5272 break; 5273 case BINDER_WORK_NODE: 5274 node = container_of(w, struct binder_node, work); 5275 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5276 prefix, node->debug_id, 5277 (u64)node->ptr, (u64)node->cookie); 5278 break; 5279 case BINDER_WORK_DEAD_BINDER: 5280 seq_printf(m, "%shas dead binder\n", prefix); 5281 break; 5282 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5283 seq_printf(m, "%shas cleared dead binder\n", prefix); 5284 break; 5285 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5286 seq_printf(m, "%shas cleared death notification\n", prefix); 5287 break; 5288 default: 5289 seq_printf(m, "%sunknown work: type 
%d\n", prefix, w->type); 5290 break; 5291 } 5292 } 5293 5294 static void print_binder_thread_ilocked(struct seq_file *m, 5295 struct binder_thread *thread, 5296 int print_always) 5297 { 5298 struct binder_transaction *t; 5299 struct binder_work *w; 5300 size_t start_pos = m->count; 5301 size_t header_pos; 5302 5303 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 5304 thread->pid, thread->looper, 5305 thread->looper_need_return, 5306 atomic_read(&thread->tmp_ref)); 5307 header_pos = m->count; 5308 t = thread->transaction_stack; 5309 while (t) { 5310 if (t->from == thread) { 5311 print_binder_transaction_ilocked(m, thread->proc, 5312 " outgoing transaction", t); 5313 t = t->from_parent; 5314 } else if (t->to_thread == thread) { 5315 print_binder_transaction_ilocked(m, thread->proc, 5316 " incoming transaction", t); 5317 t = t->to_parent; 5318 } else { 5319 print_binder_transaction_ilocked(m, thread->proc, 5320 " bad transaction", t); 5321 t = NULL; 5322 } 5323 } 5324 list_for_each_entry(w, &thread->todo, entry) { 5325 print_binder_work_ilocked(m, thread->proc, " ", 5326 " pending transaction", w); 5327 } 5328 if (!print_always && m->count == header_pos) 5329 m->count = start_pos; 5330 } 5331 5332 static void print_binder_node_nilocked(struct seq_file *m, 5333 struct binder_node *node) 5334 { 5335 struct binder_ref *ref; 5336 struct binder_work *w; 5337 int count; 5338 5339 count = 0; 5340 hlist_for_each_entry(ref, &node->refs, node_entry) 5341 count++; 5342 5343 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", 5344 node->debug_id, (u64)node->ptr, (u64)node->cookie, 5345 node->has_strong_ref, node->has_weak_ref, 5346 node->local_strong_refs, node->local_weak_refs, 5347 node->internal_strong_refs, count, node->tmp_refs); 5348 if (count) { 5349 seq_puts(m, " proc"); 5350 hlist_for_each_entry(ref, &node->refs, node_entry) 5351 seq_printf(m, " %d", ref->proc->pid); 5352 } 5353 seq_puts(m, "\n"); 5354 if (node->proc) { 5355 list_for_each_entry(w, &node->async_todo, entry) 5356 print_binder_work_ilocked(m, node->proc, " ", 5357 " pending async transaction", w); 5358 } 5359 } 5360 5361 static void print_binder_ref_olocked(struct seq_file *m, 5362 struct binder_ref *ref) 5363 { 5364 binder_node_lock(ref->node); 5365 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 5366 ref->data.debug_id, ref->data.desc, 5367 ref->node->proc ? "" : "dead ", 5368 ref->node->debug_id, ref->data.strong, 5369 ref->data.weak, ref->death); 5370 binder_node_unlock(ref->node); 5371 } 5372 5373 static void print_binder_proc(struct seq_file *m, 5374 struct binder_proc *proc, int print_all) 5375 { 5376 struct binder_work *w; 5377 struct rb_node *n; 5378 size_t start_pos = m->count; 5379 size_t header_pos; 5380 struct binder_node *last_node = NULL; 5381 5382 seq_printf(m, "proc %d\n", proc->pid); 5383 seq_printf(m, "context %s\n", proc->context->name); 5384 header_pos = m->count; 5385 5386 binder_inner_proc_lock(proc); 5387 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5388 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5389 rb_node), print_all); 5390 5391 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5392 struct binder_node *node = rb_entry(n, struct binder_node, 5393 rb_node); 5394 /* 5395 * take a temporary reference on the node so it 5396 * survives and isn't removed from the tree 5397 * while we print it. 
5398 */ 5399 binder_inc_node_tmpref_ilocked(node); 5400 /* Need to drop inner lock to take node lock */ 5401 binder_inner_proc_unlock(proc); 5402 if (last_node) 5403 binder_put_node(last_node); 5404 binder_node_inner_lock(node); 5405 print_binder_node_nilocked(m, node); 5406 binder_node_inner_unlock(node); 5407 last_node = node; 5408 binder_inner_proc_lock(proc); 5409 } 5410 binder_inner_proc_unlock(proc); 5411 if (last_node) 5412 binder_put_node(last_node); 5413 5414 if (print_all) { 5415 binder_proc_lock(proc); 5416 for (n = rb_first(&proc->refs_by_desc); 5417 n != NULL; 5418 n = rb_next(n)) 5419 print_binder_ref_olocked(m, rb_entry(n, 5420 struct binder_ref, 5421 rb_node_desc)); 5422 binder_proc_unlock(proc); 5423 } 5424 binder_alloc_print_allocated(m, &proc->alloc); 5425 binder_inner_proc_lock(proc); 5426 list_for_each_entry(w, &proc->todo, entry) 5427 print_binder_work_ilocked(m, proc, " ", 5428 " pending transaction", w); 5429 list_for_each_entry(w, &proc->delivered_death, entry) { 5430 seq_puts(m, " has delivered dead binder\n"); 5431 break; 5432 } 5433 binder_inner_proc_unlock(proc); 5434 if (!print_all && m->count == header_pos) 5435 m->count = start_pos; 5436 } 5437 5438 static const char * const binder_return_strings[] = { 5439 "BR_ERROR", 5440 "BR_OK", 5441 "BR_TRANSACTION", 5442 "BR_REPLY", 5443 "BR_ACQUIRE_RESULT", 5444 "BR_DEAD_REPLY", 5445 "BR_TRANSACTION_COMPLETE", 5446 "BR_INCREFS", 5447 "BR_ACQUIRE", 5448 "BR_RELEASE", 5449 "BR_DECREFS", 5450 "BR_ATTEMPT_ACQUIRE", 5451 "BR_NOOP", 5452 "BR_SPAWN_LOOPER", 5453 "BR_FINISHED", 5454 "BR_DEAD_BINDER", 5455 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 5456 "BR_FAILED_REPLY" 5457 }; 5458 5459 static const char * const binder_command_strings[] = { 5460 "BC_TRANSACTION", 5461 "BC_REPLY", 5462 "BC_ACQUIRE_RESULT", 5463 "BC_FREE_BUFFER", 5464 "BC_INCREFS", 5465 "BC_ACQUIRE", 5466 "BC_RELEASE", 5467 "BC_DECREFS", 5468 "BC_INCREFS_DONE", 5469 "BC_ACQUIRE_DONE", 5470 "BC_ATTEMPT_ACQUIRE", 5471 "BC_REGISTER_LOOPER", 5472 "BC_ENTER_LOOPER", 5473 "BC_EXIT_LOOPER", 5474 "BC_REQUEST_DEATH_NOTIFICATION", 5475 "BC_CLEAR_DEATH_NOTIFICATION", 5476 "BC_DEAD_BINDER_DONE", 5477 "BC_TRANSACTION_SG", 5478 "BC_REPLY_SG", 5479 }; 5480 5481 static const char * const binder_objstat_strings[] = { 5482 "proc", 5483 "thread", 5484 "node", 5485 "ref", 5486 "death", 5487 "transaction", 5488 "transaction_complete" 5489 }; 5490 5491 static void print_binder_stats(struct seq_file *m, const char *prefix, 5492 struct binder_stats *stats) 5493 { 5494 int i; 5495 5496 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 5497 ARRAY_SIZE(binder_command_strings)); 5498 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 5499 int temp = atomic_read(&stats->bc[i]); 5500 5501 if (temp) 5502 seq_printf(m, "%s%s: %d\n", prefix, 5503 binder_command_strings[i], temp); 5504 } 5505 5506 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 5507 ARRAY_SIZE(binder_return_strings)); 5508 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 5509 int temp = atomic_read(&stats->br[i]); 5510 5511 if (temp) 5512 seq_printf(m, "%s%s: %d\n", prefix, 5513 binder_return_strings[i], temp); 5514 } 5515 5516 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5517 ARRAY_SIZE(binder_objstat_strings)); 5518 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5519 ARRAY_SIZE(stats->obj_deleted)); 5520 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 5521 int created = atomic_read(&stats->obj_created[i]); 5522 int deleted = atomic_read(&stats->obj_deleted[i]); 5523 5524 if (created || deleted) 5525 seq_printf(m, "%s%s: active %d total %d\n", 5526 
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
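/*
 * Illustrative example (values invented, output abridged): for a single
 * process the function above is expected to emit a block along these
 * lines in the "stats" debugfs file:
 *
 *   proc 1234
 *   context binder
 *     threads: 4
 *     requested threads: 0+2/15
 *     ready threads 1
 *     free async space 520192
 *     nodes: 12
 *     refs: 30 s 30 w 30
 *     buffers: 3
 *     pending transactions: 0
 *     ...per-process BC_*/BR_* counters from print_binder_stats()...
 *
 * Lines printed by binder_alloc_print_pages() are omitted here.
 */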
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
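/*
 * Clarifying note (not from the original source): the two smp_rmb()
 * barriers above bracket the field reads so that debug_id_done is
 * observed both before and after the entry is formatted. The writer
 * side is assumed to publish debug_id_done only once the rest of the
 * entry has been filled in; if the two reads disagree, or the entry was
 * never completed, the line is tagged " (incomplete)" rather than being
 * presented as a consistent record.
 */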
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
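/*
 * Note (not from the original source): each name handed to
 * init_binder_device() becomes its own misc character device with its
 * own binder_context and context-manager slot. For example, a kernel
 * configured with something like devices="binder,hwbinder,vndbinder"
 * would be expected to expose /dev/binder, /dev/hwbinder and
 * /dev/vndbinder as separate IPC domains; the actual set of names comes
 * from the "devices" module parameter parsed in binder_init() below.
 */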
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kstrdup(binder_devices_param, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");
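/*
 * Illustrative usage note (not part of the original source): with
 * debugfs mounted at its conventional location, the files registered in
 * binder_init() can be inspected from user space, for example:
 *
 *   mount -t debugfs none /sys/kernel/debug   # if not already mounted
 *   cat /sys/kernel/debug/binder/state
 *   cat /sys/kernel/debug/binder/stats
 *   cat /sys/kernel/debug/binder/transactions
 *   cat /sys/kernel/debug/binder/transaction_log
 *   cat /sys/kernel/debug/binder/failed_transaction_log
 *
 * Per-process state is expected under the "proc" subdirectory created
 * above. Paths assume the default debugfs mount point.
 */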