1 /* binder.c 2 * 3 * Android IPC Subsystem 4 * 5 * Copyright (C) 2007-2008 Google, Inc. 6 * 7 * This software is licensed under the terms of the GNU General Public 8 * License version 2, as published by the Free Software Foundation, and 9 * may be copied, distributed, and modified under those terms. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 */ 17 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 20 #include <asm/cacheflush.h> 21 #include <linux/fdtable.h> 22 #include <linux/file.h> 23 #include <linux/freezer.h> 24 #include <linux/fs.h> 25 #include <linux/list.h> 26 #include <linux/miscdevice.h> 27 #include <linux/mm.h> 28 #include <linux/module.h> 29 #include <linux/mutex.h> 30 #include <linux/nsproxy.h> 31 #include <linux/poll.h> 32 #include <linux/debugfs.h> 33 #include <linux/rbtree.h> 34 #include <linux/sched.h> 35 #include <linux/seq_file.h> 36 #include <linux/uaccess.h> 37 #include <linux/vmalloc.h> 38 #include <linux/slab.h> 39 #include <linux/pid_namespace.h> 40 41 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT 42 #define BINDER_IPC_32BIT 1 43 #endif 44 45 #include <uapi/linux/android/binder.h> 46 #include "binder_trace.h" 47 48 static DEFINE_MUTEX(binder_main_lock); 49 static DEFINE_MUTEX(binder_deferred_lock); 50 static DEFINE_MUTEX(binder_mmap_lock); 51 52 static HLIST_HEAD(binder_procs); 53 static HLIST_HEAD(binder_deferred_list); 54 static HLIST_HEAD(binder_dead_nodes); 55 56 static struct dentry *binder_debugfs_dir_entry_root; 57 static struct dentry *binder_debugfs_dir_entry_proc; 58 static struct binder_node *binder_context_mgr_node; 59 static kuid_t binder_context_mgr_uid = INVALID_UID; 60 static int binder_last_id; 61 static struct workqueue_struct *binder_deferred_workqueue; 62 63 #define BINDER_DEBUG_ENTRY(name) \ 64 static int binder_##name##_open(struct inode *inode, struct file *file) \ 65 { \ 66 return single_open(file, binder_##name##_show, inode->i_private); \ 67 } \ 68 \ 69 static const struct file_operations binder_##name##_fops = { \ 70 .owner = THIS_MODULE, \ 71 .open = binder_##name##_open, \ 72 .read = seq_read, \ 73 .llseek = seq_lseek, \ 74 .release = single_release, \ 75 } 76 77 static int binder_proc_show(struct seq_file *m, void *unused); 78 BINDER_DEBUG_ENTRY(proc); 79 80 /* This is only defined in include/asm-arm/sizes.h */ 81 #ifndef SZ_1K 82 #define SZ_1K 0x400 83 #endif 84 85 #ifndef SZ_4M 86 #define SZ_4M 0x400000 87 #endif 88 89 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 90 91 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 92 93 enum { 94 BINDER_DEBUG_USER_ERROR = 1U << 0, 95 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 96 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 97 BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 98 BINDER_DEBUG_DEAD_BINDER = 1U << 4, 99 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 100 BINDER_DEBUG_READ_WRITE = 1U << 6, 101 BINDER_DEBUG_USER_REFS = 1U << 7, 102 BINDER_DEBUG_THREADS = 1U << 8, 103 BINDER_DEBUG_TRANSACTION = 1U << 9, 104 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 105 BINDER_DEBUG_FREE_BUFFER = 1U << 11, 106 BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 107 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, 108 BINDER_DEBUG_PRIORITY_CAP = 1U << 14, 109 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, 110 }; 111 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 112 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 113 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); 114 115 static bool binder_debug_no_lock; 116 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); 117 118 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 119 static int binder_stop_on_user_error; 120 121 static int binder_set_stop_on_user_error(const char *val, 122 struct kernel_param *kp) 123 { 124 int ret; 125 126 ret = param_set_int(val, kp); 127 if (binder_stop_on_user_error < 2) 128 wake_up(&binder_user_error_wait); 129 return ret; 130 } 131 module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 132 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); 133 134 #define binder_debug(mask, x...) \ 135 do { \ 136 if (binder_debug_mask & mask) \ 137 pr_info(x); \ 138 } while (0) 139 140 #define binder_user_error(x...) \ 141 do { \ 142 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 143 pr_info(x); \ 144 if (binder_stop_on_user_error) \ 145 binder_stop_on_user_error = 2; \ 146 } while (0) 147 148 enum binder_stat_types { 149 BINDER_STAT_PROC, 150 BINDER_STAT_THREAD, 151 BINDER_STAT_NODE, 152 BINDER_STAT_REF, 153 BINDER_STAT_DEATH, 154 BINDER_STAT_TRANSACTION, 155 BINDER_STAT_TRANSACTION_COMPLETE, 156 BINDER_STAT_COUNT 157 }; 158 159 struct binder_stats { 160 int br[_IOC_NR(BR_FAILED_REPLY) + 1]; 161 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; 162 int obj_created[BINDER_STAT_COUNT]; 163 int obj_deleted[BINDER_STAT_COUNT]; 164 }; 165 166 static struct binder_stats binder_stats; 167 168 static inline void binder_stats_deleted(enum binder_stat_types type) 169 { 170 binder_stats.obj_deleted[type]++; 171 } 172 173 static inline void binder_stats_created(enum binder_stat_types type) 174 { 175 binder_stats.obj_created[type]++; 176 } 177 178 struct binder_transaction_log_entry { 179 int debug_id; 180 int call_type; 181 int from_proc; 182 int from_thread; 183 int target_handle; 184 int to_proc; 185 int to_thread; 186 int to_node; 187 int data_size; 188 int offsets_size; 189 }; 190 struct binder_transaction_log { 191 int next; 192 int full; 193 struct binder_transaction_log_entry entry[32]; 194 }; 195 static struct binder_transaction_log binder_transaction_log; 196 static struct binder_transaction_log binder_transaction_log_failed; 197 198 static struct binder_transaction_log_entry *binder_transaction_log_add( 199 struct binder_transaction_log *log) 200 { 201 struct binder_transaction_log_entry *e; 202 203 e = &log->entry[log->next]; 204 memset(e, 0, sizeof(*e)); 205 log->next++; 206 if (log->next == ARRAY_SIZE(log->entry)) { 207 log->next = 0; 208 log->full = 1; 209 } 210 return e; 211 } 212 213 struct binder_work { 214 struct list_head entry; 215 enum { 216 BINDER_WORK_TRANSACTION = 1, 217 BINDER_WORK_TRANSACTION_COMPLETE, 218 BINDER_WORK_NODE, 219 BINDER_WORK_DEAD_BINDER, 220 BINDER_WORK_DEAD_BINDER_AND_CLEAR, 221 BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 222 } type; 223 }; 224 225 struct binder_node { 226 int debug_id; 227 struct binder_work work; 228 union { 229 struct rb_node rb_node; 230 struct hlist_node dead_node; 231 }; 232 struct binder_proc *proc; 233 struct hlist_head refs; 234 int internal_strong_refs; 235 int local_weak_refs; 236 int local_strong_refs; 237 binder_uintptr_t ptr; 238 binder_uintptr_t cookie; 239 unsigned has_strong_ref:1; 240 unsigned pending_strong_ref:1; 241 unsigned has_weak_ref:1; 242 unsigned pending_weak_ref:1; 243 unsigned has_async_transaction:1; 244 unsigned accept_fds:1; 245 unsigned min_priority:8; 246 struct list_head async_todo; 247 }; 248 249 struct binder_ref_death { 250 struct binder_work work; 251 binder_uintptr_t cookie; 252 }; 253 254 struct binder_ref { 255 /* Lookups needed: */ 256 /* node + proc => ref (transaction) */ 257 /* desc + proc => ref (transaction, inc/dec ref) */ 258 /* node => refs + procs (proc exit) */ 259 int debug_id; 260 struct rb_node rb_node_desc; 261 struct rb_node rb_node_node; 262 struct hlist_node node_entry; 263 struct binder_proc *proc; 264 struct binder_node *node; 265 uint32_t desc; 266 int strong; 267 int weak; 268 struct binder_ref_death *death; 269 }; 270 271 struct binder_buffer { 272 struct list_head entry; /* free and allocated entries by address */ 273 struct rb_node rb_node; /* free entry by size or allocated entry */ 274 /* by address */ 275 unsigned free:1; 276 unsigned allow_user_free:1; 277 unsigned async_transaction:1; 278 unsigned debug_id:29; 279 280 struct binder_transaction *transaction; 281 282 struct binder_node *target_node; 283 size_t data_size; 284 size_t offsets_size; 285 uint8_t data[0]; 286 }; 287 288 enum binder_deferred_state { 289 BINDER_DEFERRED_PUT_FILES = 0x01, 290 BINDER_DEFERRED_FLUSH = 0x02, 291 BINDER_DEFERRED_RELEASE = 0x04, 292 }; 293 294 struct binder_proc { 295 struct hlist_node proc_node; 296 struct rb_root threads; 297 struct rb_root nodes; 298 struct rb_root refs_by_desc; 299 struct rb_root refs_by_node; 300 int pid; 301 struct vm_area_struct *vma; 302 struct mm_struct *vma_vm_mm; 303 struct task_struct *tsk; 304 struct files_struct *files; 305 struct hlist_node deferred_work_node; 306 int deferred_work; 307 void *buffer; 308 ptrdiff_t user_buffer_offset; 309 310 struct list_head buffers; 311 struct rb_root free_buffers; 312 struct rb_root allocated_buffers; 313 size_t free_async_space; 314 315 struct page **pages; 316 size_t buffer_size; 317 uint32_t buffer_free; 318 struct list_head todo; 319 wait_queue_head_t wait; 320 struct binder_stats stats; 321 struct list_head delivered_death; 322 int max_threads; 323 int requested_threads; 324 int requested_threads_started; 325 int ready_threads; 326 long default_priority; 327 struct dentry *debugfs_entry; 328 }; 329 330 enum { 331 BINDER_LOOPER_STATE_REGISTERED = 0x01, 332 BINDER_LOOPER_STATE_ENTERED = 0x02, 333 BINDER_LOOPER_STATE_EXITED = 0x04, 334 BINDER_LOOPER_STATE_INVALID = 0x08, 335 BINDER_LOOPER_STATE_WAITING = 0x10, 336 BINDER_LOOPER_STATE_NEED_RETURN = 0x20 337 }; 338 339 struct binder_thread { 340 struct binder_proc *proc; 341 struct rb_node rb_node; 342 int pid; 343 int looper; 344 struct binder_transaction *transaction_stack; 345 struct list_head todo; 346 uint32_t return_error; /* Write failed, return error code in read buf */ 347 uint32_t return_error2; /* Write failed, return error code in read */ 348 /* buffer. Used when sending a reply to a dead process that */ 349 /* we are also waiting on */ 350 wait_queue_head_t wait; 351 struct binder_stats stats; 352 }; 353 354 struct binder_transaction { 355 int debug_id; 356 struct binder_work work; 357 struct binder_thread *from; 358 struct binder_transaction *from_parent; 359 struct binder_proc *to_proc; 360 struct binder_thread *to_thread; 361 struct binder_transaction *to_parent; 362 unsigned need_reply:1; 363 /* unsigned is_dead:1; */ /* not used at the moment */ 364 365 struct binder_buffer *buffer; 366 unsigned int code; 367 unsigned int flags; 368 long priority; 369 long saved_priority; 370 kuid_t sender_euid; 371 }; 372 373 static void 374 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); 375 376 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 377 { 378 struct files_struct *files = proc->files; 379 unsigned long rlim_cur; 380 unsigned long irqs; 381 382 if (files == NULL) 383 return -ESRCH; 384 385 if (!lock_task_sighand(proc->tsk, &irqs)) 386 return -EMFILE; 387 388 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); 389 unlock_task_sighand(proc->tsk, &irqs); 390 391 return __alloc_fd(files, 0, rlim_cur, flags); 392 } 393 394 /* 395 * copied from fd_install 396 */ 397 static void task_fd_install( 398 struct binder_proc *proc, unsigned int fd, struct file *file) 399 { 400 if (proc->files) 401 __fd_install(proc->files, fd, file); 402 } 403 404 /* 405 * copied from sys_close 406 */ 407 static long task_close_fd(struct binder_proc *proc, unsigned int fd) 408 { 409 int retval; 410 411 if (proc->files == NULL) 412 return -ESRCH; 413 414 retval = __close_fd(proc->files, fd); 415 /* can't restart close syscall because file table entry was cleared */ 416 if (unlikely(retval == -ERESTARTSYS || 417 retval == -ERESTARTNOINTR || 418 retval == -ERESTARTNOHAND || 419 retval == -ERESTART_RESTARTBLOCK)) 420 retval = -EINTR; 421 422 return retval; 423 } 424 425 static inline void binder_lock(const char *tag) 426 { 427 trace_binder_lock(tag); 428 mutex_lock(&binder_main_lock); 429 trace_binder_locked(tag); 430 } 431 432 static inline void binder_unlock(const char *tag) 433 { 434 trace_binder_unlock(tag); 435 mutex_unlock(&binder_main_lock); 436 } 437 438 static void binder_set_nice(long nice) 439 { 440 long min_nice; 441 442 if (can_nice(current, nice)) { 443 set_user_nice(current, nice); 444 return; 445 } 446 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); 447 binder_debug(BINDER_DEBUG_PRIORITY_CAP, 448 "%d: nice value %ld not allowed use %ld instead\n", 449 current->pid, nice, min_nice); 450 set_user_nice(current, min_nice); 451 if (min_nice <= MAX_NICE) 452 return; 453 binder_user_error("%d RLIMIT_NICE not set\n", current->pid); 454 } 455 456 static size_t binder_buffer_size(struct binder_proc *proc, 457 struct binder_buffer *buffer) 458 { 459 if (list_is_last(&buffer->entry, &proc->buffers)) 460 return proc->buffer + proc->buffer_size - (void *)buffer->data; 461 return (size_t)list_entry(buffer->entry.next, 462 struct binder_buffer, entry) - (size_t)buffer->data; 463 } 464 465 static void binder_insert_free_buffer(struct binder_proc *proc, 466 struct binder_buffer *new_buffer) 467 { 468 struct rb_node **p = &proc->free_buffers.rb_node; 469 struct rb_node *parent = NULL; 470 struct binder_buffer *buffer; 471 size_t buffer_size; 472 size_t new_buffer_size; 473 474 BUG_ON(!new_buffer->free); 475 476 new_buffer_size = binder_buffer_size(proc, new_buffer); 477 478 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 479 "%d: add free buffer, size %zd, at %p\n", 480 proc->pid, new_buffer_size, new_buffer); 481 482 while (*p) { 483 parent = *p; 484 buffer = rb_entry(parent, struct binder_buffer, rb_node); 485 BUG_ON(!buffer->free); 486 487 buffer_size = binder_buffer_size(proc, buffer); 488 489 if (new_buffer_size < buffer_size) 490 p = &parent->rb_left; 491 else 492 p = &parent->rb_right; 493 } 494 rb_link_node(&new_buffer->rb_node, parent, p); 495 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); 496 } 497 498 static void binder_insert_allocated_buffer(struct binder_proc *proc, 499 struct binder_buffer *new_buffer) 500 { 501 struct rb_node **p = &proc->allocated_buffers.rb_node; 502 struct rb_node *parent = NULL; 503 struct binder_buffer *buffer; 504 505 BUG_ON(new_buffer->free); 506 507 while (*p) { 508 parent = *p; 509 buffer = rb_entry(parent, struct binder_buffer, rb_node); 510 BUG_ON(buffer->free); 511 512 if (new_buffer < buffer) 513 p = &parent->rb_left; 514 else if (new_buffer > buffer) 515 p = &parent->rb_right; 516 else 517 BUG(); 518 } 519 rb_link_node(&new_buffer->rb_node, parent, p); 520 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); 521 } 522 523 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, 524 uintptr_t user_ptr) 525 { 526 struct rb_node *n = proc->allocated_buffers.rb_node; 527 struct binder_buffer *buffer; 528 struct binder_buffer *kern_ptr; 529 530 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset 531 - offsetof(struct binder_buffer, data)); 532 533 while (n) { 534 buffer = rb_entry(n, struct binder_buffer, rb_node); 535 BUG_ON(buffer->free); 536 537 if (kern_ptr < buffer) 538 n = n->rb_left; 539 else if (kern_ptr > buffer) 540 n = n->rb_right; 541 else 542 return buffer; 543 } 544 return NULL; 545 } 546 547 static int binder_update_page_range(struct binder_proc *proc, int allocate, 548 void *start, void *end, 549 struct vm_area_struct *vma) 550 { 551 void *page_addr; 552 unsigned long user_page_addr; 553 struct vm_struct tmp_area; 554 struct page **page; 555 struct mm_struct *mm; 556 557 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 558 "%d: %s pages %p-%p\n", proc->pid, 559 allocate ? "allocate" : "free", start, end); 560 561 if (end <= start) 562 return 0; 563 564 trace_binder_update_page_range(proc, allocate, start, end); 565 566 if (vma) 567 mm = NULL; 568 else 569 mm = get_task_mm(proc->tsk); 570 571 if (mm) { 572 down_write(&mm->mmap_sem); 573 vma = proc->vma; 574 if (vma && mm != proc->vma_vm_mm) { 575 pr_err("%d: vma mm and task mm mismatch\n", 576 proc->pid); 577 vma = NULL; 578 } 579 } 580 581 if (allocate == 0) 582 goto free_range; 583 584 if (vma == NULL) { 585 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", 586 proc->pid); 587 goto err_no_vma; 588 } 589 590 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 591 int ret; 592 593 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 594 595 BUG_ON(*page); 596 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 597 if (*page == NULL) { 598 pr_err("%d: binder_alloc_buf failed for page at %p\n", 599 proc->pid, page_addr); 600 goto err_alloc_page_failed; 601 } 602 tmp_area.addr = page_addr; 603 tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; 604 ret = map_vm_area(&tmp_area, PAGE_KERNEL, page); 605 if (ret) { 606 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", 607 proc->pid, page_addr); 608 goto err_map_kernel_failed; 609 } 610 user_page_addr = 611 (uintptr_t)page_addr + proc->user_buffer_offset; 612 ret = vm_insert_page(vma, user_page_addr, page[0]); 613 if (ret) { 614 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", 615 proc->pid, user_page_addr); 616 goto err_vm_insert_page_failed; 617 } 618 /* vm_insert_page does not seem to increment the refcount */ 619 } 620 if (mm) { 621 up_write(&mm->mmap_sem); 622 mmput(mm); 623 } 624 return 0; 625 626 free_range: 627 for (page_addr = end - PAGE_SIZE; page_addr >= start; 628 page_addr -= PAGE_SIZE) { 629 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 630 if (vma) 631 zap_page_range(vma, (uintptr_t)page_addr + 632 proc->user_buffer_offset, PAGE_SIZE, NULL); 633 err_vm_insert_page_failed: 634 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 635 err_map_kernel_failed: 636 __free_page(*page); 637 *page = NULL; 638 err_alloc_page_failed: 639 ; 640 } 641 err_no_vma: 642 if (mm) { 643 up_write(&mm->mmap_sem); 644 mmput(mm); 645 } 646 return -ENOMEM; 647 } 648 649 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, 650 size_t data_size, 651 size_t offsets_size, int is_async) 652 { 653 struct rb_node *n = proc->free_buffers.rb_node; 654 struct binder_buffer *buffer; 655 size_t buffer_size; 656 struct rb_node *best_fit = NULL; 657 void *has_page_addr; 658 void *end_page_addr; 659 size_t size; 660 661 if (proc->vma == NULL) { 662 pr_err("%d: binder_alloc_buf, no vma\n", 663 proc->pid); 664 return NULL; 665 } 666 667 size = ALIGN(data_size, sizeof(void *)) + 668 ALIGN(offsets_size, sizeof(void *)); 669 670 if (size < data_size || size < offsets_size) { 671 binder_user_error("%d: got transaction with invalid size %zd-%zd\n", 672 proc->pid, data_size, offsets_size); 673 return NULL; 674 } 675 676 if (is_async && 677 proc->free_async_space < size + sizeof(struct binder_buffer)) { 678 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 679 "%d: binder_alloc_buf size %zd failed, no async space left\n", 680 proc->pid, size); 681 return NULL; 682 } 683 684 while (n) { 685 buffer = rb_entry(n, struct binder_buffer, rb_node); 686 BUG_ON(!buffer->free); 687 buffer_size = binder_buffer_size(proc, buffer); 688 689 if (size < buffer_size) { 690 best_fit = n; 691 n = n->rb_left; 692 } else if (size > buffer_size) 693 n = n->rb_right; 694 else { 695 best_fit = n; 696 break; 697 } 698 } 699 if (best_fit == NULL) { 700 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", 701 proc->pid, size); 702 return NULL; 703 } 704 if (n == NULL) { 705 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 706 buffer_size = binder_buffer_size(proc, buffer); 707 } 708 709 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 710 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", 711 proc->pid, size, buffer, buffer_size); 712 713 has_page_addr = 714 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 715 if (n == NULL) { 716 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 717 buffer_size = size; /* no room for other buffers */ 718 else 719 buffer_size = size + sizeof(struct binder_buffer); 720 } 721 end_page_addr = 722 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 723 if (end_page_addr > has_page_addr) 724 end_page_addr = has_page_addr; 725 if (binder_update_page_range(proc, 1, 726 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 727 return NULL; 728 729 rb_erase(best_fit, &proc->free_buffers); 730 buffer->free = 0; 731 binder_insert_allocated_buffer(proc, buffer); 732 if (buffer_size != size) { 733 struct binder_buffer *new_buffer = (void *)buffer->data + size; 734 735 list_add(&new_buffer->entry, &buffer->entry); 736 new_buffer->free = 1; 737 binder_insert_free_buffer(proc, new_buffer); 738 } 739 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 740 "%d: binder_alloc_buf size %zd got %p\n", 741 proc->pid, size, buffer); 742 buffer->data_size = data_size; 743 buffer->offsets_size = offsets_size; 744 buffer->async_transaction = is_async; 745 if (is_async) { 746 proc->free_async_space -= size + sizeof(struct binder_buffer); 747 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 748 "%d: binder_alloc_buf size %zd async free %zd\n", 749 proc->pid, size, proc->free_async_space); 750 } 751 752 return buffer; 753 } 754 755 static void *buffer_start_page(struct binder_buffer *buffer) 756 { 757 return (void *)((uintptr_t)buffer & PAGE_MASK); 758 } 759 760 static void *buffer_end_page(struct binder_buffer *buffer) 761 { 762 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 763 } 764 765 static void binder_delete_free_buffer(struct binder_proc *proc, 766 struct binder_buffer *buffer) 767 { 768 struct binder_buffer *prev, *next = NULL; 769 int free_page_end = 1; 770 int free_page_start = 1; 771 772 BUG_ON(proc->buffers.next == &buffer->entry); 773 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 774 BUG_ON(!prev->free); 775 if (buffer_end_page(prev) == buffer_start_page(buffer)) { 776 free_page_start = 0; 777 if (buffer_end_page(prev) == buffer_end_page(buffer)) 778 free_page_end = 0; 779 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 780 "%d: merge free, buffer %p share page with %p\n", 781 proc->pid, buffer, prev); 782 } 783 784 if (!list_is_last(&buffer->entry, &proc->buffers)) { 785 next = list_entry(buffer->entry.next, 786 struct binder_buffer, entry); 787 if (buffer_start_page(next) == buffer_end_page(buffer)) { 788 free_page_end = 0; 789 if (buffer_start_page(next) == 790 buffer_start_page(buffer)) 791 free_page_start = 0; 792 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 793 "%d: merge free, buffer %p share page with %p\n", 794 proc->pid, buffer, prev); 795 } 796 } 797 list_del(&buffer->entry); 798 if (free_page_start || free_page_end) { 799 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 800 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", 801 proc->pid, buffer, free_page_start ? "" : " end", 802 free_page_end ? "" : " start", prev, next); 803 binder_update_page_range(proc, 0, free_page_start ? 804 buffer_start_page(buffer) : buffer_end_page(buffer), 805 (free_page_end ? buffer_end_page(buffer) : 806 buffer_start_page(buffer)) + PAGE_SIZE, NULL); 807 } 808 } 809 810 static void binder_free_buf(struct binder_proc *proc, 811 struct binder_buffer *buffer) 812 { 813 size_t size, buffer_size; 814 815 buffer_size = binder_buffer_size(proc, buffer); 816 817 size = ALIGN(buffer->data_size, sizeof(void *)) + 818 ALIGN(buffer->offsets_size, sizeof(void *)); 819 820 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 821 "%d: binder_free_buf %p size %zd buffer_size %zd\n", 822 proc->pid, buffer, size, buffer_size); 823 824 BUG_ON(buffer->free); 825 BUG_ON(size > buffer_size); 826 BUG_ON(buffer->transaction != NULL); 827 BUG_ON((void *)buffer < proc->buffer); 828 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); 829 830 if (buffer->async_transaction) { 831 proc->free_async_space += size + sizeof(struct binder_buffer); 832 833 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 834 "%d: binder_free_buf size %zd async free %zd\n", 835 proc->pid, size, proc->free_async_space); 836 } 837 838 binder_update_page_range(proc, 0, 839 (void *)PAGE_ALIGN((uintptr_t)buffer->data), 840 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 841 NULL); 842 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 843 buffer->free = 1; 844 if (!list_is_last(&buffer->entry, &proc->buffers)) { 845 struct binder_buffer *next = list_entry(buffer->entry.next, 846 struct binder_buffer, entry); 847 848 if (next->free) { 849 rb_erase(&next->rb_node, &proc->free_buffers); 850 binder_delete_free_buffer(proc, next); 851 } 852 } 853 if (proc->buffers.next != &buffer->entry) { 854 struct binder_buffer *prev = list_entry(buffer->entry.prev, 855 struct binder_buffer, entry); 856 857 if (prev->free) { 858 binder_delete_free_buffer(proc, buffer); 859 rb_erase(&prev->rb_node, &proc->free_buffers); 860 buffer = prev; 861 } 862 } 863 binder_insert_free_buffer(proc, buffer); 864 } 865 866 static struct binder_node *binder_get_node(struct binder_proc *proc, 867 binder_uintptr_t ptr) 868 { 869 struct rb_node *n = proc->nodes.rb_node; 870 struct binder_node *node; 871 872 while (n) { 873 node = rb_entry(n, struct binder_node, rb_node); 874 875 if (ptr < node->ptr) 876 n = n->rb_left; 877 else if (ptr > node->ptr) 878 n = n->rb_right; 879 else 880 return node; 881 } 882 return NULL; 883 } 884 885 static struct binder_node *binder_new_node(struct binder_proc *proc, 886 binder_uintptr_t ptr, 887 binder_uintptr_t cookie) 888 { 889 struct rb_node **p = &proc->nodes.rb_node; 890 struct rb_node *parent = NULL; 891 struct binder_node *node; 892 893 while (*p) { 894 parent = *p; 895 node = rb_entry(parent, struct binder_node, rb_node); 896 897 if (ptr < node->ptr) 898 p = &(*p)->rb_left; 899 else if (ptr > node->ptr) 900 p = &(*p)->rb_right; 901 else 902 return NULL; 903 } 904 905 node = kzalloc(sizeof(*node), GFP_KERNEL); 906 if (node == NULL) 907 return NULL; 908 binder_stats_created(BINDER_STAT_NODE); 909 rb_link_node(&node->rb_node, parent, p); 910 rb_insert_color(&node->rb_node, &proc->nodes); 911 node->debug_id = ++binder_last_id; 912 node->proc = proc; 913 node->ptr = ptr; 914 node->cookie = cookie; 915 node->work.type = BINDER_WORK_NODE; 916 INIT_LIST_HEAD(&node->work.entry); 917 INIT_LIST_HEAD(&node->async_todo); 918 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 919 "%d:%d node %d u%016llx c%016llx created\n", 920 proc->pid, current->pid, node->debug_id, 921 (u64)node->ptr, (u64)node->cookie); 922 return node; 923 } 924 925 static int binder_inc_node(struct binder_node *node, int strong, int internal, 926 struct list_head *target_list) 927 { 928 if (strong) { 929 if (internal) { 930 if (target_list == NULL && 931 node->internal_strong_refs == 0 && 932 !(node == binder_context_mgr_node && 933 node->has_strong_ref)) { 934 pr_err("invalid inc strong node for %d\n", 935 node->debug_id); 936 return -EINVAL; 937 } 938 node->internal_strong_refs++; 939 } else 940 node->local_strong_refs++; 941 if (!node->has_strong_ref && target_list) { 942 list_del_init(&node->work.entry); 943 list_add_tail(&node->work.entry, target_list); 944 } 945 } else { 946 if (!internal) 947 node->local_weak_refs++; 948 if (!node->has_weak_ref && list_empty(&node->work.entry)) { 949 if (target_list == NULL) { 950 pr_err("invalid inc weak node for %d\n", 951 node->debug_id); 952 return -EINVAL; 953 } 954 list_add_tail(&node->work.entry, target_list); 955 } 956 } 957 return 0; 958 } 959 960 static int binder_dec_node(struct binder_node *node, int strong, int internal) 961 { 962 if (strong) { 963 if (internal) 964 node->internal_strong_refs--; 965 else 966 node->local_strong_refs--; 967 if (node->local_strong_refs || node->internal_strong_refs) 968 return 0; 969 } else { 970 if (!internal) 971 node->local_weak_refs--; 972 if (node->local_weak_refs || !hlist_empty(&node->refs)) 973 return 0; 974 } 975 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { 976 if (list_empty(&node->work.entry)) { 977 list_add_tail(&node->work.entry, &node->proc->todo); 978 wake_up_interruptible(&node->proc->wait); 979 } 980 } else { 981 if (hlist_empty(&node->refs) && !node->local_strong_refs && 982 !node->local_weak_refs) { 983 list_del_init(&node->work.entry); 984 if (node->proc) { 985 rb_erase(&node->rb_node, &node->proc->nodes); 986 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 987 "refless node %d deleted\n", 988 node->debug_id); 989 } else { 990 hlist_del(&node->dead_node); 991 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 992 "dead node %d deleted\n", 993 node->debug_id); 994 } 995 kfree(node); 996 binder_stats_deleted(BINDER_STAT_NODE); 997 } 998 } 999 1000 return 0; 1001 } 1002 1003 1004 static struct binder_ref *binder_get_ref(struct binder_proc *proc, 1005 uint32_t desc) 1006 { 1007 struct rb_node *n = proc->refs_by_desc.rb_node; 1008 struct binder_ref *ref; 1009 1010 while (n) { 1011 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1012 1013 if (desc < ref->desc) 1014 n = n->rb_left; 1015 else if (desc > ref->desc) 1016 n = n->rb_right; 1017 else 1018 return ref; 1019 } 1020 return NULL; 1021 } 1022 1023 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, 1024 struct binder_node *node) 1025 { 1026 struct rb_node *n; 1027 struct rb_node **p = &proc->refs_by_node.rb_node; 1028 struct rb_node *parent = NULL; 1029 struct binder_ref *ref, *new_ref; 1030 1031 while (*p) { 1032 parent = *p; 1033 ref = rb_entry(parent, struct binder_ref, rb_node_node); 1034 1035 if (node < ref->node) 1036 p = &(*p)->rb_left; 1037 else if (node > ref->node) 1038 p = &(*p)->rb_right; 1039 else 1040 return ref; 1041 } 1042 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); 1043 if (new_ref == NULL) 1044 return NULL; 1045 binder_stats_created(BINDER_STAT_REF); 1046 new_ref->debug_id = ++binder_last_id; 1047 new_ref->proc = proc; 1048 new_ref->node = node; 1049 rb_link_node(&new_ref->rb_node_node, parent, p); 1050 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1051 1052 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1; 1053 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1054 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1055 if (ref->desc > new_ref->desc) 1056 break; 1057 new_ref->desc = ref->desc + 1; 1058 } 1059 1060 p = &proc->refs_by_desc.rb_node; 1061 while (*p) { 1062 parent = *p; 1063 ref = rb_entry(parent, struct binder_ref, rb_node_desc); 1064 1065 if (new_ref->desc < ref->desc) 1066 p = &(*p)->rb_left; 1067 else if (new_ref->desc > ref->desc) 1068 p = &(*p)->rb_right; 1069 else 1070 BUG(); 1071 } 1072 rb_link_node(&new_ref->rb_node_desc, parent, p); 1073 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); 1074 if (node) { 1075 hlist_add_head(&new_ref->node_entry, &node->refs); 1076 1077 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1078 "%d new ref %d desc %d for node %d\n", 1079 proc->pid, new_ref->debug_id, new_ref->desc, 1080 node->debug_id); 1081 } else { 1082 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1083 "%d new ref %d desc %d for dead node\n", 1084 proc->pid, new_ref->debug_id, new_ref->desc); 1085 } 1086 return new_ref; 1087 } 1088 1089 static void binder_delete_ref(struct binder_ref *ref) 1090 { 1091 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1092 "%d delete ref %d desc %d for node %d\n", 1093 ref->proc->pid, ref->debug_id, ref->desc, 1094 ref->node->debug_id); 1095 1096 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); 1097 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); 1098 if (ref->strong) 1099 binder_dec_node(ref->node, 1, 1); 1100 hlist_del(&ref->node_entry); 1101 binder_dec_node(ref->node, 0, 1); 1102 if (ref->death) { 1103 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1104 "%d delete ref %d desc %d has death notification\n", 1105 ref->proc->pid, ref->debug_id, ref->desc); 1106 list_del(&ref->death->work.entry); 1107 kfree(ref->death); 1108 binder_stats_deleted(BINDER_STAT_DEATH); 1109 } 1110 kfree(ref); 1111 binder_stats_deleted(BINDER_STAT_REF); 1112 } 1113 1114 static int binder_inc_ref(struct binder_ref *ref, int strong, 1115 struct list_head *target_list) 1116 { 1117 int ret; 1118 1119 if (strong) { 1120 if (ref->strong == 0) { 1121 ret = binder_inc_node(ref->node, 1, 1, target_list); 1122 if (ret) 1123 return ret; 1124 } 1125 ref->strong++; 1126 } else { 1127 if (ref->weak == 0) { 1128 ret = binder_inc_node(ref->node, 0, 1, target_list); 1129 if (ret) 1130 return ret; 1131 } 1132 ref->weak++; 1133 } 1134 return 0; 1135 } 1136 1137 1138 static int binder_dec_ref(struct binder_ref *ref, int strong) 1139 { 1140 if (strong) { 1141 if (ref->strong == 0) { 1142 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1143 ref->proc->pid, ref->debug_id, 1144 ref->desc, ref->strong, ref->weak); 1145 return -EINVAL; 1146 } 1147 ref->strong--; 1148 if (ref->strong == 0) { 1149 int ret; 1150 1151 ret = binder_dec_node(ref->node, strong, 1); 1152 if (ret) 1153 return ret; 1154 } 1155 } else { 1156 if (ref->weak == 0) { 1157 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1158 ref->proc->pid, ref->debug_id, 1159 ref->desc, ref->strong, ref->weak); 1160 return -EINVAL; 1161 } 1162 ref->weak--; 1163 } 1164 if (ref->strong == 0 && ref->weak == 0) 1165 binder_delete_ref(ref); 1166 return 0; 1167 } 1168 1169 static void binder_pop_transaction(struct binder_thread *target_thread, 1170 struct binder_transaction *t) 1171 { 1172 if (target_thread) { 1173 BUG_ON(target_thread->transaction_stack != t); 1174 BUG_ON(target_thread->transaction_stack->from != target_thread); 1175 target_thread->transaction_stack = 1176 target_thread->transaction_stack->from_parent; 1177 t->from = NULL; 1178 } 1179 t->need_reply = 0; 1180 if (t->buffer) 1181 t->buffer->transaction = NULL; 1182 kfree(t); 1183 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1184 } 1185 1186 static void binder_send_failed_reply(struct binder_transaction *t, 1187 uint32_t error_code) 1188 { 1189 struct binder_thread *target_thread; 1190 struct binder_transaction *next; 1191 1192 BUG_ON(t->flags & TF_ONE_WAY); 1193 while (1) { 1194 target_thread = t->from; 1195 if (target_thread) { 1196 if (target_thread->return_error != BR_OK && 1197 target_thread->return_error2 == BR_OK) { 1198 target_thread->return_error2 = 1199 target_thread->return_error; 1200 target_thread->return_error = BR_OK; 1201 } 1202 if (target_thread->return_error == BR_OK) { 1203 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1204 "send failed reply for transaction %d to %d:%d\n", 1205 t->debug_id, 1206 target_thread->proc->pid, 1207 target_thread->pid); 1208 1209 binder_pop_transaction(target_thread, t); 1210 target_thread->return_error = error_code; 1211 wake_up_interruptible(&target_thread->wait); 1212 } else { 1213 pr_err("reply failed, target thread, %d:%d, has error code %d already\n", 1214 target_thread->proc->pid, 1215 target_thread->pid, 1216 target_thread->return_error); 1217 } 1218 return; 1219 } 1220 next = t->from_parent; 1221 1222 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1223 "send failed reply for transaction %d, target dead\n", 1224 t->debug_id); 1225 1226 binder_pop_transaction(target_thread, t); 1227 if (next == NULL) { 1228 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1229 "reply failed, no target thread at root\n"); 1230 return; 1231 } 1232 t = next; 1233 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1234 "reply failed, no target thread -- retry %d\n", 1235 t->debug_id); 1236 } 1237 } 1238 1239 static void binder_transaction_buffer_release(struct binder_proc *proc, 1240 struct binder_buffer *buffer, 1241 binder_size_t *failed_at) 1242 { 1243 binder_size_t *offp, *off_end; 1244 int debug_id = buffer->debug_id; 1245 1246 binder_debug(BINDER_DEBUG_TRANSACTION, 1247 "%d buffer release %d, size %zd-%zd, failed at %p\n", 1248 proc->pid, buffer->debug_id, 1249 buffer->data_size, buffer->offsets_size, failed_at); 1250 1251 if (buffer->target_node) 1252 binder_dec_node(buffer->target_node, 1, 0); 1253 1254 offp = (binder_size_t *)(buffer->data + 1255 ALIGN(buffer->data_size, sizeof(void *))); 1256 if (failed_at) 1257 off_end = failed_at; 1258 else 1259 off_end = (void *)offp + buffer->offsets_size; 1260 for (; offp < off_end; offp++) { 1261 struct flat_binder_object *fp; 1262 1263 if (*offp > buffer->data_size - sizeof(*fp) || 1264 buffer->data_size < sizeof(*fp) || 1265 !IS_ALIGNED(*offp, sizeof(u32))) { 1266 pr_err("transaction release %d bad offset %lld, size %zd\n", 1267 debug_id, (u64)*offp, buffer->data_size); 1268 continue; 1269 } 1270 fp = (struct flat_binder_object *)(buffer->data + *offp); 1271 switch (fp->type) { 1272 case BINDER_TYPE_BINDER: 1273 case BINDER_TYPE_WEAK_BINDER: { 1274 struct binder_node *node = binder_get_node(proc, fp->binder); 1275 1276 if (node == NULL) { 1277 pr_err("transaction release %d bad node %016llx\n", 1278 debug_id, (u64)fp->binder); 1279 break; 1280 } 1281 binder_debug(BINDER_DEBUG_TRANSACTION, 1282 " node %d u%016llx\n", 1283 node->debug_id, (u64)node->ptr); 1284 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); 1285 } break; 1286 case BINDER_TYPE_HANDLE: 1287 case BINDER_TYPE_WEAK_HANDLE: { 1288 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1289 1290 if (ref == NULL) { 1291 pr_err("transaction release %d bad handle %d\n", 1292 debug_id, fp->handle); 1293 break; 1294 } 1295 binder_debug(BINDER_DEBUG_TRANSACTION, 1296 " ref %d desc %d (node %d)\n", 1297 ref->debug_id, ref->desc, ref->node->debug_id); 1298 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); 1299 } break; 1300 1301 case BINDER_TYPE_FD: 1302 binder_debug(BINDER_DEBUG_TRANSACTION, 1303 " fd %d\n", fp->handle); 1304 if (failed_at) 1305 task_close_fd(proc, fp->handle); 1306 break; 1307 1308 default: 1309 pr_err("transaction release %d bad object type %x\n", 1310 debug_id, fp->type); 1311 break; 1312 } 1313 } 1314 } 1315 1316 static void binder_transaction(struct binder_proc *proc, 1317 struct binder_thread *thread, 1318 struct binder_transaction_data *tr, int reply) 1319 { 1320 struct binder_transaction *t; 1321 struct binder_work *tcomplete; 1322 binder_size_t *offp, *off_end; 1323 struct binder_proc *target_proc; 1324 struct binder_thread *target_thread = NULL; 1325 struct binder_node *target_node = NULL; 1326 struct list_head *target_list; 1327 wait_queue_head_t *target_wait; 1328 struct binder_transaction *in_reply_to = NULL; 1329 struct binder_transaction_log_entry *e; 1330 uint32_t return_error; 1331 1332 e = binder_transaction_log_add(&binder_transaction_log); 1333 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 1334 e->from_proc = proc->pid; 1335 e->from_thread = thread->pid; 1336 e->target_handle = tr->target.handle; 1337 e->data_size = tr->data_size; 1338 e->offsets_size = tr->offsets_size; 1339 1340 if (reply) { 1341 in_reply_to = thread->transaction_stack; 1342 if (in_reply_to == NULL) { 1343 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 1344 proc->pid, thread->pid); 1345 return_error = BR_FAILED_REPLY; 1346 goto err_empty_call_stack; 1347 } 1348 binder_set_nice(in_reply_to->saved_priority); 1349 if (in_reply_to->to_thread != thread) { 1350 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 1351 proc->pid, thread->pid, in_reply_to->debug_id, 1352 in_reply_to->to_proc ? 1353 in_reply_to->to_proc->pid : 0, 1354 in_reply_to->to_thread ? 1355 in_reply_to->to_thread->pid : 0); 1356 return_error = BR_FAILED_REPLY; 1357 in_reply_to = NULL; 1358 goto err_bad_call_stack; 1359 } 1360 thread->transaction_stack = in_reply_to->to_parent; 1361 target_thread = in_reply_to->from; 1362 if (target_thread == NULL) { 1363 return_error = BR_DEAD_REPLY; 1364 goto err_dead_binder; 1365 } 1366 if (target_thread->transaction_stack != in_reply_to) { 1367 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 1368 proc->pid, thread->pid, 1369 target_thread->transaction_stack ? 1370 target_thread->transaction_stack->debug_id : 0, 1371 in_reply_to->debug_id); 1372 return_error = BR_FAILED_REPLY; 1373 in_reply_to = NULL; 1374 target_thread = NULL; 1375 goto err_dead_binder; 1376 } 1377 target_proc = target_thread->proc; 1378 } else { 1379 if (tr->target.handle) { 1380 struct binder_ref *ref; 1381 1382 ref = binder_get_ref(proc, tr->target.handle); 1383 if (ref == NULL) { 1384 binder_user_error("%d:%d got transaction to invalid handle\n", 1385 proc->pid, thread->pid); 1386 return_error = BR_FAILED_REPLY; 1387 goto err_invalid_target_handle; 1388 } 1389 target_node = ref->node; 1390 } else { 1391 target_node = binder_context_mgr_node; 1392 if (target_node == NULL) { 1393 return_error = BR_DEAD_REPLY; 1394 goto err_no_context_mgr_node; 1395 } 1396 } 1397 e->to_node = target_node->debug_id; 1398 target_proc = target_node->proc; 1399 if (target_proc == NULL) { 1400 return_error = BR_DEAD_REPLY; 1401 goto err_dead_binder; 1402 } 1403 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1404 struct binder_transaction *tmp; 1405 1406 tmp = thread->transaction_stack; 1407 if (tmp->to_thread != thread) { 1408 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 1409 proc->pid, thread->pid, tmp->debug_id, 1410 tmp->to_proc ? tmp->to_proc->pid : 0, 1411 tmp->to_thread ? 1412 tmp->to_thread->pid : 0); 1413 return_error = BR_FAILED_REPLY; 1414 goto err_bad_call_stack; 1415 } 1416 while (tmp) { 1417 if (tmp->from && tmp->from->proc == target_proc) 1418 target_thread = tmp->from; 1419 tmp = tmp->from_parent; 1420 } 1421 } 1422 } 1423 if (target_thread) { 1424 e->to_thread = target_thread->pid; 1425 target_list = &target_thread->todo; 1426 target_wait = &target_thread->wait; 1427 } else { 1428 target_list = &target_proc->todo; 1429 target_wait = &target_proc->wait; 1430 } 1431 e->to_proc = target_proc->pid; 1432 1433 /* TODO: reuse incoming transaction for reply */ 1434 t = kzalloc(sizeof(*t), GFP_KERNEL); 1435 if (t == NULL) { 1436 return_error = BR_FAILED_REPLY; 1437 goto err_alloc_t_failed; 1438 } 1439 binder_stats_created(BINDER_STAT_TRANSACTION); 1440 1441 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 1442 if (tcomplete == NULL) { 1443 return_error = BR_FAILED_REPLY; 1444 goto err_alloc_tcomplete_failed; 1445 } 1446 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 1447 1448 t->debug_id = ++binder_last_id; 1449 e->debug_id = t->debug_id; 1450 1451 if (reply) 1452 binder_debug(BINDER_DEBUG_TRANSACTION, 1453 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n", 1454 proc->pid, thread->pid, t->debug_id, 1455 target_proc->pid, target_thread->pid, 1456 (u64)tr->data.ptr.buffer, 1457 (u64)tr->data.ptr.offsets, 1458 (u64)tr->data_size, (u64)tr->offsets_size); 1459 else 1460 binder_debug(BINDER_DEBUG_TRANSACTION, 1461 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n", 1462 proc->pid, thread->pid, t->debug_id, 1463 target_proc->pid, target_node->debug_id, 1464 (u64)tr->data.ptr.buffer, 1465 (u64)tr->data.ptr.offsets, 1466 (u64)tr->data_size, (u64)tr->offsets_size); 1467 1468 if (!reply && !(tr->flags & TF_ONE_WAY)) 1469 t->from = thread; 1470 else 1471 t->from = NULL; 1472 t->sender_euid = task_euid(proc->tsk); 1473 t->to_proc = target_proc; 1474 t->to_thread = target_thread; 1475 t->code = tr->code; 1476 t->flags = tr->flags; 1477 t->priority = task_nice(current); 1478 1479 trace_binder_transaction(reply, t, target_node); 1480 1481 t->buffer = binder_alloc_buf(target_proc, tr->data_size, 1482 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); 1483 if (t->buffer == NULL) { 1484 return_error = BR_FAILED_REPLY; 1485 goto err_binder_alloc_buf_failed; 1486 } 1487 t->buffer->allow_user_free = 0; 1488 t->buffer->debug_id = t->debug_id; 1489 t->buffer->transaction = t; 1490 t->buffer->target_node = target_node; 1491 trace_binder_transaction_alloc_buf(t->buffer); 1492 if (target_node) 1493 binder_inc_node(target_node, 1, 0, NULL); 1494 1495 offp = (binder_size_t *)(t->buffer->data + 1496 ALIGN(tr->data_size, sizeof(void *))); 1497 1498 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 1499 tr->data.ptr.buffer, tr->data_size)) { 1500 binder_user_error("%d:%d got transaction with invalid data ptr\n", 1501 proc->pid, thread->pid); 1502 return_error = BR_FAILED_REPLY; 1503 goto err_copy_data_failed; 1504 } 1505 if (copy_from_user(offp, (const void __user *)(uintptr_t) 1506 tr->data.ptr.offsets, tr->offsets_size)) { 1507 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 1508 proc->pid, thread->pid); 1509 return_error = BR_FAILED_REPLY; 1510 goto err_copy_data_failed; 1511 } 1512 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 1513 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 1514 proc->pid, thread->pid, (u64)tr->offsets_size); 1515 return_error = BR_FAILED_REPLY; 1516 goto err_bad_offset; 1517 } 1518 off_end = (void *)offp + tr->offsets_size; 1519 for (; offp < off_end; offp++) { 1520 struct flat_binder_object *fp; 1521 1522 if (*offp > t->buffer->data_size - sizeof(*fp) || 1523 t->buffer->data_size < sizeof(*fp) || 1524 !IS_ALIGNED(*offp, sizeof(u32))) { 1525 binder_user_error("%d:%d got transaction with invalid offset, %lld\n", 1526 proc->pid, thread->pid, (u64)*offp); 1527 return_error = BR_FAILED_REPLY; 1528 goto err_bad_offset; 1529 } 1530 fp = (struct flat_binder_object *)(t->buffer->data + *offp); 1531 switch (fp->type) { 1532 case BINDER_TYPE_BINDER: 1533 case BINDER_TYPE_WEAK_BINDER: { 1534 struct binder_ref *ref; 1535 struct binder_node *node = binder_get_node(proc, fp->binder); 1536 1537 if (node == NULL) { 1538 node = binder_new_node(proc, fp->binder, fp->cookie); 1539 if (node == NULL) { 1540 return_error = BR_FAILED_REPLY; 1541 goto err_binder_new_node_failed; 1542 } 1543 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1544 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1545 } 1546 if (fp->cookie != node->cookie) { 1547 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 1548 proc->pid, thread->pid, 1549 (u64)fp->binder, node->debug_id, 1550 (u64)fp->cookie, (u64)node->cookie); 1551 return_error = BR_FAILED_REPLY; 1552 goto err_binder_get_ref_for_node_failed; 1553 } 1554 ref = binder_get_ref_for_node(target_proc, node); 1555 if (ref == NULL) { 1556 return_error = BR_FAILED_REPLY; 1557 goto err_binder_get_ref_for_node_failed; 1558 } 1559 if (fp->type == BINDER_TYPE_BINDER) 1560 fp->type = BINDER_TYPE_HANDLE; 1561 else 1562 fp->type = BINDER_TYPE_WEAK_HANDLE; 1563 fp->handle = ref->desc; 1564 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, 1565 &thread->todo); 1566 1567 trace_binder_transaction_node_to_ref(t, node, ref); 1568 binder_debug(BINDER_DEBUG_TRANSACTION, 1569 " node %d u%016llx -> ref %d desc %d\n", 1570 node->debug_id, (u64)node->ptr, 1571 ref->debug_id, ref->desc); 1572 } break; 1573 case BINDER_TYPE_HANDLE: 1574 case BINDER_TYPE_WEAK_HANDLE: { 1575 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1576 1577 if (ref == NULL) { 1578 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 1579 proc->pid, 1580 thread->pid, fp->handle); 1581 return_error = BR_FAILED_REPLY; 1582 goto err_binder_get_ref_failed; 1583 } 1584 if (ref->node->proc == target_proc) { 1585 if (fp->type == BINDER_TYPE_HANDLE) 1586 fp->type = BINDER_TYPE_BINDER; 1587 else 1588 fp->type = BINDER_TYPE_WEAK_BINDER; 1589 fp->binder = ref->node->ptr; 1590 fp->cookie = ref->node->cookie; 1591 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); 1592 trace_binder_transaction_ref_to_node(t, ref); 1593 binder_debug(BINDER_DEBUG_TRANSACTION, 1594 " ref %d desc %d -> node %d u%016llx\n", 1595 ref->debug_id, ref->desc, ref->node->debug_id, 1596 (u64)ref->node->ptr); 1597 } else { 1598 struct binder_ref *new_ref; 1599 1600 new_ref = binder_get_ref_for_node(target_proc, ref->node); 1601 if (new_ref == NULL) { 1602 return_error = BR_FAILED_REPLY; 1603 goto err_binder_get_ref_for_node_failed; 1604 } 1605 fp->handle = new_ref->desc; 1606 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); 1607 trace_binder_transaction_ref_to_ref(t, ref, 1608 new_ref); 1609 binder_debug(BINDER_DEBUG_TRANSACTION, 1610 " ref %d desc %d -> ref %d desc %d (node %d)\n", 1611 ref->debug_id, ref->desc, new_ref->debug_id, 1612 new_ref->desc, ref->node->debug_id); 1613 } 1614 } break; 1615 1616 case BINDER_TYPE_FD: { 1617 int target_fd; 1618 struct file *file; 1619 1620 if (reply) { 1621 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { 1622 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n", 1623 proc->pid, thread->pid, fp->handle); 1624 return_error = BR_FAILED_REPLY; 1625 goto err_fd_not_allowed; 1626 } 1627 } else if (!target_node->accept_fds) { 1628 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n", 1629 proc->pid, thread->pid, fp->handle); 1630 return_error = BR_FAILED_REPLY; 1631 goto err_fd_not_allowed; 1632 } 1633 1634 file = fget(fp->handle); 1635 if (file == NULL) { 1636 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 1637 proc->pid, thread->pid, fp->handle); 1638 return_error = BR_FAILED_REPLY; 1639 goto err_fget_failed; 1640 } 1641 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1642 if (target_fd < 0) { 1643 fput(file); 1644 return_error = BR_FAILED_REPLY; 1645 goto err_get_unused_fd_failed; 1646 } 1647 task_fd_install(target_proc, target_fd, file); 1648 trace_binder_transaction_fd(t, fp->handle, target_fd); 1649 binder_debug(BINDER_DEBUG_TRANSACTION, 1650 " fd %d -> %d\n", fp->handle, target_fd); 1651 /* TODO: fput? */ 1652 fp->handle = target_fd; 1653 } break; 1654 1655 default: 1656 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 1657 proc->pid, thread->pid, fp->type); 1658 return_error = BR_FAILED_REPLY; 1659 goto err_bad_object_type; 1660 } 1661 } 1662 if (reply) { 1663 BUG_ON(t->buffer->async_transaction != 0); 1664 binder_pop_transaction(target_thread, in_reply_to); 1665 } else if (!(t->flags & TF_ONE_WAY)) { 1666 BUG_ON(t->buffer->async_transaction != 0); 1667 t->need_reply = 1; 1668 t->from_parent = thread->transaction_stack; 1669 thread->transaction_stack = t; 1670 } else { 1671 BUG_ON(target_node == NULL); 1672 BUG_ON(t->buffer->async_transaction != 1); 1673 if (target_node->has_async_transaction) { 1674 target_list = &target_node->async_todo; 1675 target_wait = NULL; 1676 } else 1677 target_node->has_async_transaction = 1; 1678 } 1679 t->work.type = BINDER_WORK_TRANSACTION; 1680 list_add_tail(&t->work.entry, target_list); 1681 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 1682 list_add_tail(&tcomplete->entry, &thread->todo); 1683 if (target_wait) 1684 wake_up_interruptible(target_wait); 1685 return; 1686 1687 err_get_unused_fd_failed: 1688 err_fget_failed: 1689 err_fd_not_allowed: 1690 err_binder_get_ref_for_node_failed: 1691 err_binder_get_ref_failed: 1692 err_binder_new_node_failed: 1693 err_bad_object_type: 1694 err_bad_offset: 1695 err_copy_data_failed: 1696 trace_binder_transaction_failed_buffer_release(t->buffer); 1697 binder_transaction_buffer_release(target_proc, t->buffer, offp); 1698 t->buffer->transaction = NULL; 1699 binder_free_buf(target_proc, t->buffer); 1700 err_binder_alloc_buf_failed: 1701 kfree(tcomplete); 1702 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 1703 err_alloc_tcomplete_failed: 1704 kfree(t); 1705 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1706 err_alloc_t_failed: 1707 err_bad_call_stack: 1708 err_empty_call_stack: 1709 err_dead_binder: 1710 err_invalid_target_handle: 1711 err_no_context_mgr_node: 1712 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1713 "%d:%d transaction failed %d, size %lld-%lld\n", 1714 proc->pid, thread->pid, return_error, 1715 (u64)tr->data_size, (u64)tr->offsets_size); 1716 1717 { 1718 struct binder_transaction_log_entry *fe; 1719 1720 fe = binder_transaction_log_add(&binder_transaction_log_failed); 1721 *fe = *e; 1722 } 1723 1724 BUG_ON(thread->return_error != BR_OK); 1725 if (in_reply_to) { 1726 thread->return_error = BR_TRANSACTION_COMPLETE; 1727 binder_send_failed_reply(in_reply_to, return_error); 1728 } else 1729 thread->return_error = return_error; 1730 } 1731 1732 static int binder_thread_write(struct binder_proc *proc, 1733 struct binder_thread *thread, 1734 binder_uintptr_t binder_buffer, size_t size, 1735 binder_size_t *consumed) 1736 { 1737 uint32_t cmd; 1738 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 1739 void __user *ptr = buffer + *consumed; 1740 void __user *end = buffer + size; 1741 1742 while (ptr < end && thread->return_error == BR_OK) { 1743 if (get_user(cmd, (uint32_t __user *)ptr)) 1744 return -EFAULT; 1745 ptr += sizeof(uint32_t); 1746 trace_binder_command(cmd); 1747 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1748 binder_stats.bc[_IOC_NR(cmd)]++; 1749 proc->stats.bc[_IOC_NR(cmd)]++; 1750 thread->stats.bc[_IOC_NR(cmd)]++; 1751 } 1752 switch (cmd) { 1753 case BC_INCREFS: 1754 case BC_ACQUIRE: 1755 case BC_RELEASE: 1756 case BC_DECREFS: { 1757 uint32_t target; 1758 struct binder_ref *ref; 1759 const char *debug_string; 1760 1761 if (get_user(target, (uint32_t __user *)ptr)) 1762 return -EFAULT; 1763 ptr += sizeof(uint32_t); 1764 if (target == 0 && binder_context_mgr_node && 1765 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1766 ref = binder_get_ref_for_node(proc, 1767 binder_context_mgr_node); 1768 if (ref->desc != target) { 1769 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", 1770 proc->pid, thread->pid, 1771 ref->desc); 1772 } 1773 } else 1774 ref = binder_get_ref(proc, target); 1775 if (ref == NULL) { 1776 binder_user_error("%d:%d refcount change on invalid ref %d\n", 1777 proc->pid, thread->pid, target); 1778 break; 1779 } 1780 switch (cmd) { 1781 case BC_INCREFS: 1782 debug_string = "IncRefs"; 1783 binder_inc_ref(ref, 0, NULL); 1784 break; 1785 case BC_ACQUIRE: 1786 debug_string = "Acquire"; 1787 binder_inc_ref(ref, 1, NULL); 1788 break; 1789 case BC_RELEASE: 1790 debug_string = "Release"; 1791 binder_dec_ref(ref, 1); 1792 break; 1793 case BC_DECREFS: 1794 default: 1795 debug_string = "DecRefs"; 1796 binder_dec_ref(ref, 0); 1797 break; 1798 } 1799 binder_debug(BINDER_DEBUG_USER_REFS, 1800 "%d:%d %s ref %d desc %d s %d w %d for node %d\n", 1801 proc->pid, thread->pid, debug_string, ref->debug_id, 1802 ref->desc, ref->strong, ref->weak, ref->node->debug_id); 1803 break; 1804 } 1805 case BC_INCREFS_DONE: 1806 case BC_ACQUIRE_DONE: { 1807 binder_uintptr_t node_ptr; 1808 binder_uintptr_t cookie; 1809 struct binder_node *node; 1810 1811 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 1812 return -EFAULT; 1813 ptr += sizeof(binder_uintptr_t); 1814 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 1815 return -EFAULT; 1816 ptr += sizeof(binder_uintptr_t); 1817 node = binder_get_node(proc, node_ptr); 1818 if (node == NULL) { 1819 binder_user_error("%d:%d %s u%016llx no match\n", 1820 proc->pid, thread->pid, 1821 cmd == BC_INCREFS_DONE ? 1822 "BC_INCREFS_DONE" : 1823 "BC_ACQUIRE_DONE", 1824 (u64)node_ptr); 1825 break; 1826 } 1827 if (cookie != node->cookie) { 1828 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 1829 proc->pid, thread->pid, 1830 cmd == BC_INCREFS_DONE ? 1831 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1832 (u64)node_ptr, node->debug_id, 1833 (u64)cookie, (u64)node->cookie); 1834 break; 1835 } 1836 if (cmd == BC_ACQUIRE_DONE) { 1837 if (node->pending_strong_ref == 0) { 1838 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 1839 proc->pid, thread->pid, 1840 node->debug_id); 1841 break; 1842 } 1843 node->pending_strong_ref = 0; 1844 } else { 1845 if (node->pending_weak_ref == 0) { 1846 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 1847 proc->pid, thread->pid, 1848 node->debug_id); 1849 break; 1850 } 1851 node->pending_weak_ref = 0; 1852 } 1853 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 1854 binder_debug(BINDER_DEBUG_USER_REFS, 1855 "%d:%d %s node %d ls %d lw %d\n", 1856 proc->pid, thread->pid, 1857 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1858 node->debug_id, node->local_strong_refs, node->local_weak_refs); 1859 break; 1860 } 1861 case BC_ATTEMPT_ACQUIRE: 1862 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 1863 return -EINVAL; 1864 case BC_ACQUIRE_RESULT: 1865 pr_err("BC_ACQUIRE_RESULT not supported\n"); 1866 return -EINVAL; 1867 1868 case BC_FREE_BUFFER: { 1869 binder_uintptr_t data_ptr; 1870 struct binder_buffer *buffer; 1871 1872 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 1873 return -EFAULT; 1874 ptr += sizeof(binder_uintptr_t); 1875 1876 buffer = binder_buffer_lookup(proc, data_ptr); 1877 if (buffer == NULL) { 1878 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 1879 proc->pid, thread->pid, (u64)data_ptr); 1880 break; 1881 } 1882 if (!buffer->allow_user_free) { 1883 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 1884 proc->pid, thread->pid, (u64)data_ptr); 1885 break; 1886 } 1887 binder_debug(BINDER_DEBUG_FREE_BUFFER, 1888 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 1889 proc->pid, thread->pid, (u64)data_ptr, 1890 buffer->debug_id, 1891 buffer->transaction ? "active" : "finished"); 1892 1893 if (buffer->transaction) { 1894 buffer->transaction->buffer = NULL; 1895 buffer->transaction = NULL; 1896 } 1897 if (buffer->async_transaction && buffer->target_node) { 1898 BUG_ON(!buffer->target_node->has_async_transaction); 1899 if (list_empty(&buffer->target_node->async_todo)) 1900 buffer->target_node->has_async_transaction = 0; 1901 else 1902 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 1903 } 1904 trace_binder_transaction_buffer_release(buffer); 1905 binder_transaction_buffer_release(proc, buffer, NULL); 1906 binder_free_buf(proc, buffer); 1907 break; 1908 } 1909 1910 case BC_TRANSACTION: 1911 case BC_REPLY: { 1912 struct binder_transaction_data tr; 1913 1914 if (copy_from_user(&tr, ptr, sizeof(tr))) 1915 return -EFAULT; 1916 ptr += sizeof(tr); 1917 binder_transaction(proc, thread, &tr, cmd == BC_REPLY); 1918 break; 1919 } 1920 1921 case BC_REGISTER_LOOPER: 1922 binder_debug(BINDER_DEBUG_THREADS, 1923 "%d:%d BC_REGISTER_LOOPER\n", 1924 proc->pid, thread->pid); 1925 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 1926 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1927 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 1928 proc->pid, thread->pid); 1929 } else if (proc->requested_threads == 0) { 1930 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1931 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 1932 proc->pid, thread->pid); 1933 } else { 1934 proc->requested_threads--; 1935 proc->requested_threads_started++; 1936 } 1937 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 1938 break; 1939 case BC_ENTER_LOOPER: 1940 binder_debug(BINDER_DEBUG_THREADS, 1941 "%d:%d BC_ENTER_LOOPER\n", 1942 proc->pid, thread->pid); 1943 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 1944 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1945 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 1946 proc->pid, thread->pid); 1947 } 1948 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 1949 break; 1950 case BC_EXIT_LOOPER: 1951 binder_debug(BINDER_DEBUG_THREADS, 1952 "%d:%d BC_EXIT_LOOPER\n", 1953 proc->pid, thread->pid); 1954 thread->looper |= BINDER_LOOPER_STATE_EXITED; 1955 break; 1956 1957 case BC_REQUEST_DEATH_NOTIFICATION: 1958 case BC_CLEAR_DEATH_NOTIFICATION: { 1959 uint32_t target; 1960 binder_uintptr_t cookie; 1961 struct binder_ref *ref; 1962 struct binder_ref_death *death; 1963 1964 if (get_user(target, (uint32_t __user *)ptr)) 1965 return -EFAULT; 1966 ptr += sizeof(uint32_t); 1967 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 1968 return -EFAULT; 1969 ptr += sizeof(binder_uintptr_t); 1970 ref = binder_get_ref(proc, target); 1971 if (ref == NULL) { 1972 binder_user_error("%d:%d %s invalid ref %d\n", 1973 proc->pid, thread->pid, 1974 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 1975 "BC_REQUEST_DEATH_NOTIFICATION" : 1976 "BC_CLEAR_DEATH_NOTIFICATION", 1977 target); 1978 break; 1979 } 1980 1981 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 1982 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 1983 proc->pid, thread->pid, 1984 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 1985 "BC_REQUEST_DEATH_NOTIFICATION" : 1986 "BC_CLEAR_DEATH_NOTIFICATION", 1987 (u64)cookie, ref->debug_id, ref->desc, 1988 ref->strong, ref->weak, ref->node->debug_id); 1989 1990 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 1991 if (ref->death) { 1992 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 1993 proc->pid, thread->pid); 1994 break; 1995 } 1996 death = kzalloc(sizeof(*death), GFP_KERNEL); 1997 if (death == NULL) { 1998 thread->return_error = BR_ERROR; 1999 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2000 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 2001 proc->pid, thread->pid); 2002 break; 2003 } 2004 binder_stats_created(BINDER_STAT_DEATH); 2005 INIT_LIST_HEAD(&death->work.entry); 2006 death->cookie = cookie; 2007 ref->death = death; 2008 if (ref->node->proc == NULL) { 2009 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2010 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2011 list_add_tail(&ref->death->work.entry, &thread->todo); 2012 } else { 2013 list_add_tail(&ref->death->work.entry, &proc->todo); 2014 wake_up_interruptible(&proc->wait); 2015 } 2016 } 2017 } else { 2018 if (ref->death == NULL) { 2019 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 2020 proc->pid, thread->pid); 2021 break; 2022 } 2023 death = ref->death; 2024 if (death->cookie != cookie) { 2025 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 2026 proc->pid, thread->pid, 2027 (u64)death->cookie, 2028 (u64)cookie); 2029 break; 2030 } 2031 ref->death = NULL; 2032 if (list_empty(&death->work.entry)) { 2033 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2034 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2035 list_add_tail(&death->work.entry, &thread->todo); 2036 } else { 2037 list_add_tail(&death->work.entry, &proc->todo); 2038 wake_up_interruptible(&proc->wait); 2039 } 2040 } else { 2041 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 2042 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 2043 } 2044 } 2045 } break; 2046 case BC_DEAD_BINDER_DONE: { 2047 struct binder_work *w; 2048 binder_uintptr_t cookie; 2049 struct binder_ref_death *death = NULL; 2050 2051 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2052 return -EFAULT; 2053 2054 ptr += sizeof(void *); 2055 list_for_each_entry(w, &proc->delivered_death, entry) { 2056 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2057 2058 if (tmp_death->cookie == cookie) { 2059 death = tmp_death; 2060 break; 2061 } 2062 } 2063 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2064 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 2065 proc->pid, thread->pid, (u64)cookie, 2066 death); 2067 if (death == NULL) { 2068 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 2069 proc->pid, thread->pid, (u64)cookie); 2070 break; 2071 } 2072 2073 list_del_init(&death->work.entry); 2074 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 2075 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2076 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2077 list_add_tail(&death->work.entry, &thread->todo); 2078 } else { 2079 list_add_tail(&death->work.entry, &proc->todo); 2080 wake_up_interruptible(&proc->wait); 2081 } 2082 } 2083 } break; 2084 2085 default: 2086 pr_err("%d:%d unknown command %d\n", 2087 proc->pid, thread->pid, cmd); 2088 return -EINVAL; 2089 } 2090 *consumed = ptr - buffer; 2091 } 2092 return 0; 2093 } 2094 2095 static void binder_stat_br(struct binder_proc *proc, 2096 struct binder_thread *thread, uint32_t cmd) 2097 { 2098 trace_binder_return(cmd); 2099 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 2100 binder_stats.br[_IOC_NR(cmd)]++; 2101 proc->stats.br[_IOC_NR(cmd)]++; 2102 thread->stats.br[_IOC_NR(cmd)]++; 2103 } 2104 } 2105 2106 static int binder_has_proc_work(struct binder_proc *proc, 2107 struct binder_thread *thread) 2108 { 2109 return !list_empty(&proc->todo) || 2110 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2111 } 2112 2113 static int binder_has_thread_work(struct binder_thread *thread) 2114 { 2115 return !list_empty(&thread->todo) || thread->return_error != BR_OK || 2116 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2117 } 2118 2119 static int binder_thread_read(struct binder_proc *proc, 2120 struct binder_thread *thread, 2121 binder_uintptr_t binder_buffer, size_t size, 2122 binder_size_t *consumed, int non_block) 2123 { 2124 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2125 void __user *ptr = buffer + *consumed; 2126 void __user *end = buffer + size; 2127 2128 int ret = 0; 2129 int wait_for_proc_work; 2130 2131 if (*consumed == 0) { 2132 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 2133 return -EFAULT; 2134 ptr += sizeof(uint32_t); 2135 } 2136 2137 retry: 2138 wait_for_proc_work = thread->transaction_stack == NULL && 2139 list_empty(&thread->todo); 2140 2141 if (thread->return_error != BR_OK && ptr < end) { 2142 if (thread->return_error2 != BR_OK) { 2143 if (put_user(thread->return_error2, (uint32_t __user *)ptr)) 2144 return -EFAULT; 2145 ptr += sizeof(uint32_t); 2146 binder_stat_br(proc, thread, thread->return_error2); 2147 if (ptr == end) 2148 goto done; 2149 thread->return_error2 = BR_OK; 2150 } 2151 if (put_user(thread->return_error, (uint32_t __user *)ptr)) 2152 return -EFAULT; 2153 ptr += sizeof(uint32_t); 2154 binder_stat_br(proc, thread, thread->return_error); 2155 thread->return_error = BR_OK; 2156 goto done; 2157 } 2158 2159 2160 thread->looper |= BINDER_LOOPER_STATE_WAITING; 2161 if (wait_for_proc_work) 2162 proc->ready_threads++; 2163 2164 binder_unlock(__func__); 2165 2166 trace_binder_wait_for_work(wait_for_proc_work, 2167 !!thread->transaction_stack, 2168 !list_empty(&thread->todo)); 2169 if (wait_for_proc_work) { 2170 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2171 BINDER_LOOPER_STATE_ENTERED))) { 2172 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 2173 proc->pid, thread->pid, thread->looper); 2174 wait_event_interruptible(binder_user_error_wait, 2175 binder_stop_on_user_error < 2); 2176 } 2177 binder_set_nice(proc->default_priority); 2178 if (non_block) { 2179 if (!binder_has_proc_work(proc, thread)) 2180 ret = -EAGAIN; 2181 } else 2182 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); 2183 } else { 2184 if (non_block) { 2185 if (!binder_has_thread_work(thread)) 2186 ret = -EAGAIN; 2187 } else 2188 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); 2189 } 2190 2191 binder_lock(__func__); 2192 2193 if (wait_for_proc_work) 2194 proc->ready_threads--; 2195 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 2196 2197 if (ret) 2198 return ret; 2199 2200 while (1) { 2201 uint32_t cmd; 2202 struct binder_transaction_data tr; 2203 struct binder_work *w; 2204 struct binder_transaction *t = NULL; 2205 2206 if (!list_empty(&thread->todo)) { 2207 w = list_first_entry(&thread->todo, struct binder_work, 2208 entry); 2209 } else if (!list_empty(&proc->todo) && wait_for_proc_work) { 2210 w = list_first_entry(&proc->todo, struct binder_work, 2211 entry); 2212 } else { 2213 /* no data added */ 2214 if (ptr - buffer == 4 && 2215 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) 2216 goto retry; 2217 break; 2218 } 2219 2220 if (end - ptr < sizeof(tr) + 4) 2221 break; 2222 2223 switch (w->type) { 2224 case BINDER_WORK_TRANSACTION: { 2225 t = container_of(w, struct binder_transaction, work); 2226 } break; 2227 case BINDER_WORK_TRANSACTION_COMPLETE: { 2228 cmd = BR_TRANSACTION_COMPLETE; 2229 if (put_user(cmd, (uint32_t __user *)ptr)) 2230 return -EFAULT; 2231 ptr += sizeof(uint32_t); 2232 2233 binder_stat_br(proc, thread, cmd); 2234 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 2235 "%d:%d BR_TRANSACTION_COMPLETE\n", 2236 proc->pid, thread->pid); 2237 2238 list_del(&w->entry); 2239 kfree(w); 2240 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2241 } break; 2242 case BINDER_WORK_NODE: { 2243 struct binder_node *node = container_of(w, struct binder_node, work); 2244 uint32_t cmd = BR_NOOP; 2245 const char *cmd_name; 2246 int strong = node->internal_strong_refs || node->local_strong_refs; 2247 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; 2248 2249 if (weak && !node->has_weak_ref) { 2250 cmd = BR_INCREFS; 2251 cmd_name = "BR_INCREFS"; 2252 node->has_weak_ref = 1; 2253 node->pending_weak_ref = 1; 2254 node->local_weak_refs++; 2255 } else if (strong && !node->has_strong_ref) { 2256 cmd = BR_ACQUIRE; 2257 cmd_name = "BR_ACQUIRE"; 2258 node->has_strong_ref = 1; 2259 node->pending_strong_ref = 1; 2260 node->local_strong_refs++; 2261 } else if (!strong && node->has_strong_ref) { 2262 cmd = BR_RELEASE; 2263 cmd_name = "BR_RELEASE"; 2264 node->has_strong_ref = 0; 2265 } else if (!weak && node->has_weak_ref) { 2266 cmd = BR_DECREFS; 2267 cmd_name = "BR_DECREFS"; 2268 node->has_weak_ref = 0; 2269 } 2270 if (cmd != BR_NOOP) { 2271 if (put_user(cmd, (uint32_t __user *)ptr)) 2272 return -EFAULT; 2273 ptr += sizeof(uint32_t); 2274 if (put_user(node->ptr, 2275 (binder_uintptr_t __user *)ptr)) 2276 return -EFAULT; 2277 ptr += sizeof(binder_uintptr_t); 2278 if (put_user(node->cookie, 2279 (binder_uintptr_t __user *)ptr)) 2280 return -EFAULT; 2281 ptr += sizeof(binder_uintptr_t); 2282 2283 binder_stat_br(proc, thread, cmd); 2284 binder_debug(BINDER_DEBUG_USER_REFS, 2285 "%d:%d %s %d u%016llx c%016llx\n", 2286 proc->pid, thread->pid, cmd_name, 2287 node->debug_id, 2288 (u64)node->ptr, (u64)node->cookie); 2289 } else { 2290 list_del_init(&w->entry); 2291 if (!weak && !strong) { 2292 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2293 "%d:%d node %d u%016llx c%016llx deleted\n", 2294 proc->pid, thread->pid, 2295 node->debug_id, 2296 (u64)node->ptr, 2297 (u64)node->cookie); 2298 rb_erase(&node->rb_node, &proc->nodes); 2299 kfree(node); 2300 binder_stats_deleted(BINDER_STAT_NODE); 2301 } else { 2302 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2303 "%d:%d node %d u%016llx c%016llx state unchanged\n", 2304 proc->pid, thread->pid, 2305 node->debug_id, 2306 (u64)node->ptr, 2307 (u64)node->cookie); 2308 } 2309 } 2310 } break; 2311 case BINDER_WORK_DEAD_BINDER: 2312 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2313 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2314 struct binder_ref_death *death; 2315 uint32_t cmd; 2316 2317 death = container_of(w, struct binder_ref_death, work); 2318 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 2319 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 2320 else 2321 cmd = BR_DEAD_BINDER; 2322 if (put_user(cmd, (uint32_t __user *)ptr)) 2323 return -EFAULT; 2324 ptr += sizeof(uint32_t); 2325 if (put_user(death->cookie, 2326 (binder_uintptr_t __user *)ptr)) 2327 return -EFAULT; 2328 ptr += sizeof(binder_uintptr_t); 2329 binder_stat_br(proc, thread, cmd); 2330 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2331 "%d:%d %s %016llx\n", 2332 proc->pid, thread->pid, 2333 cmd == BR_DEAD_BINDER ? 2334 "BR_DEAD_BINDER" : 2335 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 2336 (u64)death->cookie); 2337 2338 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 2339 list_del(&w->entry); 2340 kfree(death); 2341 binder_stats_deleted(BINDER_STAT_DEATH); 2342 } else 2343 list_move(&w->entry, &proc->delivered_death); 2344 if (cmd == BR_DEAD_BINDER) 2345 goto done; /* DEAD_BINDER notifications can cause transactions */ 2346 } break; 2347 } 2348 2349 if (!t) 2350 continue; 2351 2352 BUG_ON(t->buffer == NULL); 2353 if (t->buffer->target_node) { 2354 struct binder_node *target_node = t->buffer->target_node; 2355 2356 tr.target.ptr = target_node->ptr; 2357 tr.cookie = target_node->cookie; 2358 t->saved_priority = task_nice(current); 2359 if (t->priority < target_node->min_priority && 2360 !(t->flags & TF_ONE_WAY)) 2361 binder_set_nice(t->priority); 2362 else if (!(t->flags & TF_ONE_WAY) || 2363 t->saved_priority > target_node->min_priority) 2364 binder_set_nice(target_node->min_priority); 2365 cmd = BR_TRANSACTION; 2366 } else { 2367 tr.target.ptr = 0; 2368 tr.cookie = 0; 2369 cmd = BR_REPLY; 2370 } 2371 tr.code = t->code; 2372 tr.flags = t->flags; 2373 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 2374 2375 if (t->from) { 2376 struct task_struct *sender = t->from->proc->tsk; 2377 2378 tr.sender_pid = task_tgid_nr_ns(sender, 2379 task_active_pid_ns(current)); 2380 } else { 2381 tr.sender_pid = 0; 2382 } 2383 2384 tr.data_size = t->buffer->data_size; 2385 tr.offsets_size = t->buffer->offsets_size; 2386 tr.data.ptr.buffer = (binder_uintptr_t)( 2387 (uintptr_t)t->buffer->data + 2388 proc->user_buffer_offset); 2389 tr.data.ptr.offsets = tr.data.ptr.buffer + 2390 ALIGN(t->buffer->data_size, 2391 sizeof(void *)); 2392 2393 if (put_user(cmd, (uint32_t __user *)ptr)) 2394 return -EFAULT; 2395 ptr += sizeof(uint32_t); 2396 if (copy_to_user(ptr, &tr, sizeof(tr))) 2397 return -EFAULT; 2398 ptr += sizeof(tr); 2399 2400 trace_binder_transaction_received(t); 2401 binder_stat_br(proc, thread, cmd); 2402 binder_debug(BINDER_DEBUG_TRANSACTION, 2403 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 2404 proc->pid, thread->pid, 2405 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 2406 "BR_REPLY", 2407 t->debug_id, t->from ? t->from->proc->pid : 0, 2408 t->from ? t->from->pid : 0, cmd, 2409 t->buffer->data_size, t->buffer->offsets_size, 2410 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 2411 2412 list_del(&t->work.entry); 2413 t->buffer->allow_user_free = 1; 2414 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 2415 t->to_parent = thread->transaction_stack; 2416 t->to_thread = thread; 2417 thread->transaction_stack = t; 2418 } else { 2419 t->buffer->transaction = NULL; 2420 kfree(t); 2421 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2422 } 2423 break; 2424 } 2425 2426 done: 2427 2428 *consumed = ptr - buffer; 2429 if (proc->requested_threads + proc->ready_threads == 0 && 2430 proc->requested_threads_started < proc->max_threads && 2431 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2432 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 2433 /*spawn a new thread if we leave this out */) { 2434 proc->requested_threads++; 2435 binder_debug(BINDER_DEBUG_THREADS, 2436 "%d:%d BR_SPAWN_LOOPER\n", 2437 proc->pid, thread->pid); 2438 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 2439 return -EFAULT; 2440 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 2441 } 2442 return 0; 2443 } 2444 2445 static void binder_release_work(struct list_head *list) 2446 { 2447 struct binder_work *w; 2448 2449 while (!list_empty(list)) { 2450 w = list_first_entry(list, struct binder_work, entry); 2451 list_del_init(&w->entry); 2452 switch (w->type) { 2453 case BINDER_WORK_TRANSACTION: { 2454 struct binder_transaction *t; 2455 2456 t = container_of(w, struct binder_transaction, work); 2457 if (t->buffer->target_node && 2458 !(t->flags & TF_ONE_WAY)) { 2459 binder_send_failed_reply(t, BR_DEAD_REPLY); 2460 } else { 2461 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2462 "undelivered transaction %d\n", 2463 t->debug_id); 2464 t->buffer->transaction = NULL; 2465 kfree(t); 2466 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2467 } 2468 } break; 2469 case BINDER_WORK_TRANSACTION_COMPLETE: { 2470 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2471 "undelivered TRANSACTION_COMPLETE\n"); 2472 kfree(w); 2473 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2474 } break; 2475 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2476 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2477 struct binder_ref_death *death; 2478 2479 death = container_of(w, struct binder_ref_death, work); 2480 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2481 "undelivered death notification, %016llx\n", 2482 (u64)death->cookie); 2483 kfree(death); 2484 binder_stats_deleted(BINDER_STAT_DEATH); 2485 } break; 2486 default: 2487 pr_err("unexpected work type, %d, not freed\n", 2488 w->type); 2489 break; 2490 } 2491 } 2492 2493 } 2494 2495 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 2496 { 2497 struct binder_thread *thread = NULL; 2498 struct rb_node *parent = NULL; 2499 struct rb_node **p = &proc->threads.rb_node; 2500 2501 while (*p) { 2502 parent = *p; 2503 thread = rb_entry(parent, struct binder_thread, rb_node); 2504 2505 if (current->pid < thread->pid) 2506 p = &(*p)->rb_left; 2507 else if (current->pid > thread->pid) 2508 p = &(*p)->rb_right; 2509 else 2510 break; 2511 } 2512 if (*p == NULL) { 2513 thread = kzalloc(sizeof(*thread), GFP_KERNEL); 2514 if (thread == NULL) 2515 return NULL; 2516 binder_stats_created(BINDER_STAT_THREAD); 2517 thread->proc = proc; 2518 thread->pid = current->pid; 2519 init_waitqueue_head(&thread->wait); 2520 INIT_LIST_HEAD(&thread->todo); 2521 rb_link_node(&thread->rb_node, parent, p); 2522 rb_insert_color(&thread->rb_node, &proc->threads); 2523 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 2524 thread->return_error = BR_OK; 2525 thread->return_error2 = BR_OK; 2526 } 2527 return thread; 2528 } 2529 2530 static int binder_free_thread(struct binder_proc *proc, 2531 struct binder_thread *thread) 2532 { 2533 struct binder_transaction *t; 2534 struct binder_transaction *send_reply = NULL; 2535 int active_transactions = 0; 2536 2537 rb_erase(&thread->rb_node, &proc->threads); 2538 t = thread->transaction_stack; 2539 if (t && t->to_thread == thread) 2540 send_reply = t; 2541 while (t) { 2542 active_transactions++; 2543 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2544 "release %d:%d transaction %d %s, still active\n", 2545 proc->pid, thread->pid, 2546 t->debug_id, 2547 (t->to_thread == thread) ? "in" : "out"); 2548 2549 if (t->to_thread == thread) { 2550 t->to_proc = NULL; 2551 t->to_thread = NULL; 2552 if (t->buffer) { 2553 t->buffer->transaction = NULL; 2554 t->buffer = NULL; 2555 } 2556 t = t->to_parent; 2557 } else if (t->from == thread) { 2558 t->from = NULL; 2559 t = t->from_parent; 2560 } else 2561 BUG(); 2562 } 2563 if (send_reply) 2564 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 2565 binder_release_work(&thread->todo); 2566 kfree(thread); 2567 binder_stats_deleted(BINDER_STAT_THREAD); 2568 return active_transactions; 2569 } 2570 2571 static unsigned int binder_poll(struct file *filp, 2572 struct poll_table_struct *wait) 2573 { 2574 struct binder_proc *proc = filp->private_data; 2575 struct binder_thread *thread = NULL; 2576 int wait_for_proc_work; 2577 2578 binder_lock(__func__); 2579 2580 thread = binder_get_thread(proc); 2581 2582 wait_for_proc_work = thread->transaction_stack == NULL && 2583 list_empty(&thread->todo) && thread->return_error == BR_OK; 2584 2585 binder_unlock(__func__); 2586 2587 if (wait_for_proc_work) { 2588 if (binder_has_proc_work(proc, thread)) 2589 return POLLIN; 2590 poll_wait(filp, &proc->wait, wait); 2591 if (binder_has_proc_work(proc, thread)) 2592 return POLLIN; 2593 } else { 2594 if (binder_has_thread_work(thread)) 2595 return POLLIN; 2596 poll_wait(filp, &thread->wait, wait); 2597 if (binder_has_thread_work(thread)) 2598 return POLLIN; 2599 } 2600 return 0; 2601 } 2602 2603 static int binder_ioctl_write_read(struct file *filp, 2604 unsigned int cmd, unsigned long arg, 2605 struct binder_thread *thread) 2606 { 2607 int ret = 0; 2608 struct binder_proc *proc = filp->private_data; 2609 unsigned int size = _IOC_SIZE(cmd); 2610 void __user *ubuf = (void __user *)arg; 2611 struct binder_write_read bwr; 2612 2613 if (size != sizeof(struct binder_write_read)) { 2614 ret = -EINVAL; 2615 goto out; 2616 } 2617 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 2618 ret = -EFAULT; 2619 goto out; 2620 } 2621 binder_debug(BINDER_DEBUG_READ_WRITE, 2622 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 2623 proc->pid, thread->pid, 2624 (u64)bwr.write_size, (u64)bwr.write_buffer, 2625 (u64)bwr.read_size, (u64)bwr.read_buffer); 2626 2627 if (bwr.write_size > 0) { 2628 ret = binder_thread_write(proc, thread, 2629 bwr.write_buffer, 2630 bwr.write_size, 2631 &bwr.write_consumed); 2632 trace_binder_write_done(ret); 2633 if (ret < 0) { 2634 bwr.read_consumed = 0; 2635 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2636 ret = -EFAULT; 2637 goto out; 2638 } 2639 } 2640 if (bwr.read_size > 0) { 2641 ret = binder_thread_read(proc, thread, bwr.read_buffer, 2642 bwr.read_size, 2643 &bwr.read_consumed, 2644 filp->f_flags & O_NONBLOCK); 2645 trace_binder_read_done(ret); 2646 if (!list_empty(&proc->todo)) 2647 wake_up_interruptible(&proc->wait); 2648 if (ret < 0) { 2649 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2650 ret = -EFAULT; 2651 goto out; 2652 } 2653 } 2654 binder_debug(BINDER_DEBUG_READ_WRITE, 2655 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 2656 proc->pid, thread->pid, 2657 (u64)bwr.write_consumed, (u64)bwr.write_size, 2658 (u64)bwr.read_consumed, (u64)bwr.read_size); 2659 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 2660 ret = -EFAULT; 2661 goto out; 2662 } 2663 out: 2664 return ret; 2665 } 2666 2667 static int binder_ioctl_set_ctx_mgr(struct file *filp) 2668 { 2669 int ret = 0; 2670 struct binder_proc *proc = filp->private_data; 2671 kuid_t curr_euid = current_euid(); 2672 2673 if (binder_context_mgr_node != NULL) { 2674 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 2675 ret = -EBUSY; 2676 goto out; 2677 } 2678 if (uid_valid(binder_context_mgr_uid)) { 2679 if (!uid_eq(binder_context_mgr_uid, curr_euid)) { 2680 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 2681 from_kuid(&init_user_ns, curr_euid), 2682 from_kuid(&init_user_ns, 2683 binder_context_mgr_uid)); 2684 ret = -EPERM; 2685 goto out; 2686 } 2687 } else { 2688 binder_context_mgr_uid = curr_euid; 2689 } 2690 binder_context_mgr_node = binder_new_node(proc, 0, 0); 2691 if (binder_context_mgr_node == NULL) { 2692 ret = -ENOMEM; 2693 goto out; 2694 } 2695 binder_context_mgr_node->local_weak_refs++; 2696 binder_context_mgr_node->local_strong_refs++; 2697 binder_context_mgr_node->has_strong_ref = 1; 2698 binder_context_mgr_node->has_weak_ref = 1; 2699 out: 2700 return ret; 2701 } 2702 2703 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2704 { 2705 int ret; 2706 struct binder_proc *proc = filp->private_data; 2707 struct binder_thread *thread; 2708 unsigned int size = _IOC_SIZE(cmd); 2709 void __user *ubuf = (void __user *)arg; 2710 2711 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 2712 proc->pid, current->pid, cmd, arg);*/ 2713 2714 trace_binder_ioctl(cmd, arg); 2715 2716 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2717 if (ret) 2718 goto err_unlocked; 2719 2720 binder_lock(__func__); 2721 thread = binder_get_thread(proc); 2722 if (thread == NULL) { 2723 ret = -ENOMEM; 2724 goto err; 2725 } 2726 2727 switch (cmd) { 2728 case BINDER_WRITE_READ: 2729 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 2730 if (ret) 2731 goto err; 2732 break; 2733 case BINDER_SET_MAX_THREADS: 2734 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { 2735 ret = -EINVAL; 2736 goto err; 2737 } 2738 break; 2739 case BINDER_SET_CONTEXT_MGR: 2740 ret = binder_ioctl_set_ctx_mgr(filp); 2741 if (ret) 2742 goto err; 2743 break; 2744 case BINDER_THREAD_EXIT: 2745 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 2746 proc->pid, thread->pid); 2747 binder_free_thread(proc, thread); 2748 thread = NULL; 2749 break; 2750 case BINDER_VERSION: { 2751 struct binder_version __user *ver = ubuf; 2752 2753 if (size != sizeof(struct binder_version)) { 2754 ret = -EINVAL; 2755 goto err; 2756 } 2757 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 2758 &ver->protocol_version)) { 2759 ret = -EINVAL; 2760 goto err; 2761 } 2762 break; 2763 } 2764 default: 2765 ret = -EINVAL; 2766 goto err; 2767 } 2768 ret = 0; 2769 err: 2770 if (thread) 2771 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 2772 binder_unlock(__func__); 2773 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2774 if (ret && ret != -ERESTARTSYS) 2775 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 2776 err_unlocked: 2777 trace_binder_ioctl_done(ret); 2778 return ret; 2779 } 2780 2781 static void binder_vma_open(struct vm_area_struct *vma) 2782 { 2783 struct binder_proc *proc = vma->vm_private_data; 2784 2785 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2786 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2787 proc->pid, vma->vm_start, vma->vm_end, 2788 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2789 (unsigned long)pgprot_val(vma->vm_page_prot)); 2790 } 2791 2792 static void binder_vma_close(struct vm_area_struct *vma) 2793 { 2794 struct binder_proc *proc = vma->vm_private_data; 2795 2796 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2797 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2798 proc->pid, vma->vm_start, vma->vm_end, 2799 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2800 (unsigned long)pgprot_val(vma->vm_page_prot)); 2801 proc->vma = NULL; 2802 proc->vma_vm_mm = NULL; 2803 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 2804 } 2805 2806 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2807 { 2808 return VM_FAULT_SIGBUS; 2809 } 2810 2811 static struct vm_operations_struct binder_vm_ops = { 2812 .open = binder_vma_open, 2813 .close = binder_vma_close, 2814 .fault = binder_vm_fault, 2815 }; 2816 2817 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 2818 { 2819 int ret; 2820 struct vm_struct *area; 2821 struct binder_proc *proc = filp->private_data; 2822 const char *failure_string; 2823 struct binder_buffer *buffer; 2824 2825 if (proc->tsk != current) 2826 return -EINVAL; 2827 2828 if ((vma->vm_end - vma->vm_start) > SZ_4M) 2829 vma->vm_end = vma->vm_start + SZ_4M; 2830 2831 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2832 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 2833 proc->pid, vma->vm_start, vma->vm_end, 2834 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2835 (unsigned long)pgprot_val(vma->vm_page_prot)); 2836 2837 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 2838 ret = -EPERM; 2839 failure_string = "bad vm_flags"; 2840 goto err_bad_arg; 2841 } 2842 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2843 2844 mutex_lock(&binder_mmap_lock); 2845 if (proc->buffer) { 2846 ret = -EBUSY; 2847 failure_string = "already mapped"; 2848 goto err_already_mapped; 2849 } 2850 2851 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 2852 if (area == NULL) { 2853 ret = -ENOMEM; 2854 failure_string = "get_vm_area"; 2855 goto err_get_vm_area_failed; 2856 } 2857 proc->buffer = area->addr; 2858 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 2859 mutex_unlock(&binder_mmap_lock); 2860 2861 #ifdef CONFIG_CPU_CACHE_VIPT 2862 if (cache_is_vipt_aliasing()) { 2863 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { 2864 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); 2865 vma->vm_start += PAGE_SIZE; 2866 } 2867 } 2868 #endif 2869 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); 2870 if (proc->pages == NULL) { 2871 ret = -ENOMEM; 2872 failure_string = "alloc page array"; 2873 goto err_alloc_pages_failed; 2874 } 2875 proc->buffer_size = vma->vm_end - vma->vm_start; 2876 2877 vma->vm_ops = &binder_vm_ops; 2878 vma->vm_private_data = proc; 2879 2880 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { 2881 ret = -ENOMEM; 2882 failure_string = "alloc small buf"; 2883 goto err_alloc_small_buf_failed; 2884 } 2885 buffer = proc->buffer; 2886 INIT_LIST_HEAD(&proc->buffers); 2887 list_add(&buffer->entry, &proc->buffers); 2888 buffer->free = 1; 2889 binder_insert_free_buffer(proc, buffer); 2890 proc->free_async_space = proc->buffer_size / 2; 2891 barrier(); 2892 proc->files = get_files_struct(current); 2893 proc->vma = vma; 2894 proc->vma_vm_mm = vma->vm_mm; 2895 2896 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", 2897 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 2898 return 0; 2899 2900 err_alloc_small_buf_failed: 2901 kfree(proc->pages); 2902 proc->pages = NULL; 2903 err_alloc_pages_failed: 2904 mutex_lock(&binder_mmap_lock); 2905 vfree(proc->buffer); 2906 proc->buffer = NULL; 2907 err_get_vm_area_failed: 2908 err_already_mapped: 2909 mutex_unlock(&binder_mmap_lock); 2910 err_bad_arg: 2911 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 2912 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 2913 return ret; 2914 } 2915 2916 static int binder_open(struct inode *nodp, struct file *filp) 2917 { 2918 struct binder_proc *proc; 2919 2920 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 2921 current->group_leader->pid, current->pid); 2922 2923 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 2924 if (proc == NULL) 2925 return -ENOMEM; 2926 get_task_struct(current); 2927 proc->tsk = current; 2928 INIT_LIST_HEAD(&proc->todo); 2929 init_waitqueue_head(&proc->wait); 2930 proc->default_priority = task_nice(current); 2931 2932 binder_lock(__func__); 2933 2934 binder_stats_created(BINDER_STAT_PROC); 2935 hlist_add_head(&proc->proc_node, &binder_procs); 2936 proc->pid = current->group_leader->pid; 2937 INIT_LIST_HEAD(&proc->delivered_death); 2938 filp->private_data = proc; 2939 2940 binder_unlock(__func__); 2941 2942 if (binder_debugfs_dir_entry_proc) { 2943 char strbuf[11]; 2944 2945 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2946 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 2947 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); 2948 } 2949 2950 return 0; 2951 } 2952 2953 static int binder_flush(struct file *filp, fl_owner_t id) 2954 { 2955 struct binder_proc *proc = filp->private_data; 2956 2957 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 2958 2959 return 0; 2960 } 2961 2962 static void binder_deferred_flush(struct binder_proc *proc) 2963 { 2964 struct rb_node *n; 2965 int wake_count = 0; 2966 2967 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 2968 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2969 2970 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 2971 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 2972 wake_up_interruptible(&thread->wait); 2973 wake_count++; 2974 } 2975 } 2976 wake_up_interruptible_all(&proc->wait); 2977 2978 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2979 "binder_flush: %d woke %d threads\n", proc->pid, 2980 wake_count); 2981 } 2982 2983 static int binder_release(struct inode *nodp, struct file *filp) 2984 { 2985 struct binder_proc *proc = filp->private_data; 2986 2987 debugfs_remove(proc->debugfs_entry); 2988 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 2989 2990 return 0; 2991 } 2992 2993 static int binder_node_release(struct binder_node *node, int refs) 2994 { 2995 struct binder_ref *ref; 2996 int death = 0; 2997 2998 list_del_init(&node->work.entry); 2999 binder_release_work(&node->async_todo); 3000 3001 if (hlist_empty(&node->refs)) { 3002 kfree(node); 3003 binder_stats_deleted(BINDER_STAT_NODE); 3004 3005 return refs; 3006 } 3007 3008 node->proc = NULL; 3009 node->local_strong_refs = 0; 3010 node->local_weak_refs = 0; 3011 hlist_add_head(&node->dead_node, &binder_dead_nodes); 3012 3013 hlist_for_each_entry(ref, &node->refs, node_entry) { 3014 refs++; 3015 3016 if (!ref->death) 3017 continue; 3018 3019 death++; 3020 3021 if (list_empty(&ref->death->work.entry)) { 3022 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3023 list_add_tail(&ref->death->work.entry, 3024 &ref->proc->todo); 3025 wake_up_interruptible(&ref->proc->wait); 3026 } else 3027 BUG(); 3028 } 3029 3030 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3031 "node %d now dead, refs %d, death %d\n", 3032 node->debug_id, refs, death); 3033 3034 return refs; 3035 } 3036 3037 static void binder_deferred_release(struct binder_proc *proc) 3038 { 3039 struct binder_transaction *t; 3040 struct rb_node *n; 3041 int threads, nodes, incoming_refs, outgoing_refs, buffers, 3042 active_transactions, page_count; 3043 3044 BUG_ON(proc->vma); 3045 BUG_ON(proc->files); 3046 3047 hlist_del(&proc->proc_node); 3048 3049 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { 3050 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3051 "%s: %d context_mgr_node gone\n", 3052 __func__, proc->pid); 3053 binder_context_mgr_node = NULL; 3054 } 3055 3056 threads = 0; 3057 active_transactions = 0; 3058 while ((n = rb_first(&proc->threads))) { 3059 struct binder_thread *thread; 3060 3061 thread = rb_entry(n, struct binder_thread, rb_node); 3062 threads++; 3063 active_transactions += binder_free_thread(proc, thread); 3064 } 3065 3066 nodes = 0; 3067 incoming_refs = 0; 3068 while ((n = rb_first(&proc->nodes))) { 3069 struct binder_node *node; 3070 3071 node = rb_entry(n, struct binder_node, rb_node); 3072 nodes++; 3073 rb_erase(&node->rb_node, &proc->nodes); 3074 incoming_refs = binder_node_release(node, incoming_refs); 3075 } 3076 3077 outgoing_refs = 0; 3078 while ((n = rb_first(&proc->refs_by_desc))) { 3079 struct binder_ref *ref; 3080 3081 ref = rb_entry(n, struct binder_ref, rb_node_desc); 3082 outgoing_refs++; 3083 binder_delete_ref(ref); 3084 } 3085 3086 binder_release_work(&proc->todo); 3087 binder_release_work(&proc->delivered_death); 3088 3089 buffers = 0; 3090 while ((n = rb_first(&proc->allocated_buffers))) { 3091 struct binder_buffer *buffer; 3092 3093 buffer = rb_entry(n, struct binder_buffer, rb_node); 3094 3095 t = buffer->transaction; 3096 if (t) { 3097 t->buffer = NULL; 3098 buffer->transaction = NULL; 3099 pr_err("release proc %d, transaction %d, not freed\n", 3100 proc->pid, t->debug_id); 3101 /*BUG();*/ 3102 } 3103 3104 binder_free_buf(proc, buffer); 3105 buffers++; 3106 } 3107 3108 binder_stats_deleted(BINDER_STAT_PROC); 3109 3110 page_count = 0; 3111 if (proc->pages) { 3112 int i; 3113 3114 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { 3115 void *page_addr; 3116 3117 if (!proc->pages[i]) 3118 continue; 3119 3120 page_addr = proc->buffer + i * PAGE_SIZE; 3121 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 3122 "%s: %d: page %d at %p not freed\n", 3123 __func__, proc->pid, i, page_addr); 3124 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 3125 __free_page(proc->pages[i]); 3126 page_count++; 3127 } 3128 kfree(proc->pages); 3129 vfree(proc->buffer); 3130 } 3131 3132 put_task_struct(proc->tsk); 3133 3134 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3135 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", 3136 __func__, proc->pid, threads, nodes, incoming_refs, 3137 outgoing_refs, active_transactions, buffers, page_count); 3138 3139 kfree(proc); 3140 } 3141 3142 static void binder_deferred_func(struct work_struct *work) 3143 { 3144 struct binder_proc *proc; 3145 struct files_struct *files; 3146 3147 int defer; 3148 3149 do { 3150 binder_lock(__func__); 3151 mutex_lock(&binder_deferred_lock); 3152 if (!hlist_empty(&binder_deferred_list)) { 3153 proc = hlist_entry(binder_deferred_list.first, 3154 struct binder_proc, deferred_work_node); 3155 hlist_del_init(&proc->deferred_work_node); 3156 defer = proc->deferred_work; 3157 proc->deferred_work = 0; 3158 } else { 3159 proc = NULL; 3160 defer = 0; 3161 } 3162 mutex_unlock(&binder_deferred_lock); 3163 3164 files = NULL; 3165 if (defer & BINDER_DEFERRED_PUT_FILES) { 3166 files = proc->files; 3167 if (files) 3168 proc->files = NULL; 3169 } 3170 3171 if (defer & BINDER_DEFERRED_FLUSH) 3172 binder_deferred_flush(proc); 3173 3174 if (defer & BINDER_DEFERRED_RELEASE) 3175 binder_deferred_release(proc); /* frees proc */ 3176 3177 binder_unlock(__func__); 3178 if (files) 3179 put_files_struct(files); 3180 } while (proc); 3181 } 3182 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 3183 3184 static void 3185 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 3186 { 3187 mutex_lock(&binder_deferred_lock); 3188 proc->deferred_work |= defer; 3189 if (hlist_unhashed(&proc->deferred_work_node)) { 3190 hlist_add_head(&proc->deferred_work_node, 3191 &binder_deferred_list); 3192 queue_work(binder_deferred_workqueue, &binder_deferred_work); 3193 } 3194 mutex_unlock(&binder_deferred_lock); 3195 } 3196 3197 static void print_binder_transaction(struct seq_file *m, const char *prefix, 3198 struct binder_transaction *t) 3199 { 3200 seq_printf(m, 3201 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 3202 prefix, t->debug_id, t, 3203 t->from ? t->from->proc->pid : 0, 3204 t->from ? t->from->pid : 0, 3205 t->to_proc ? t->to_proc->pid : 0, 3206 t->to_thread ? t->to_thread->pid : 0, 3207 t->code, t->flags, t->priority, t->need_reply); 3208 if (t->buffer == NULL) { 3209 seq_puts(m, " buffer free\n"); 3210 return; 3211 } 3212 if (t->buffer->target_node) 3213 seq_printf(m, " node %d", 3214 t->buffer->target_node->debug_id); 3215 seq_printf(m, " size %zd:%zd data %p\n", 3216 t->buffer->data_size, t->buffer->offsets_size, 3217 t->buffer->data); 3218 } 3219 3220 static void print_binder_buffer(struct seq_file *m, const char *prefix, 3221 struct binder_buffer *buffer) 3222 { 3223 seq_printf(m, "%s %d: %p size %zd:%zd %s\n", 3224 prefix, buffer->debug_id, buffer->data, 3225 buffer->data_size, buffer->offsets_size, 3226 buffer->transaction ? "active" : "delivered"); 3227 } 3228 3229 static void print_binder_work(struct seq_file *m, const char *prefix, 3230 const char *transaction_prefix, 3231 struct binder_work *w) 3232 { 3233 struct binder_node *node; 3234 struct binder_transaction *t; 3235 3236 switch (w->type) { 3237 case BINDER_WORK_TRANSACTION: 3238 t = container_of(w, struct binder_transaction, work); 3239 print_binder_transaction(m, transaction_prefix, t); 3240 break; 3241 case BINDER_WORK_TRANSACTION_COMPLETE: 3242 seq_printf(m, "%stransaction complete\n", prefix); 3243 break; 3244 case BINDER_WORK_NODE: 3245 node = container_of(w, struct binder_node, work); 3246 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 3247 prefix, node->debug_id, 3248 (u64)node->ptr, (u64)node->cookie); 3249 break; 3250 case BINDER_WORK_DEAD_BINDER: 3251 seq_printf(m, "%shas dead binder\n", prefix); 3252 break; 3253 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3254 seq_printf(m, "%shas cleared dead binder\n", prefix); 3255 break; 3256 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 3257 seq_printf(m, "%shas cleared death notification\n", prefix); 3258 break; 3259 default: 3260 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 3261 break; 3262 } 3263 } 3264 3265 static void print_binder_thread(struct seq_file *m, 3266 struct binder_thread *thread, 3267 int print_always) 3268 { 3269 struct binder_transaction *t; 3270 struct binder_work *w; 3271 size_t start_pos = m->count; 3272 size_t header_pos; 3273 3274 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); 3275 header_pos = m->count; 3276 t = thread->transaction_stack; 3277 while (t) { 3278 if (t->from == thread) { 3279 print_binder_transaction(m, 3280 " outgoing transaction", t); 3281 t = t->from_parent; 3282 } else if (t->to_thread == thread) { 3283 print_binder_transaction(m, 3284 " incoming transaction", t); 3285 t = t->to_parent; 3286 } else { 3287 print_binder_transaction(m, " bad transaction", t); 3288 t = NULL; 3289 } 3290 } 3291 list_for_each_entry(w, &thread->todo, entry) { 3292 print_binder_work(m, " ", " pending transaction", w); 3293 } 3294 if (!print_always && m->count == header_pos) 3295 m->count = start_pos; 3296 } 3297 3298 static void print_binder_node(struct seq_file *m, struct binder_node *node) 3299 { 3300 struct binder_ref *ref; 3301 struct binder_work *w; 3302 int count; 3303 3304 count = 0; 3305 hlist_for_each_entry(ref, &node->refs, node_entry) 3306 count++; 3307 3308 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", 3309 node->debug_id, (u64)node->ptr, (u64)node->cookie, 3310 node->has_strong_ref, node->has_weak_ref, 3311 node->local_strong_refs, node->local_weak_refs, 3312 node->internal_strong_refs, count); 3313 if (count) { 3314 seq_puts(m, " proc"); 3315 hlist_for_each_entry(ref, &node->refs, node_entry) 3316 seq_printf(m, " %d", ref->proc->pid); 3317 } 3318 seq_puts(m, "\n"); 3319 list_for_each_entry(w, &node->async_todo, entry) 3320 print_binder_work(m, " ", 3321 " pending async transaction", w); 3322 } 3323 3324 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) 3325 { 3326 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", 3327 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", 3328 ref->node->debug_id, ref->strong, ref->weak, ref->death); 3329 } 3330 3331 static void print_binder_proc(struct seq_file *m, 3332 struct binder_proc *proc, int print_all) 3333 { 3334 struct binder_work *w; 3335 struct rb_node *n; 3336 size_t start_pos = m->count; 3337 size_t header_pos; 3338 3339 seq_printf(m, "proc %d\n", proc->pid); 3340 header_pos = m->count; 3341 3342 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3343 print_binder_thread(m, rb_entry(n, struct binder_thread, 3344 rb_node), print_all); 3345 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3346 struct binder_node *node = rb_entry(n, struct binder_node, 3347 rb_node); 3348 if (print_all || node->has_async_transaction) 3349 print_binder_node(m, node); 3350 } 3351 if (print_all) { 3352 for (n = rb_first(&proc->refs_by_desc); 3353 n != NULL; 3354 n = rb_next(n)) 3355 print_binder_ref(m, rb_entry(n, struct binder_ref, 3356 rb_node_desc)); 3357 } 3358 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3359 print_binder_buffer(m, " buffer", 3360 rb_entry(n, struct binder_buffer, rb_node)); 3361 list_for_each_entry(w, &proc->todo, entry) 3362 print_binder_work(m, " ", " pending transaction", w); 3363 list_for_each_entry(w, &proc->delivered_death, entry) { 3364 seq_puts(m, " has delivered dead binder\n"); 3365 break; 3366 } 3367 if (!print_all && m->count == header_pos) 3368 m->count = start_pos; 3369 } 3370 3371 static const char * const binder_return_strings[] = { 3372 "BR_ERROR", 3373 "BR_OK", 3374 "BR_TRANSACTION", 3375 "BR_REPLY", 3376 "BR_ACQUIRE_RESULT", 3377 "BR_DEAD_REPLY", 3378 "BR_TRANSACTION_COMPLETE", 3379 "BR_INCREFS", 3380 "BR_ACQUIRE", 3381 "BR_RELEASE", 3382 "BR_DECREFS", 3383 "BR_ATTEMPT_ACQUIRE", 3384 "BR_NOOP", 3385 "BR_SPAWN_LOOPER", 3386 "BR_FINISHED", 3387 "BR_DEAD_BINDER", 3388 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3389 "BR_FAILED_REPLY" 3390 }; 3391 3392 static const char * const binder_command_strings[] = { 3393 "BC_TRANSACTION", 3394 "BC_REPLY", 3395 "BC_ACQUIRE_RESULT", 3396 "BC_FREE_BUFFER", 3397 "BC_INCREFS", 3398 "BC_ACQUIRE", 3399 "BC_RELEASE", 3400 "BC_DECREFS", 3401 "BC_INCREFS_DONE", 3402 "BC_ACQUIRE_DONE", 3403 "BC_ATTEMPT_ACQUIRE", 3404 "BC_REGISTER_LOOPER", 3405 "BC_ENTER_LOOPER", 3406 "BC_EXIT_LOOPER", 3407 "BC_REQUEST_DEATH_NOTIFICATION", 3408 "BC_CLEAR_DEATH_NOTIFICATION", 3409 "BC_DEAD_BINDER_DONE" 3410 }; 3411 3412 static const char * const binder_objstat_strings[] = { 3413 "proc", 3414 "thread", 3415 "node", 3416 "ref", 3417 "death", 3418 "transaction", 3419 "transaction_complete" 3420 }; 3421 3422 static void print_binder_stats(struct seq_file *m, const char *prefix, 3423 struct binder_stats *stats) 3424 { 3425 int i; 3426 3427 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 3428 ARRAY_SIZE(binder_command_strings)); 3429 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3430 if (stats->bc[i]) 3431 seq_printf(m, "%s%s: %d\n", prefix, 3432 binder_command_strings[i], stats->bc[i]); 3433 } 3434 3435 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3436 ARRAY_SIZE(binder_return_strings)); 3437 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3438 if (stats->br[i]) 3439 seq_printf(m, "%s%s: %d\n", prefix, 3440 binder_return_strings[i], stats->br[i]); 3441 } 3442 3443 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3444 ARRAY_SIZE(binder_objstat_strings)); 3445 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3446 ARRAY_SIZE(stats->obj_deleted)); 3447 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3448 if (stats->obj_created[i] || stats->obj_deleted[i]) 3449 seq_printf(m, "%s%s: active %d total %d\n", prefix, 3450 binder_objstat_strings[i], 3451 stats->obj_created[i] - stats->obj_deleted[i], 3452 stats->obj_created[i]); 3453 } 3454 } 3455 3456 static void print_binder_proc_stats(struct seq_file *m, 3457 struct binder_proc *proc) 3458 { 3459 struct binder_work *w; 3460 struct rb_node *n; 3461 int count, strong, weak; 3462 3463 seq_printf(m, "proc %d\n", proc->pid); 3464 count = 0; 3465 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3466 count++; 3467 seq_printf(m, " threads: %d\n", count); 3468 seq_printf(m, " requested threads: %d+%d/%d\n" 3469 " ready threads %d\n" 3470 " free async space %zd\n", proc->requested_threads, 3471 proc->requested_threads_started, proc->max_threads, 3472 proc->ready_threads, proc->free_async_space); 3473 count = 0; 3474 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3475 count++; 3476 seq_printf(m, " nodes: %d\n", count); 3477 count = 0; 3478 strong = 0; 3479 weak = 0; 3480 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3481 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3482 rb_node_desc); 3483 count++; 3484 strong += ref->strong; 3485 weak += ref->weak; 3486 } 3487 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 3488 3489 count = 0; 3490 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3491 count++; 3492 seq_printf(m, " buffers: %d\n", count); 3493 3494 count = 0; 3495 list_for_each_entry(w, &proc->todo, entry) { 3496 switch (w->type) { 3497 case BINDER_WORK_TRANSACTION: 3498 count++; 3499 break; 3500 default: 3501 break; 3502 } 3503 } 3504 seq_printf(m, " pending transactions: %d\n", count); 3505 3506 print_binder_stats(m, " ", &proc->stats); 3507 } 3508 3509 3510 static int binder_state_show(struct seq_file *m, void *unused) 3511 { 3512 struct binder_proc *proc; 3513 struct binder_node *node; 3514 int do_lock = !binder_debug_no_lock; 3515 3516 if (do_lock) 3517 binder_lock(__func__); 3518 3519 seq_puts(m, "binder state:\n"); 3520 3521 if (!hlist_empty(&binder_dead_nodes)) 3522 seq_puts(m, "dead nodes:\n"); 3523 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) 3524 print_binder_node(m, node); 3525 3526 hlist_for_each_entry(proc, &binder_procs, proc_node) 3527 print_binder_proc(m, proc, 1); 3528 if (do_lock) 3529 binder_unlock(__func__); 3530 return 0; 3531 } 3532 3533 static int binder_stats_show(struct seq_file *m, void *unused) 3534 { 3535 struct binder_proc *proc; 3536 int do_lock = !binder_debug_no_lock; 3537 3538 if (do_lock) 3539 binder_lock(__func__); 3540 3541 seq_puts(m, "binder stats:\n"); 3542 3543 print_binder_stats(m, "", &binder_stats); 3544 3545 hlist_for_each_entry(proc, &binder_procs, proc_node) 3546 print_binder_proc_stats(m, proc); 3547 if (do_lock) 3548 binder_unlock(__func__); 3549 return 0; 3550 } 3551 3552 static int binder_transactions_show(struct seq_file *m, void *unused) 3553 { 3554 struct binder_proc *proc; 3555 int do_lock = !binder_debug_no_lock; 3556 3557 if (do_lock) 3558 binder_lock(__func__); 3559 3560 seq_puts(m, "binder transactions:\n"); 3561 hlist_for_each_entry(proc, &binder_procs, proc_node) 3562 print_binder_proc(m, proc, 0); 3563 if (do_lock) 3564 binder_unlock(__func__); 3565 return 0; 3566 } 3567 3568 static int binder_proc_show(struct seq_file *m, void *unused) 3569 { 3570 struct binder_proc *proc = m->private; 3571 int do_lock = !binder_debug_no_lock; 3572 3573 if (do_lock) 3574 binder_lock(__func__); 3575 seq_puts(m, "binder proc state:\n"); 3576 print_binder_proc(m, proc, 1); 3577 if (do_lock) 3578 binder_unlock(__func__); 3579 return 0; 3580 } 3581 3582 static void print_binder_transaction_log_entry(struct seq_file *m, 3583 struct binder_transaction_log_entry *e) 3584 { 3585 seq_printf(m, 3586 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", 3587 e->debug_id, (e->call_type == 2) ? "reply" : 3588 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 3589 e->from_thread, e->to_proc, e->to_thread, e->to_node, 3590 e->target_handle, e->data_size, e->offsets_size); 3591 } 3592 3593 static int binder_transaction_log_show(struct seq_file *m, void *unused) 3594 { 3595 struct binder_transaction_log *log = m->private; 3596 int i; 3597 3598 if (log->full) { 3599 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) 3600 print_binder_transaction_log_entry(m, &log->entry[i]); 3601 } 3602 for (i = 0; i < log->next; i++) 3603 print_binder_transaction_log_entry(m, &log->entry[i]); 3604 return 0; 3605 } 3606 3607 static const struct file_operations binder_fops = { 3608 .owner = THIS_MODULE, 3609 .poll = binder_poll, 3610 .unlocked_ioctl = binder_ioctl, 3611 .compat_ioctl = binder_ioctl, 3612 .mmap = binder_mmap, 3613 .open = binder_open, 3614 .flush = binder_flush, 3615 .release = binder_release, 3616 }; 3617 3618 static struct miscdevice binder_miscdev = { 3619 .minor = MISC_DYNAMIC_MINOR, 3620 .name = "binder", 3621 .fops = &binder_fops 3622 }; 3623 3624 BINDER_DEBUG_ENTRY(state); 3625 BINDER_DEBUG_ENTRY(stats); 3626 BINDER_DEBUG_ENTRY(transactions); 3627 BINDER_DEBUG_ENTRY(transaction_log); 3628 3629 static int __init binder_init(void) 3630 { 3631 int ret; 3632 3633 binder_deferred_workqueue = create_singlethread_workqueue("binder"); 3634 if (!binder_deferred_workqueue) 3635 return -ENOMEM; 3636 3637 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 3638 if (binder_debugfs_dir_entry_root) 3639 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 3640 binder_debugfs_dir_entry_root); 3641 ret = misc_register(&binder_miscdev); 3642 if (binder_debugfs_dir_entry_root) { 3643 debugfs_create_file("state", 3644 S_IRUGO, 3645 binder_debugfs_dir_entry_root, 3646 NULL, 3647 &binder_state_fops); 3648 debugfs_create_file("stats", 3649 S_IRUGO, 3650 binder_debugfs_dir_entry_root, 3651 NULL, 3652 &binder_stats_fops); 3653 debugfs_create_file("transactions", 3654 S_IRUGO, 3655 binder_debugfs_dir_entry_root, 3656 NULL, 3657 &binder_transactions_fops); 3658 debugfs_create_file("transaction_log", 3659 S_IRUGO, 3660 binder_debugfs_dir_entry_root, 3661 &binder_transaction_log, 3662 &binder_transaction_log_fops); 3663 debugfs_create_file("failed_transaction_log", 3664 S_IRUGO, 3665 binder_debugfs_dir_entry_root, 3666 &binder_transaction_log_failed, 3667 &binder_transaction_log_fops); 3668 } 3669 return ret; 3670 } 3671 3672 device_initcall(binder_init); 3673 3674 #define CREATE_TRACE_POINTS 3675 #include "binder_trace.h" 3676 3677 MODULE_LICENSE("GPL v2"); 3678