/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#include "binder.h"
#include "binder_trace.h"

/*
 * binder_main_lock is the driver's single big lock; nearly all binder
 * state is accessed under it via binder_lock()/binder_unlock() below.
 * binder_deferred_lock protects binder_deferred_list, and
 * binder_mmap_lock serializes buffer mmap setup/teardown.
 */
static DEFINE_RT_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);	/* every live binder_proc */
static HLIST_HEAD(binder_deferred_list); /* procs with pending deferred work */
static HLIST_HEAD(binder_dead_nodes);	/* nodes whose owning proc has died */

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
/* The one process registered as context manager (handle 0). */
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
/* Monotonic id generator for nodes/refs/transactions (debug ids). */
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

/*
 * Boilerplate generator for single_open()-style debugfs files: defines
 * binder_<name>_open() and binder_<name>_fops wired to binder_<name>_show().
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

/* Userspace must map the binder buffer read-only. */
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

/* Bit flags selecting which debug categories binder_debug() emits. */
enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* When set, the /d/binder/proc files are read without binder_main_lock. */
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
/* 1 = stop on next user error, 2 = currently stopped (see binder_user_error). */
static int binder_stop_on_user_error;

/*
 * module_param set hook for stop_on_user_error: store the new value and,
 * if it no longer requests a stop, wake any threads parked in
 * binder_user_error_wait.
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

/* Emit a pr_info() only when the matching debug category is enabled. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/*
 * Report a userspace protocol error; optionally freeze the driver
 * (binder_stop_on_user_error = 2) so the state can be inspected.
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Object classes counted in binder_stats.obj_created/obj_deleted. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

/* Per-command (BR_*/BC_*) and per-object-class counters for debugfs. */
struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

/* Account one destroyed object of the given class (global stats). */
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

/* Account one created object of the given class (global stats). */
static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

/* One record in the fixed-size transaction trace ring (debugfs). */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
/* Ring buffer of the last 32 transactions; 'full' set once it wraps. */
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

/*
 * Claim the next slot in @log (zeroed) and advance the cursor,
 * wrapping when the ring is full.  Caller fills in the entry.
 */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if
(log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

/* Generic work item queued on a proc/thread/node todo list. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/*
 * A binder object as seen by the process that owns it.  While the owner
 * is alive the node sits in proc->nodes (rb_node); after the owner dies
 * it moves to the global binder_dead_nodes list (dead_node) — hence the
 * union.  ptr/cookie are the owner's userspace identifiers.
 */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;		/* all binder_refs pointing here */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;	/* async transactions queued on node */
};

/* Pending death notification; cookie is the userspace token. */
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;			/* userspace handle value */
	int strong;
	int weak;
	struct binder_ref_death *death;
};

/*
 * Header of one region in the mmap'ed transaction buffer; the payload
 * (data[]) follows immediately.  Buffers are chained by address on
 * proc->buffers and indexed by the free/allocated rb-trees.
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t
offsets_size;
	uint8_t data[0];	/* payload: data area then offsets array */
};

/* Work deferred to the workqueue because it cannot run in ioctl context. */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/*
 * Per-process binder state, created on open().  Holds the thread/node/ref
 * rb-trees, the mmap'ed transaction buffer and its page/allocation
 * bookkeeping, and the process-wide todo queue.
 */
struct binder_proc {
	struct hlist_node proc_node;	/* entry in binder_procs */
	struct rb_root threads;		/* binder_thread, keyed by pid */
	struct rb_root nodes;		/* binder_node, keyed by ptr */
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;	/* userspace mapping of the buffer */
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;		/* mask of binder_deferred_state */
	void *buffer;			/* kernel address of buffer area */
	ptrdiff_t user_buffer_offset;	/* user addr - kernel addr */

	struct list_head buffers;	/* all buffers, by address */
	struct rb_root free_buffers;	/* free buffers, by size */
	struct rb_root allocated_buffers; /* in-use buffers, by address */
	size_t free_async_space;	/* budget for async transactions */

	struct page **pages;		/* backing pages, NULL if unmapped */
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;		/* proc-wide work queue */
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

/* Looper state bits for a binder_thread. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

/* Per-thread binder state, kept in proc->threads keyed by pid. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer.
Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

/*
 * An in-flight transaction.  from/to link the sender and receiver;
 * from_parent/to_parent chain nested transactions into the per-thread
 * transaction stacks.
 */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;	/* restored after priority inheritance */
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

/*
 * Reserve an fd in the TARGET process's file table (not current's), for
 * passing BINDER_TYPE_FD objects.  Reads the target's RLIMIT_NOFILE
 * under its sighand lock.  Returns the fd or a -errno.
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

/* Take the driver-wide big lock; @tag is only used for tracing. */
static inline void binder_lock(const char
*tag)
{
	trace_binder_lock(tag);
	rt_mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

/* Release the driver-wide big lock; @tag is only used for tracing. */
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	rt_mutex_unlock(&binder_main_lock);
}

/*
 * Apply the transaction priority to the current thread, clamping to the
 * nice value allowed by RLIMIT_NICE when the requested one is not
 * permitted; complain only if even the clamped value is out of range.
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

/*
 * Size of @buffer's data area: buffers are laid out back to back, so it
 * is the distance from buffer->data to the next buffer header (or to
 * the end of the whole mapped area for the last buffer).
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}

/* Insert a free buffer into proc->free_buffers, ordered by size. */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		      proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

/* Insert an in-use buffer into proc->allocated_buffers, by address. */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

/*
 * Map a userspace buffer pointer back to its binder_buffer header:
 * subtract the user/kernel offset and the header size, then confirm the
 * result exists in proc->allocated_buffers.  Returns NULL on no match.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/*
 * Allocate (@allocate != 0) or free the physical pages backing
 * [start, end) of the binder buffer, mapping each page into both the
 * kernel (map_vm_area) and the target process (vm_insert_page).
 * Returns 0 on success, -ENOMEM on failure (allocation path unwinds
 * through the labels in the free loop below).
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ?
"allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	/*
	 * If no vma was passed in (mmap path passes one), take the
	 * target's mm and re-derive the vma from proc->vma under
	 * mmap_sem; reject it if the mm no longer matches.
	 */
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page?
*/;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

	/*
	 * Error unwind doubles as the free path: entering at free_range
	 * tears down every page; a failure above jumps into the middle
	 * of the loop body so only the partially-set-up page is undone.
	 */
free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

/*
 * Carve a buffer of data_size + offsets_size (each pointer-aligned) out
 * of the mmap'ed area using best-fit on the free tree, backing it with
 * pages as needed.  Async allocations are additionally limited by
 * proc->free_async_space.  Returns NULL on any failure.
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* ALIGN can wrap; reject sizes that overflowed. */
	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}
	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	/* Best-fit search: smallest free buffer that still fits. */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {	/* inexact fit: re-read the chosen buffer */
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		/*
		 * Only split off a remainder if there is room for a new
		 * buffer header plus a minimal payload after this one.
		 */
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		/* Split: the tail becomes a new free buffer. */
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 738 "%d: binder_alloc_buf size %zd got %p\n", 739 proc->pid, size, buffer); 740 buffer->data_size = data_size; 741 buffer->offsets_size = offsets_size; 742 buffer->async_transaction = is_async; 743 if (is_async) { 744 proc->free_async_space -= size + sizeof(struct binder_buffer); 745 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 746 "%d: binder_alloc_buf size %zd async free %zd\n", 747 proc->pid, size, proc->free_async_space); 748 } 749 750 return buffer; 751} 752 753static void *buffer_start_page(struct binder_buffer *buffer) 754{ 755 return (void *)((uintptr_t)buffer & PAGE_MASK); 756} 757 758static void *buffer_end_page(struct binder_buffer *buffer) 759{ 760 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 761} 762 763static void binder_delete_free_buffer(struct binder_proc *proc, 764 struct binder_buffer *buffer) 765{ 766 struct binder_buffer *prev, *next = NULL; 767 int free_page_end = 1; 768 int free_page_start = 1; 769 770 BUG_ON(proc->buffers.next == &buffer->entry); 771 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 772 BUG_ON(!prev->free); 773 if (buffer_end_page(prev) == buffer_start_page(buffer)) { 774 free_page_start = 0; 775 if (buffer_end_page(prev) == buffer_end_page(buffer)) 776 free_page_end = 0; 777 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 778 "%d: merge free, buffer %p share page with %p\n", 779 proc->pid, buffer, prev); 780 } 781 782 if (!list_is_last(&buffer->entry, &proc->buffers)) { 783 next = list_entry(buffer->entry.next, 784 struct binder_buffer, entry); 785 if (buffer_start_page(next) == buffer_end_page(buffer)) { 786 free_page_end = 0; 787 if (buffer_start_page(next) == 788 buffer_start_page(buffer)) 789 free_page_start = 0; 790 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 791 "%d: merge free, buffer %p share page with %p\n", 792 proc->pid, buffer, prev); 793 } 794 } 795 list_del(&buffer->entry); 796 if (free_page_start || free_page_end) { 797 
binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 798 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", 799 proc->pid, buffer, free_page_start ? "" : " end", 800 free_page_end ? "" : " start", prev, next); 801 binder_update_page_range(proc, 0, free_page_start ? 802 buffer_start_page(buffer) : buffer_end_page(buffer), 803 (free_page_end ? buffer_end_page(buffer) : 804 buffer_start_page(buffer)) + PAGE_SIZE, NULL); 805 } 806} 807 808static void binder_free_buf(struct binder_proc *proc, 809 struct binder_buffer *buffer) 810{ 811 size_t size, buffer_size; 812 813 buffer_size = binder_buffer_size(proc, buffer); 814 815 size = ALIGN(buffer->data_size, sizeof(void *)) + 816 ALIGN(buffer->offsets_size, sizeof(void *)); 817 818 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 819 "%d: binder_free_buf %p size %zd buffer_size %zd\n", 820 proc->pid, buffer, size, buffer_size); 821 822 BUG_ON(buffer->free); 823 BUG_ON(size > buffer_size); 824 BUG_ON(buffer->transaction != NULL); 825 BUG_ON((void *)buffer < proc->buffer); 826 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); 827 828 if (buffer->async_transaction) { 829 proc->free_async_space += size + sizeof(struct binder_buffer); 830 831 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 832 "%d: binder_free_buf size %zd async free %zd\n", 833 proc->pid, size, proc->free_async_space); 834 } 835 836 binder_update_page_range(proc, 0, 837 (void *)PAGE_ALIGN((uintptr_t)buffer->data), 838 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 839 NULL); 840 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 841 buffer->free = 1; 842 if (!list_is_last(&buffer->entry, &proc->buffers)) { 843 struct binder_buffer *next = list_entry(buffer->entry.next, 844 struct binder_buffer, entry); 845 846 if (next->free) { 847 rb_erase(&next->rb_node, &proc->free_buffers); 848 binder_delete_free_buffer(proc, next); 849 } 850 } 851 if (proc->buffers.next != &buffer->entry) { 852 struct binder_buffer *prev = list_entry(buffer->entry.prev, 
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;	/* merged region starts at prev */
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

/* Find the node for a userspace binder pointer in proc->nodes. */
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Create a node for (@ptr, @cookie) and insert it into proc->nodes.
 * Returns NULL if allocation fails or a node with that ptr already
 * exists (callers look up first).
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

/*
 * Take a strong or weak reference on @node.  @internal distinguishes
 * refs held via binder_ref from local ones; when the owning process has
 * not yet been told about the ref, queue the node on @target_list so a
 * BR_INCREFS/BR_ACQUIRE is delivered.  Returns 0 or -EINVAL.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/*
 * Drop a strong or weak reference on @node.  When the last reference
 * of the relevant kind goes away, either queue node work so the owner
 * is told to release it, or — once no refs of any kind remain — unlink
 * and free the node (from proc->nodes or binder_dead_nodes).
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}


/* Find the ref with userspace handle @desc in proc->refs_by_desc. */
static struct binder_ref
*binder_get_ref(struct binder_proc *proc,
		uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/*
 * Return @proc's ref to @node, creating one if none exists.  A new ref
 * gets the lowest unused descriptor (0 is reserved for the context
 * manager node) and is inserted into both refs_by_node and
 * refs_by_desc, and onto the node's refs list.  Returns NULL on OOM.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == binder_context_mgr_node) ?
0 : 1;
	/* Walk ascending descriptors to find the lowest free one. */
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

/*
 * Destroy @ref: unlink it from both rb-trees and the node's list, drop
 * the node references it held (strong first, then the weak one every
 * ref implies), discard any pending death notification, and free it.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

/*
 * Increment @ref's strong or weak count; the 0->1 transition also
 * takes the corresponding internal reference on the node (possibly
 * queueing node work on @target_list).
 */
static int binder_inc_ref(struct binder_ref *ref,
int strong, 1113 struct list_head *target_list) 1114{ 1115 int ret; 1116 1117 if (strong) { 1118 if (ref->strong == 0) { 1119 ret = binder_inc_node(ref->node, 1, 1, target_list); 1120 if (ret) 1121 return ret; 1122 } 1123 ref->strong++; 1124 } else { 1125 if (ref->weak == 0) { 1126 ret = binder_inc_node(ref->node, 0, 1, target_list); 1127 if (ret) 1128 return ret; 1129 } 1130 ref->weak++; 1131 } 1132 return 0; 1133} 1134 1135 1136static int binder_dec_ref(struct binder_ref *ref, int strong) 1137{ 1138 if (strong) { 1139 if (ref->strong == 0) { 1140 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1141 ref->proc->pid, ref->debug_id, 1142 ref->desc, ref->strong, ref->weak); 1143 return -EINVAL; 1144 } 1145 ref->strong--; 1146 if (ref->strong == 0) { 1147 int ret; 1148 1149 ret = binder_dec_node(ref->node, strong, 1); 1150 if (ret) 1151 return ret; 1152 } 1153 } else { 1154 if (ref->weak == 0) { 1155 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1156 ref->proc->pid, ref->debug_id, 1157 ref->desc, ref->strong, ref->weak); 1158 return -EINVAL; 1159 } 1160 ref->weak--; 1161 } 1162 if (ref->strong == 0 && ref->weak == 0) 1163 binder_delete_ref(ref); 1164 return 0; 1165} 1166 1167static void binder_pop_transaction(struct binder_thread *target_thread, 1168 struct binder_transaction *t) 1169{ 1170 if (target_thread) { 1171 BUG_ON(target_thread->transaction_stack != t); 1172 BUG_ON(target_thread->transaction_stack->from != target_thread); 1173 target_thread->transaction_stack = 1174 target_thread->transaction_stack->from_parent; 1175 t->from = NULL; 1176 } 1177 t->need_reply = 0; 1178 if (t->buffer) 1179 t->buffer->transaction = NULL; 1180 kfree(t); 1181 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1182} 1183 1184static void binder_send_failed_reply(struct binder_transaction *t, 1185 uint32_t error_code) 1186{ 1187 struct binder_thread *target_thread; 1188 struct binder_transaction *next; 1189 1190 BUG_ON(t->flags & TF_ONE_WAY); 
1191 while (1) { 1192 target_thread = t->from; 1193 if (target_thread) { 1194 if (target_thread->return_error != BR_OK && 1195 target_thread->return_error2 == BR_OK) { 1196 target_thread->return_error2 = 1197 target_thread->return_error; 1198 target_thread->return_error = BR_OK; 1199 } 1200 if (target_thread->return_error == BR_OK) { 1201 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1202 "send failed reply for transaction %d to %d:%d\n", 1203 t->debug_id, 1204 target_thread->proc->pid, 1205 target_thread->pid); 1206 1207 binder_pop_transaction(target_thread, t); 1208 target_thread->return_error = error_code; 1209 wake_up_interruptible(&target_thread->wait); 1210 } else { 1211 pr_err("reply failed, target thread, %d:%d, has error code %d already\n", 1212 target_thread->proc->pid, 1213 target_thread->pid, 1214 target_thread->return_error); 1215 } 1216 return; 1217 } 1218 next = t->from_parent; 1219 1220 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1221 "send failed reply for transaction %d, target dead\n", 1222 t->debug_id); 1223 1224 binder_pop_transaction(target_thread, t); 1225 if (next == NULL) { 1226 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1227 "reply failed, no target thread at root\n"); 1228 return; 1229 } 1230 t = next; 1231 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1232 "reply failed, no target thread -- retry %d\n", 1233 t->debug_id); 1234 } 1235} 1236 1237static void binder_transaction_buffer_release(struct binder_proc *proc, 1238 struct binder_buffer *buffer, 1239 binder_size_t *failed_at) 1240{ 1241 binder_size_t *offp, *off_end; 1242 int debug_id = buffer->debug_id; 1243 1244 binder_debug(BINDER_DEBUG_TRANSACTION, 1245 "%d buffer release %d, size %zd-%zd, failed at %p\n", 1246 proc->pid, buffer->debug_id, 1247 buffer->data_size, buffer->offsets_size, failed_at); 1248 1249 if (buffer->target_node) 1250 binder_dec_node(buffer->target_node, 1, 0); 1251 1252 offp = (binder_size_t *)(buffer->data + 1253 ALIGN(buffer->data_size, sizeof(void *))); 1254 if 
	   (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		/* Reject offsets that don't leave room for a whole object. */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " fd %d\n", fp->handle);
			/* fds installed in the target are only closed when
			 * unwinding a failed send. */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, fp->type);
			break;
		}
	}
}

/*
 * Core send path for BC_TRANSACTION / BC_REPLY: resolve the target,
 * copy the payload into a buffer allocated from the target's address
 * space, translate each embedded binder object (node <-> handle, fd),
 * then queue the work on the target and a completion on the sender.
 * On failure the partially-translated buffer is released and a BR_*
 * error is delivered to the appropriate sender instead.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	binder_size_t off_min;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		/* A reply targets whoever is on top of our stack. */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			/* handle 0 addresses the context manager */
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			/* Synchronous nested call: prefer a thread in the
			 * target that is already blocked waiting on us. */
			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	/* Offsets array is stored after the (pointer-aligned) data. */
	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)offp + tr->offsets_size;
	off_min = 0;
	/* Translate each embedded object; offsets must be ascending and
	 * non-overlapping (enforced via off_min). */
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    *offp < off_min ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)(t->buffer->data_size -
					  sizeof(*fp)));
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		off_min = *offp + sizeof(struct flat_binder_object);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			/* Sending one of our own nodes: convert it into a
			 * handle valid in the receiver. */
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
			if (fp->cookie != node->cookie) {
				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					(u64)fp->binder, node->debug_id,
					(u64)fp->cookie, (u64)node->cookie);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);

			trace_binder_transaction_node_to_ref(t, node, ref);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%016llx -> ref %d desc %d\n",
				     node->debug_id, (u64)node->ptr,
				     ref->debug_id, ref->desc);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
						proc->pid,
						thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (ref->node->proc == target_proc) {
				/* Handle points back at a node owned by the
				 * receiver: convert to a direct node ref. */
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
				trace_binder_transaction_ref_to_node(t, ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     " ref %d desc %d -> node %d u%016llx\n",
					     ref->debug_id, ref->desc, ref->node->debug_id,
					     (u64)ref->node->ptr);
			} else {
				struct binder_ref *new_ref;

				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
				trace_binder_transaction_ref_to_ref(t, ref,
								    new_ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     " ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc, new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		} break;

		case BINDER_TYPE_FD: {
			int target_fd;
			struct file *file;

			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
						proc->pid, thread->pid, fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("%d:%d got transaction with invalid fd, %d\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			task_fd_install(target_proc, target_fd, file);
			trace_binder_transaction_fd(t, fp->handle, target_fd);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " fd %d -> %d\n", fp->handle, target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		/* Only one async transaction per node may sit on the proc
		 * todo list at a time; the rest wait on async_todo. */
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	/* Undo translations done so far; offp marks how far we got. */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		/* Failed reply: the error goes to the original caller, our
		 * own thread just sees the completion. */
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}

/*
 * Consume BC_* commands from the userspace write buffer.  *consumed is
 * advanced past each fully-processed command so an interrupted call can
 * resume where it left off.  Returns 0, or -EFAULT/-EINVAL on malformed
 * input.
 */
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				/* desc 0: create the ref to the context
				 * manager on demand. */
				ref = binder_get_ref_for_node(proc,
					       binder_context_mgr_node);
				/* NOTE(review): binder_get_ref_for_node()
				 * returns NULL on allocation failure, making
				 * ref->desc below a NULL dereference -- the
				 * NULL check only happens after this branch.
				 * Confirm against upstream fix. */
				if (ref->desc != target) {
					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
						proc->pid, thread->pid,
						ref->desc);
				}
			} else
				ref =
				    binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("%d:%d refcount change on invalid ref %d\n",
					proc->pid, thread->pid, target);
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string, ref->debug_id,
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			/* Drop the ref that was held while the inc request
			 * was in flight to userspace. */
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				/* Freeing an async buffer unblocks the next
				 * queued async transaction for that node. */
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					/* Node is already dead: deliver the
					 * notification right away. */
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					/* Death already queued: morph it into
					 * a combined dead+clear work item. */
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			/* NOTE(review): cookie is binder_uintptr_t but ptr is
			 * advanced by sizeof(void *); these differ on a 32-bit
			 * kernel with a 64-bit binder ABI -- presumably should
			 * be sizeof(cookie).  Verify against the UAPI. */
			ptr += sizeof(void *);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				break;
			}

			list_del_init(&death->work.entry);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				/* The clear half still needs delivery. */
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

/*
 * Account a BR_* return code in the global, per-proc and per-thread
 * stats tables.
 */
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}

static
int binder_has_proc_work(struct binder_proc *proc,
			 struct binder_thread *thread)
{
	/* Process-wide work pending, or this thread must return to
	 * userspace (e.g. after a flush).
	 */
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

static int binder_has_thread_work(struct binder_thread *thread)
{
	/* Thread-local work, a pending error code, or a forced return. */
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * Fill the userspace read buffer with BR_* commands.
 *
 * @binder_buffer: userspace buffer address (as integer)
 * @size:          buffer size in bytes
 * @consumed:      in: bytes already filled; out: total bytes written
 * @non_block:     nonzero for O_NONBLOCK behavior (-EAGAIN instead of sleep)
 *
 * Called with the global binder lock held; the lock is dropped while the
 * thread sleeps waiting for work and re-taken afterwards.
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Every fresh read starts with a BR_NOOP so older userspace can
	 * always skip the first word.
	 */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* A thread with no transaction in flight and an empty local queue
	 * may service the process-wide queue.
	 */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Deliver pending error codes first and return immediately. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	/* Drop the global lock across the (possibly blocking) wait. */
	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			/* Protocol violation: must announce looper state
			 * before waiting for process work.
			 */
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* exclusive: wake only one waiter per work item */
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		/* Need room for a command word plus a transaction_data. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			/* Reconcile the node's kernel-side ref counts with
			 * the userspace-visible has_{weak,strong}_ref state
			 * and emit the matching BR_* command, if any.
			 */
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				/* Nothing to tell userspace; free the node
				 * if no references remain at all.
				 */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			/* Deliver a death notification or a clear ack. */
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep the death on delivered_death until
				 * userspace sends BC_DEAD_BINDER_DONE.
				 */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Copy a transaction (or reply) out to userspace. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			/* Inherit the caller's priority for synchronous
			 * calls, clamped by the node's minimum.
			 */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			/* Translate the sender pid into the reader's pid
			 * namespace.
			 */
			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Data is not copied; userspace sees the same pages at a
		 * fixed offset from the kernel mapping.
		 */
		tr.data.ptr.buffer = (binder_uintptr_t)(
					(uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous call: keep the transaction on this
			 * thread's stack until the reply comes back.
			 */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask userspace to spawn another looper thread when the pool has
	 * run dry (overwrites the leading BR_NOOP).
	 */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}

/*
 * Drain and free all work items left on a todo list when a thread or
 * process goes away; sends BR_DEAD_REPLY for undeliverable synchronous
 * transactions.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			}
			else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			/* NOTE(review): BINDER_WORK_DEAD_BINDER and
			 * BINDER_WORK_NODE land here and are intentionally
			 * not freed (they are embedded in other objects).
			 */
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}

/*
 * Look up the binder_thread for the calling task in proc's thread rb-tree,
 * creating and inserting it on first use.  Returns NULL only on allocation
 * failure.  Called with the global binder lock held.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		/* Force a return to userspace before this thread waits. */
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}

/*
 * Tear down a binder_thread: unlink it from proc, detach it from every
 * transaction it participates in, fail any reply it owes, drain its todo
 * list and free it.  Returns the number of transactions it was part of.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming: orphan the transaction and its buffer. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing: the reply will have nowhere to go. */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}

/*
 * poll() support: report POLLIN when work is available for the calling
 * thread (or the process, if the thread is idle).
 */
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	/* NOTE(review): binder_get_thread() can return NULL on allocation
	 * failure; the result is dereferenced below unchecked — confirm
	 * against upstream hardening.
	 */
	thread = binder_get_thread(proc);

	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

/*
 * Handle BINDER_WRITE_READ: run the write buffer through
 * binder_thread_write(), then fill the read buffer via
 * binder_thread_read().  Consumed counts are copied back to userspace
 * even on failure so the caller can resume.
 */
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* This reader may have left work behind; nudge others. */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

static int binder_ioctl_set_ctx_mgr(struct file *filp)
2690{ 2691 int ret = 0; 2692 struct binder_proc *proc = filp->private_data; 2693 kuid_t curr_euid = current_euid(); 2694 2695 if (binder_context_mgr_node != NULL) { 2696 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 2697 ret = -EBUSY; 2698 goto out; 2699 } 2700 if (uid_valid(binder_context_mgr_uid)) { 2701 if (!uid_eq(binder_context_mgr_uid, curr_euid)) { 2702 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 2703 from_kuid(&init_user_ns, curr_euid), 2704 from_kuid(&init_user_ns, 2705 binder_context_mgr_uid)); 2706 ret = -EPERM; 2707 goto out; 2708 } 2709 } else { 2710 binder_context_mgr_uid = curr_euid; 2711 } 2712 binder_context_mgr_node = binder_new_node(proc, 0, 0); 2713 if (binder_context_mgr_node == NULL) { 2714 ret = -ENOMEM; 2715 goto out; 2716 } 2717 binder_context_mgr_node->local_weak_refs++; 2718 binder_context_mgr_node->local_strong_refs++; 2719 binder_context_mgr_node->has_strong_ref = 1; 2720 binder_context_mgr_node->has_weak_ref = 1; 2721out: 2722 return ret; 2723} 2724 2725static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2726{ 2727 int ret; 2728 struct binder_proc *proc = filp->private_data; 2729 struct binder_thread *thread; 2730 unsigned int size = _IOC_SIZE(cmd); 2731 void __user *ubuf = (void __user *)arg; 2732 2733 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 2734 proc->pid, current->pid, cmd, arg);*/ 2735 2736 trace_binder_ioctl(cmd, arg); 2737 2738 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2739 if (ret) 2740 goto err_unlocked; 2741 2742 binder_lock(__func__); 2743 thread = binder_get_thread(proc); 2744 if (thread == NULL) { 2745 ret = -ENOMEM; 2746 goto err; 2747 } 2748 2749 switch (cmd) { 2750 case BINDER_WRITE_READ: 2751 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 2752 if (ret) 2753 goto err; 2754 break; 2755 case BINDER_SET_MAX_THREADS: 2756 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { 2757 ret = -EINVAL; 2758 goto err; 
2759 } 2760 break; 2761 case BINDER_SET_CONTEXT_MGR: 2762 ret = binder_ioctl_set_ctx_mgr(filp); 2763 if (ret) 2764 goto err; 2765 ret = security_binder_set_context_mgr(proc->tsk); 2766 if (ret < 0) 2767 goto err; 2768 break; 2769 case BINDER_THREAD_EXIT: 2770 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 2771 proc->pid, thread->pid); 2772 binder_free_thread(proc, thread); 2773 thread = NULL; 2774 break; 2775 case BINDER_VERSION: { 2776 struct binder_version __user *ver = ubuf; 2777 2778 if (size != sizeof(struct binder_version)) { 2779 ret = -EINVAL; 2780 goto err; 2781 } 2782 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 2783 &ver->protocol_version)) { 2784 ret = -EINVAL; 2785 goto err; 2786 } 2787 break; 2788 } 2789 default: 2790 ret = -EINVAL; 2791 goto err; 2792 } 2793 ret = 0; 2794err: 2795 if (thread) 2796 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 2797 binder_unlock(__func__); 2798 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2799 if (ret && ret != -ERESTARTSYS) 2800 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 2801err_unlocked: 2802 trace_binder_ioctl_done(ret); 2803 return ret; 2804} 2805 2806static void binder_vma_open(struct vm_area_struct *vma) 2807{ 2808 struct binder_proc *proc = vma->vm_private_data; 2809 2810 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2811 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2812 proc->pid, vma->vm_start, vma->vm_end, 2813 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2814 (unsigned long)pgprot_val(vma->vm_page_prot)); 2815} 2816 2817static void binder_vma_close(struct vm_area_struct *vma) 2818{ 2819 struct binder_proc *proc = vma->vm_private_data; 2820 2821 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2822 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2823 proc->pid, vma->vm_start, vma->vm_end, 2824 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2825 (unsigned long)pgprot_val(vma->vm_page_prot)); 2826 proc->vma 
= NULL;
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

/* Userspace never faults pages in; all pages are mapped explicitly. */
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

/*
 * Set up the shared transaction buffer: one read-only userspace mapping
 * (capped at 4MB) mirroring a kernel vmalloc area, with a fixed
 * user/kernel offset so buffers can be handed over without copying.
 * Only one mapping per proc is allowed, and only by the opening task.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	/* The mapping must never be writable from userspace. */
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/* Reserve a matching kernel virtual range; pages come later. */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* Constant delta between user and kernel views of the buffer. */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		/* Keep user and kernel mappings cache-colour aligned. */
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Populate just the first page; the rest is mapped on demand. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Half the buffer is reserved for async transactions. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

/*
 * open() on /dev/binder: allocate the per-process state, register it on
 * the global proc list and create its debugfs entry.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a reference on the opener for proc's lifetime. */
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}

/* flush(): defer waking all of this proc's waiters to the workqueue. */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

/*
 * Deferred flush: mark every thread NEED_RETURN and wake anything
 * sleeping in binder_thread_read() so it returns to userspace.
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

/* release(): tear-down is deferred to the workqueue. */
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

/*
 * Release one node of a dying process.  Nodes still referenced by other
 * processes become "dead nodes" and a BR_DEAD_BINDER is queued to every
 * ref that registered a death notification.  Returns the running count
 * of incoming refs (refs argument plus refs seen on this node).
 */
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		/* No remote refs: the node can go away immediately. */
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}

/*
 * Deferred release of an entire binder_proc: free threads, nodes, refs,
 * queued work, outstanding buffers and the mapped pages, then the proc
 * itself.  Runs from the workqueue with the global binder lock held;
 * the vma and files must already be gone.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
		active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			/* Orphan, don't free, the in-flight transaction. */
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;

		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %p not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}

/*
 * Workqueue function: drain the deferred-work list, handling PUT_FILES,
 * FLUSH and RELEASE requests for one proc per iteration.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		/* Drop the files reference outside the binder lock. */
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

/*
 * Queue deferred work bits for a proc and kick the workqueue if the proc
 * is not already on the deferred list.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

/* debugfs: dump one transaction. */
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}

/* debugfs: dump one buffer. */
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

/* debugfs: dump one pending work item, dispatched on its type. */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

/*
 * debugfs: dump one thread, its transaction stack and todo list.  When
 * print_always is zero and the thread produced no output beyond its
 * header, the header is retracted by rewinding m->count.
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

/* debugfs: dump one node, the procs holding refs to it and its async work. */
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, "    ",
				  "    pending async transaction", w);
}

/* debugfs: dump one ref. */
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ?
"" : "dead ", 3353 ref->node->debug_id, ref->strong, ref->weak, ref->death); 3354} 3355 3356static void print_binder_proc(struct seq_file *m, 3357 struct binder_proc *proc, int print_all) 3358{ 3359 struct binder_work *w; 3360 struct rb_node *n; 3361 size_t start_pos = m->count; 3362 size_t header_pos; 3363 3364 seq_printf(m, "proc %d\n", proc->pid); 3365 header_pos = m->count; 3366 3367 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3368 print_binder_thread(m, rb_entry(n, struct binder_thread, 3369 rb_node), print_all); 3370 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3371 struct binder_node *node = rb_entry(n, struct binder_node, 3372 rb_node); 3373 if (print_all || node->has_async_transaction) 3374 print_binder_node(m, node); 3375 } 3376 if (print_all) { 3377 for (n = rb_first(&proc->refs_by_desc); 3378 n != NULL; 3379 n = rb_next(n)) 3380 print_binder_ref(m, rb_entry(n, struct binder_ref, 3381 rb_node_desc)); 3382 } 3383 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3384 print_binder_buffer(m, " buffer", 3385 rb_entry(n, struct binder_buffer, rb_node)); 3386 list_for_each_entry(w, &proc->todo, entry) 3387 print_binder_work(m, " ", " pending transaction", w); 3388 list_for_each_entry(w, &proc->delivered_death, entry) { 3389 seq_puts(m, " has delivered dead binder\n"); 3390 break; 3391 } 3392 if (!print_all && m->count == header_pos) 3393 m->count = start_pos; 3394} 3395 3396static const char * const binder_return_strings[] = { 3397 "BR_ERROR", 3398 "BR_OK", 3399 "BR_TRANSACTION", 3400 "BR_REPLY", 3401 "BR_ACQUIRE_RESULT", 3402 "BR_DEAD_REPLY", 3403 "BR_TRANSACTION_COMPLETE", 3404 "BR_INCREFS", 3405 "BR_ACQUIRE", 3406 "BR_RELEASE", 3407 "BR_DECREFS", 3408 "BR_ATTEMPT_ACQUIRE", 3409 "BR_NOOP", 3410 "BR_SPAWN_LOOPER", 3411 "BR_FINISHED", 3412 "BR_DEAD_BINDER", 3413 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3414 "BR_FAILED_REPLY" 3415}; 3416 3417static const char * const binder_command_strings[] = { 3418 
"BC_TRANSACTION", 3419 "BC_REPLY", 3420 "BC_ACQUIRE_RESULT", 3421 "BC_FREE_BUFFER", 3422 "BC_INCREFS", 3423 "BC_ACQUIRE", 3424 "BC_RELEASE", 3425 "BC_DECREFS", 3426 "BC_INCREFS_DONE", 3427 "BC_ACQUIRE_DONE", 3428 "BC_ATTEMPT_ACQUIRE", 3429 "BC_REGISTER_LOOPER", 3430 "BC_ENTER_LOOPER", 3431 "BC_EXIT_LOOPER", 3432 "BC_REQUEST_DEATH_NOTIFICATION", 3433 "BC_CLEAR_DEATH_NOTIFICATION", 3434 "BC_DEAD_BINDER_DONE" 3435}; 3436 3437static const char * const binder_objstat_strings[] = { 3438 "proc", 3439 "thread", 3440 "node", 3441 "ref", 3442 "death", 3443 "transaction", 3444 "transaction_complete" 3445}; 3446 3447static void print_binder_stats(struct seq_file *m, const char *prefix, 3448 struct binder_stats *stats) 3449{ 3450 int i; 3451 3452 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 3453 ARRAY_SIZE(binder_command_strings)); 3454 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3455 if (stats->bc[i]) 3456 seq_printf(m, "%s%s: %d\n", prefix, 3457 binder_command_strings[i], stats->bc[i]); 3458 } 3459 3460 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3461 ARRAY_SIZE(binder_return_strings)); 3462 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3463 if (stats->br[i]) 3464 seq_printf(m, "%s%s: %d\n", prefix, 3465 binder_return_strings[i], stats->br[i]); 3466 } 3467 3468 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3469 ARRAY_SIZE(binder_objstat_strings)); 3470 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3471 ARRAY_SIZE(stats->obj_deleted)); 3472 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3473 if (stats->obj_created[i] || stats->obj_deleted[i]) 3474 seq_printf(m, "%s%s: active %d total %d\n", prefix, 3475 binder_objstat_strings[i], 3476 stats->obj_created[i] - stats->obj_deleted[i], 3477 stats->obj_created[i]); 3478 } 3479} 3480 3481static void print_binder_proc_stats(struct seq_file *m, 3482 struct binder_proc *proc) 3483{ 3484 struct binder_work *w; 3485 struct rb_node *n; 3486 int count, strong, weak; 3487 3488 seq_printf(m, "proc %d\n", proc->pid); 3489 count = 0; 3490 for (n 
= rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3491 count++; 3492 seq_printf(m, " threads: %d\n", count); 3493 seq_printf(m, " requested threads: %d+%d/%d\n" 3494 " ready threads %d\n" 3495 " free async space %zd\n", proc->requested_threads, 3496 proc->requested_threads_started, proc->max_threads, 3497 proc->ready_threads, proc->free_async_space); 3498 count = 0; 3499 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3500 count++; 3501 seq_printf(m, " nodes: %d\n", count); 3502 count = 0; 3503 strong = 0; 3504 weak = 0; 3505 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3506 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3507 rb_node_desc); 3508 count++; 3509 strong += ref->strong; 3510 weak += ref->weak; 3511 } 3512 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 3513 3514 count = 0; 3515 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3516 count++; 3517 seq_printf(m, " buffers: %d\n", count); 3518 3519 count = 0; 3520 list_for_each_entry(w, &proc->todo, entry) { 3521 switch (w->type) { 3522 case BINDER_WORK_TRANSACTION: 3523 count++; 3524 break; 3525 default: 3526 break; 3527 } 3528 } 3529 seq_printf(m, " pending transactions: %d\n", count); 3530 3531 print_binder_stats(m, " ", &proc->stats); 3532} 3533 3534 3535static int binder_state_show(struct seq_file *m, void *unused) 3536{ 3537 struct binder_proc *proc; 3538 struct binder_node *node; 3539 int do_lock = !binder_debug_no_lock; 3540 3541 if (do_lock) 3542 binder_lock(__func__); 3543 3544 seq_puts(m, "binder state:\n"); 3545 3546 if (!hlist_empty(&binder_dead_nodes)) 3547 seq_puts(m, "dead nodes:\n"); 3548 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) 3549 print_binder_node(m, node); 3550 3551 hlist_for_each_entry(proc, &binder_procs, proc_node) 3552 print_binder_proc(m, proc, 1); 3553 if (do_lock) 3554 binder_unlock(__func__); 3555 return 0; 3556} 3557 3558static int binder_stats_show(struct seq_file *m, void 
*unused) 3559{ 3560 struct binder_proc *proc; 3561 int do_lock = !binder_debug_no_lock; 3562 3563 if (do_lock) 3564 binder_lock(__func__); 3565 3566 seq_puts(m, "binder stats:\n"); 3567 3568 print_binder_stats(m, "", &binder_stats); 3569 3570 hlist_for_each_entry(proc, &binder_procs, proc_node) 3571 print_binder_proc_stats(m, proc); 3572 if (do_lock) 3573 binder_unlock(__func__); 3574 return 0; 3575} 3576 3577static int binder_transactions_show(struct seq_file *m, void *unused) 3578{ 3579 struct binder_proc *proc; 3580 int do_lock = !binder_debug_no_lock; 3581 3582 if (do_lock) 3583 binder_lock(__func__); 3584 3585 seq_puts(m, "binder transactions:\n"); 3586 hlist_for_each_entry(proc, &binder_procs, proc_node) 3587 print_binder_proc(m, proc, 0); 3588 if (do_lock) 3589 binder_unlock(__func__); 3590 return 0; 3591} 3592 3593static int binder_proc_show(struct seq_file *m, void *unused) 3594{ 3595 struct binder_proc *proc = m->private; 3596 int do_lock = !binder_debug_no_lock; 3597 3598 if (do_lock) 3599 binder_lock(__func__); 3600 seq_puts(m, "binder proc state:\n"); 3601 print_binder_proc(m, proc, 1); 3602 if (do_lock) 3603 binder_unlock(__func__); 3604 return 0; 3605} 3606 3607static void print_binder_transaction_log_entry(struct seq_file *m, 3608 struct binder_transaction_log_entry *e) 3609{ 3610 seq_printf(m, 3611 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", 3612 e->debug_id, (e->call_type == 2) ? "reply" : 3613 ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, 3614 e->from_thread, e->to_proc, e->to_thread, e->to_node, 3615 e->target_handle, e->data_size, e->offsets_size); 3616} 3617 3618static int binder_transaction_log_show(struct seq_file *m, void *unused) 3619{ 3620 struct binder_transaction_log *log = m->private; 3621 int i; 3622 3623 if (log->full) { 3624 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) 3625 print_binder_transaction_log_entry(m, &log->entry[i]); 3626 } 3627 for (i = 0; i < log->next; i++) 3628 print_binder_transaction_log_entry(m, &log->entry[i]); 3629 return 0; 3630} 3631 3632static const struct file_operations binder_fops = { 3633 .owner = THIS_MODULE, 3634 .poll = binder_poll, 3635 .unlocked_ioctl = binder_ioctl, 3636 .compat_ioctl = binder_ioctl, 3637 .mmap = binder_mmap, 3638 .open = binder_open, 3639 .flush = binder_flush, 3640 .release = binder_release, 3641}; 3642 3643static struct miscdevice binder_miscdev = { 3644 .minor = MISC_DYNAMIC_MINOR, 3645 .name = "binder", 3646 .fops = &binder_fops 3647}; 3648 3649BINDER_DEBUG_ENTRY(state); 3650BINDER_DEBUG_ENTRY(stats); 3651BINDER_DEBUG_ENTRY(transactions); 3652BINDER_DEBUG_ENTRY(transaction_log); 3653 3654static int __init binder_init(void) 3655{ 3656 int ret; 3657 3658 binder_deferred_workqueue = create_singlethread_workqueue("binder"); 3659 if (!binder_deferred_workqueue) 3660 return -ENOMEM; 3661 3662 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 3663 if (binder_debugfs_dir_entry_root) 3664 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 3665 binder_debugfs_dir_entry_root); 3666 ret = misc_register(&binder_miscdev); 3667 if (binder_debugfs_dir_entry_root) { 3668 debugfs_create_file("state", 3669 S_IRUGO, 3670 binder_debugfs_dir_entry_root, 3671 NULL, 3672 &binder_state_fops); 3673 debugfs_create_file("stats", 3674 S_IRUGO, 3675 binder_debugfs_dir_entry_root, 3676 NULL, 3677 &binder_stats_fops); 3678 debugfs_create_file("transactions", 3679 S_IRUGO, 3680 
binder_debugfs_dir_entry_root, 3681 NULL, 3682 &binder_transactions_fops); 3683 debugfs_create_file("transaction_log", 3684 S_IRUGO, 3685 binder_debugfs_dir_entry_root, 3686 &binder_transaction_log, 3687 &binder_transaction_log_fops); 3688 debugfs_create_file("failed_transaction_log", 3689 S_IRUGO, 3690 binder_debugfs_dir_entry_root, 3691 &binder_transaction_log_failed, 3692 &binder_transaction_log_fops); 3693 } 3694 return ret; 3695} 3696 3697device_initcall(binder_init); 3698 3699#define CREATE_TRACE_POINTS 3700#include "binder_trace.h" 3701 3702MODULE_LICENSE("GPL v2"); 3703