/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret == maxlen)
		dst[--ret] = '\0';

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)	/* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
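/*
 * For illustration only: the path, offset and x86-style register names below
 * are hypothetical, not taken from this file. Written to the uprobe_events
 * file, they would add a probe, add a return probe, and remove the first one:
 *
 *   p:mygrp/myevent /bin/bash:0x4245c0 %ax $stack0
 *   r:mygrp/myret /bin/bash:0x4245c0 $retval
 *   -:mygrp/myevent
 */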
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call));
	seq_printf(m, " %s:", tu->filename);

	/* Don't print "0x (null)" when offset is 0 */
	if (tu->offset) {
		seq_printf(m, "0x%px", (void *)tu->offset);
	} else {
		switch (sizeof(void *)) {
		case 4:
			seq_printf(m, "0x00000000");
			break;
		case 8:
		default:
			seq_printf(m, "0x0000000000000000");
			break;
		}
	}

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

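/*
 * For illustration only (hypothetical filename, event name and hit count),
 * a line of the resulting "uprobe_profile" file looks roughly like:
 *
 *   /bin/bash p_bash_0x4245c0                                    3
 */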
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

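/*
 * Descriptive note: uprobe_perf_open/close below keep tu->filter in sync with
 * the perf events attached to this probe. Per-task events are linked on
 * filter.perf_events, events without a target task only bump nr_systemwide,
 * and uprobe_apply() is called only when the result of __uprobe_perf_filter()
 * may have changed.
 */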
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to uprobe_list, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->inode = inode;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);