// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/filter.h>
#include <linux/percpu.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	char				*filename;
	struct uprobe			*uprobe;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long __percpu		*nhits;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
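
/*
 * Illustrative example (syntax per Documentation/trace/uprobetracer.rst;
 * the address is made up): a probe defined with a $stackN fetch argument,
 * e.g.
 *
 *	p /bin/bash:0x4245c0 arg1=$stack0
 *
 * lands in get_user_stack_nth(regs, 0) above, which reads the word at
 * the user stack pointer; $stack1 reads the next word, and so on, with
 * the walk direction chosen by adjust_stack_addr() per
 * CONFIG_STACK_GROWSUP.
 */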

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strscpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	} else
		*(u32 *)dest = make_data_loc(0, (void *)dst - base);

	return ret;
}

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}
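
/*
 * Sketch of the string record layout assumed by the helpers above
 * (make_data_loc() and friends live in trace_probe.h): the fixed part of
 * the entry holds a u32 "data location" packing the string length into
 * the upper 16 bits and the offset of the string body, relative to
 * 'base', into the lower 16 bits. For example, make_data_loc(6, 24)
 * describes a 6-byte string (NUL included) stored 24 bytes past 'base'.
 */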

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;
	int ret;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
			 (int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
			 (int)(sizeof(void *) * 2), tu->offset,
			 tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}

static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return (event[0] == '\0' ||
		strcmp(trace_probe_name(&tu->tp), event) == 0) &&
	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	   trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}
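
/*
 * Note on the event model (as used throughout this file): several
 * trace_uprobes may share one trace_probe_event when commands are
 * appended under the same GRP/EVENT name; trace_uprobe_primary_from_call()
 * above resolves the first probe on that shared list.
 */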

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->nhits = alloc_percpu(unsigned long);
	if (!tu->nhits) {
		ret = -ENOMEM;
		goto error;
	}

	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	free_percpu(tu->nhits);
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	free_percpu(tu->nhits);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
		return -EBUSY;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
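
/*
 * Illustrative example (assuming the tracefs uprobe_events interface):
 * writing a second command with the same GRP/EVENT but a different
 * location appends a sibling probe to the existing event,
 *
 *	echo 'p:grp/evt /bin/sh:0x1234 %ax' >> uprobe_events
 *	echo 'p:grp/evt /bin/sh:0x5678 %ax' >> uprobe_events
 *
 * while repeating an identical command is rejected with -EEXIST
 * (SAME_PROBE) by append_trace_uprobe() below.
 */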

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that argument indexing starts at 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), the same uprobe may
 * take a new reference counter as long as it does not conflict with any
 * other existing one.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 */
static int __trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	enum probe_print_type ptype;
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;
	if (argc - 2 > MAX_TRACE_ARGS)
		return -E2BIG;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Check if there is %return suffix */
	tmp = strchr(arg, '%');
	if (tmp) {
		if (!strcmp(tmp, "%return")) {
			*tmp = '\0';
			is_return = true;
		} else {
			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
			ret = -EINVAL;
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	}

	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc; i++) {
		struct traceprobe_parse_context ctx = {
			.flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
		};

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
		traceprobe_finish_parse(&ctx);
		if (ret)
			goto error;
	}

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}
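
/*
 * Illustrative usage of the syntax parsed above (see
 * Documentation/trace/uprobetracer.rst; the offsets are made up):
 *
 *	echo 'p:bash_probe /bin/bash:0x4245c0' >> uprobe_events
 *	echo 'r:bash_ret /bin/bash:0x4245c0' >> uprobe_events
 *	echo 'p /bin/bash:0x4245c0%return' >> uprobe_events
 *	echo 'p /lib/libc.so.6:0x12345(0x678)' >> uprobe_events
 *
 * The parenthesized value is the reference counter (semaphore) offset,
 * as used by USDT/SDT markers; '%return' is equivalent to the 'r' form.
 */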

int trace_uprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_uprobe_create);
}

static int create_or_delete_trace_uprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_uprobe_ops);

	ret = trace_uprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;
	unsigned long nhits;
	int cpu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);

	nhits = 0;
	for_each_possible_cpu(cpu) {
		nhits += per_cpu(*tu->nhits, cpu);
	}

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
		   trace_probe_name(&tu->tp), nhits);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};
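
/*
 * Illustrative uprobe_profile output (filename, event name, summed
 * per-CPU hit count; exact spacing follows the format string above):
 *
 *	/bin/bash bash_probe	1234
 */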

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
	int dsize;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
#define MAX_UCB_BUFFER_SIZE	PAGE_SIZE

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	if (!ucb)
		return;
	mutex_unlock(&ucb->mutex);
}

static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
						       struct pt_regs *regs,
						       struct uprobe_cpu_buffer **ucbp)
{
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	if (*ucbp)
		return *ucbp;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	dsize = __get_data_size(&tu->tp, regs, NULL);

	ucb = uprobe_buffer_get();
	ucb->dsize = tu->tp.size + dsize;

	if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
		ucb->dsize = MAX_UCB_BUFFER_SIZE;
		dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
	}

	store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);

	*ucbp = ucb;
	return ucb;
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + ucb->dsize;
	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
	if (!entry)
		return;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, ucb->dsize);

	trace_event_buffer_commit(&fbuffer);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer **ucbp)
{
	struct event_file_link *link;
	struct uprobe_cpu_buffer *ucb;

	if (is_ret_probe(tu))
		return 0;

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer **ucbp)
{
	struct event_file_link *link;
	struct uprobe_cpu_buffer *ucb;

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, link->file);
	rcu_read_unlock();
}
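
/*
 * Illustrative trace output as rendered by print_uprobe_event() below,
 * assuming an event named "evt" with one argument (addresses made up).
 * A plain probe prints the probed address; a return probe prints
 * "(return site <- function entry)":
 *
 *	evt: (0x4245c0) arg1=0x1
 *	evt: (0x48a1f2 <- 0x4245c0) arg1=0x1
 */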

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	struct inode *inode = d_real_inode(tu->path.dentry);
	struct uprobe *uprobe;

	tu->consumer.filter = filter;
	uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer);
	if (IS_ERR(uprobe))
		return PTR_ERR(uprobe);

	tu->uprobe = uprobe;
	return 0;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_uprobe *tu;
	bool sync = false;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		if (!tu->uprobe)
			continue;

		uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
		sync = true;
		tu->uprobe = NULL;
	}
	if (sync)
		uprobe_unregister_sync();
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}
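
/*
 * Hedged sketch of what uprobe_event_define_fields() below exposes via
 * the event's tracefs "format" file (field names FIELD_STRING_IP etc.
 * are defined in trace_probe.h; offsets depend on the architecture):
 *
 *	field:unsigned long __probe_ip;	offset:8;	size:8;	signed:0;
 *
 * plus one field per FETCHARG; return probes expose __probe_func and
 * __probe_ret_ip instead of __probe_ip.
 */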

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}
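
/*
 * Illustrative semantics of the filter above: a per-task perf event,
 * e.g. one opened by
 *
 *	perf record -e uprobes:evt -p 1234
 *
 * is linked into filter->perf_events, so only mm's with an attached
 * event pass uprobe_perf_filter() below; a system-wide session (-a)
 * bumps nr_systemwide instead, which short-circuits the check for
 * every mm.
 */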

static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = uprobe_apply(tu->uprobe, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		err = uprobe_apply(tu->uprobe, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	/*
	 * speculative short-circuiting check to avoid unnecessarily taking
	 * filter->rwlock below, if the uprobe has system-wide consumer
	 */
	if (READ_ONCE(filter->nr_systemwide))
		return true;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer **ucbp)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct uprobe_cpu_buffer *ucb;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

#ifdef CONFIG_BPF_EVENTS
	if (bpf_prog_array_valid(call)) {
		u32 ret;

		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
		if (!ret)
			return;
	}
#endif /* CONFIG_BPF_EVENTS */

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
	size = esize + ucb->dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, ucb->dsize);

	if (size - esize > ucb->dsize)
		memset(data + ucb->dsize, 0, size - esize - ucb->dsize);

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer **ucbp)
{
	if (!uprobe_perf_filter(&tu->consumer, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucbp);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer **ucbp)
{
	__uprobe_perf_func(tu, func, regs, ucbp);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	*probe_addr = 0;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb = NULL;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);

	this_cpu_inc(*tu->nhits);

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, &ucb);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
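
/*
 * Note on the return value above: uprobe_perf_func() may return
 * UPROBE_HANDLER_REMOVE when the perf filter rejects current->mm, which
 * asks the uprobe core to remove the breakpoint from that process; the
 * trace handler always returns 0, so the results are simply OR-ed.
 */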

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb = NULL;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, &ucb);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static struct trace_event_fields uprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = uprobe_event_define_fields },
	{}
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	call->event.funcs = &uprobe_funcs;
	call->class->fields_array = uprobe_fields_array;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	enum probe_print_type ptype;
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * found by find_probe_event(). Therefore, there is no concern about
	 * the duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	if (!tu->filename) {
		ret = -ENOMEM;
		goto error;
	}

	init_trace_event_call(tu);

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
			  NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);