Lines Matching +full:tp +full:- +full:link

1 // SPDX-License-Identifier: GPL-2.0
3 * uprobes-based tracing events
5 * Copyright (C) IBM Corporation, 2010-2012
10 #include <linux/bpf-cgroup.h>
67 struct trace_probe tp; member
72 return ev->ops == &trace_uprobe_ops; in is_trace_uprobe()
81 * for_each_trace_uprobe - iterate over the trace_uprobe list
99 return addr - (n * sizeof(long)); in adjust_stack_addr()
122 * Uprobes-specific fetch functions
129 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0; in probe_mem_read()
139 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
152 return -ENOMEM; in fetch_store_string()
155 ret = strscpy(dst, current->comm, maxlen); in fetch_store_string()
160 dst[ret - 1] = '\0'; in fetch_store_string()
168 *(u32 *)dest = make_data_loc(ret, (void *)dst - base); in fetch_store_string()
170 *(u32 *)dest = make_data_loc(0, (void *)dst - base); in fetch_store_string()
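For reference, the make_data_loc() calls above record where the copied string sits relative to the dynamic-data base. A minimal sketch of the assumed packing (length in the high 16 bits, offset in the low 16 bits of a u32), illustrative rather than the kernel macro verbatim:

	/* illustrative helper, not the kernel's make_data_loc() itself */
	static inline u32 example_make_data_loc(u32 len, u32 offs)
	{
		return (len << 16) | (offs & 0xffff);	/* length | offset from dynamic-data base */
	}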
181 /* Return the length of string -- including the terminating null byte */
189 len = strlen(current->comm) + 1; in fetch_store_strlen()
207 udd = (void *) current->utask->vaddr; in translate_user_vaddr()
209 base_addr = udd->bp_addr - udd->tu->offset; in translate_user_vaddr()
223 switch (code->op) { in process_fetch_insn()
225 val = regs_get_register(regs, code->param); in process_fetch_insn()
228 val = get_user_stack_nth(regs, code->param); in process_fetch_insn()
240 val = translate_user_vaddr(code->immediate); in process_fetch_insn()
255 rwlock_init(&filter->rwlock); in NOKPROBE_SYMBOL()
256 filter->nr_systemwide = 0; in NOKPROBE_SYMBOL()
257 INIT_LIST_HEAD(&filter->perf_events); in NOKPROBE_SYMBOL()
262 return !filter->nr_systemwide && list_empty(&filter->perf_events); in uprobe_filter_is_empty()
267 return tu->consumer.ret_handler != NULL; in is_ret_probe()
274 return trace_probe_is_enabled(&tu->tp); in trace_uprobe_is_busy()
286 len = strlen(tu->filename); in trace_uprobe_match_command_head()
287 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':') in trace_uprobe_match_command_head()
290 if (tu->ref_ctr_offset == 0) in trace_uprobe_match_command_head()
292 (int)(sizeof(void *) * 2), tu->offset); in trace_uprobe_match_command_head()
295 (int)(sizeof(void *) * 2), tu->offset, in trace_uprobe_match_command_head()
296 tu->ref_ctr_offset); in trace_uprobe_match_command_head()
300 argc--; argv++; in trace_uprobe_match_command_head()
302 return trace_probe_match_command_args(&tu->tp, argc, argv); in trace_uprobe_match_command_head()
311 strcmp(trace_probe_name(&tu->tp), event) == 0) && in trace_uprobe_match()
312 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) && in trace_uprobe_match()
319 struct trace_probe *tp; in trace_uprobe_primary_from_call() local
321 tp = trace_probe_primary_from_call(call); in trace_uprobe_primary_from_call()
322 if (WARN_ON_ONCE(!tp)) in trace_uprobe_primary_from_call()
325 return container_of(tp, struct trace_uprobe, tp); in trace_uprobe_primary_from_call()
337 tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL); in alloc_trace_uprobe()
339 return ERR_PTR(-ENOMEM); in alloc_trace_uprobe()
341 tu->nhits = alloc_percpu(unsigned long); in alloc_trace_uprobe()
342 if (!tu->nhits) { in alloc_trace_uprobe()
343 ret = -ENOMEM; in alloc_trace_uprobe()
347 ret = trace_probe_init(&tu->tp, event, group, true, nargs); in alloc_trace_uprobe()
351 dyn_event_init(&tu->devent, &trace_uprobe_ops); in alloc_trace_uprobe()
352 tu->consumer.handler = uprobe_dispatcher; in alloc_trace_uprobe()
354 tu->consumer.ret_handler = uretprobe_dispatcher; in alloc_trace_uprobe()
355 init_trace_uprobe_filter(tu->tp.event->filter); in alloc_trace_uprobe()
359 free_percpu(tu->nhits); in alloc_trace_uprobe()
370 path_put(&tu->path); in free_trace_uprobe()
371 trace_probe_cleanup(&tu->tp); in free_trace_uprobe()
372 kfree(tu->filename); in free_trace_uprobe()
373 free_percpu(tu->nhits); in free_trace_uprobe()
383 if (strcmp(trace_probe_name(&tu->tp), event) == 0 && in find_probe_event()
384 strcmp(trace_probe_group_name(&tu->tp), group) == 0) in find_probe_event()
395 if (trace_probe_has_sibling(&tu->tp)) in unregister_trace_uprobe()
399 if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp))) in unregister_trace_uprobe()
400 return -EBUSY; in unregister_trace_uprobe()
407 dyn_event_remove(&tu->devent); in unregister_trace_uprobe()
408 trace_probe_unlink(&tu->tp); in unregister_trace_uprobe()
416 struct trace_probe_event *tpe = orig->tp.event; in trace_uprobe_has_same_uprobe()
417 struct inode *comp_inode = d_real_inode(comp->path.dentry); in trace_uprobe_has_same_uprobe()
420 list_for_each_entry(orig, &tpe->probes, tp.list) { in trace_uprobe_has_same_uprobe()
421 if (comp_inode != d_real_inode(orig->path.dentry) || in trace_uprobe_has_same_uprobe()
422 comp->offset != orig->offset) in trace_uprobe_has_same_uprobe()
429 for (i = 0; i < orig->tp.nr_args; i++) { in trace_uprobe_has_same_uprobe()
430 if (strcmp(orig->tp.args[i].comm, in trace_uprobe_has_same_uprobe()
431 comp->tp.args[i].comm)) in trace_uprobe_has_same_uprobe()
435 if (i == orig->tp.nr_args) in trace_uprobe_has_same_uprobe()
446 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp); in append_trace_uprobe()
451 return -EEXIST; in append_trace_uprobe()
456 return -EEXIST; in append_trace_uprobe()
460 ret = trace_probe_append(&tu->tp, &to->tp); in append_trace_uprobe()
462 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); in append_trace_uprobe()
480 struct inode *new_inode = d_real_inode(new->path.dentry); in validate_ref_ctr_offset()
483 if (new_inode == d_real_inode(tmp->path.dentry) && in validate_ref_ctr_offset()
484 new->offset == tmp->offset && in validate_ref_ctr_offset()
485 new->ref_ctr_offset != tmp->ref_ctr_offset) { in validate_ref_ctr_offset()
487 return -EINVAL; in validate_ref_ctr_offset()
506 old_tu = find_probe_event(trace_probe_name(&tu->tp), in register_trace_uprobe()
507 trace_probe_group_name(&tu->tp)); in register_trace_uprobe()
512 ret = -EEXIST; in register_trace_uprobe()
521 if (ret == -EEXIST) { in register_trace_uprobe()
529 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); in register_trace_uprobe()
539 * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
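The syntax above is what __trace_uprobe_create() parses when a definition is written to the tracefs uprobe_events file. A hedged usage sketch (the path, offset, and event names are hypothetical; %ip, %ax, and $retval are standard fetch arguments):

	p:mygrp/myevent /bin/bash:0x4245c0 %ip %ax
	r:mygrp/myret /bin/bash:0x4245c0 $retval
	-:mygrp/myevent

A leading 'p' adds a uprobe, 'r' adds a uretprobe, and a leading '-' (handled later in create_or_delete_trace_uprobe()) deletes an existing event.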
563 return -ECANCELED; in __trace_uprobe_create()
567 return -ECANCELED; in __trace_uprobe_create()
568 if (argc - 2 > MAX_TRACE_ARGS) in __trace_uprobe_create()
569 return -E2BIG; in __trace_uprobe_create()
575 return -ECANCELED; in __trace_uprobe_create()
579 return -ENOMEM; in __trace_uprobe_create()
585 return -ECANCELED; in __trace_uprobe_create()
601 ret = -EINVAL; in __trace_uprobe_create()
610 ret = -EINVAL; in __trace_uprobe_create()
612 trace_probe_log_err(rctr_end - filename, in __trace_uprobe_create()
616 ret = -EINVAL; in __trace_uprobe_create()
617 trace_probe_log_err(rctr_end + 1 - filename, in __trace_uprobe_create()
626 trace_probe_log_err(rctr - filename, BAD_REFCNT); in __trace_uprobe_create()
638 trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX); in __trace_uprobe_create()
639 ret = -EINVAL; in __trace_uprobe_create()
647 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS); in __trace_uprobe_create()
655 event - argv[0]); in __trace_uprobe_create()
666 ret = -ENOMEM; in __trace_uprobe_create()
670 ptr = strpbrk(tail, ".-_"); in __trace_uprobe_create()
679 argc -= 2; in __trace_uprobe_create()
685 /* This must return -ENOMEM otherwise there is a bug */ in __trace_uprobe_create()
686 WARN_ON_ONCE(ret != -ENOMEM); in __trace_uprobe_create()
689 tu->offset = offset; in __trace_uprobe_create()
690 tu->ref_ctr_offset = ref_ctr_offset; in __trace_uprobe_create()
691 tu->path = path; in __trace_uprobe_create()
692 tu->filename = filename; in __trace_uprobe_create()
701 ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx); in __trace_uprobe_create()
708 ret = traceprobe_set_print_fmt(&tu->tp, ptype); in __trace_uprobe_create()
739 if (raw_command[0] == '-') in create_or_delete_trace_uprobe()
743 return ret == -ECANCELED ? -EINVAL : ret; in create_or_delete_trace_uprobe()
760 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp), in trace_uprobe_show()
761 trace_probe_name(&tu->tp), tu->filename, in trace_uprobe_show()
762 (int)(sizeof(void *) * 2), tu->offset); in trace_uprobe_show()
764 if (tu->ref_ctr_offset) in trace_uprobe_show()
765 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset); in trace_uprobe_show()
767 for (i = 0; i < tu->tp.nr_args; i++) in trace_uprobe_show()
768 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); in trace_uprobe_show()
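When read back through tracefs, the seq_printf() calls above emit one definition line per probe: type, group/event, path, a zero-padded offset, an optional ref-counter offset in parentheses, then the arguments. A hypothetical example of that output on a 64-bit kernel:

	p:mygrp/myevent /bin/bash:0x00000000004245c0(0x3c) arg1=%ax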
799 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { in probes_open()
839 nhits += per_cpu(*tu->nhits, cpu); in probes_profile_seq_show()
842 seq_printf(m, " %s %-44s %15lu\n", tu->filename, in probes_profile_seq_show()
843 trace_probe_name(&tu->tp), nhits); in probes_profile_seq_show()
888 return -ENOMEM; in uprobe_buffer_init()
897 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); in uprobe_buffer_init()
898 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex); in uprobe_buffer_init()
907 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf); in uprobe_buffer_init()
911 return -ENOMEM; in uprobe_buffer_init()
923 uprobe_buffer_refcnt--; in uprobe_buffer_enable()
935 if (--uprobe_buffer_refcnt == 0) { in uprobe_buffer_disable()
938 cpu)->buf); in uprobe_buffer_disable()
954 * Use per-cpu buffers for fastest access, but we might migrate in uprobe_buffer_get()
957 mutex_lock(&ucb->mutex); in uprobe_buffer_get()
966 mutex_unlock(&ucb->mutex); in uprobe_buffer_put()
980 dsize = __get_data_size(&tu->tp, regs, NULL); in prepare_uprobe_buffer()
983 ucb->dsize = tu->tp.size + dsize; in prepare_uprobe_buffer()
985 if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) { in prepare_uprobe_buffer()
986 ucb->dsize = MAX_UCB_BUFFER_SIZE; in prepare_uprobe_buffer()
987 dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size; in prepare_uprobe_buffer()
990 store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize); in prepare_uprobe_buffer()
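For orientation, the per-cpu buffer filled above appears to hold a fixed-size argument area of tu->tp.size bytes followed by dsize bytes of dynamic data (strings and the like), with the data_loc words in the fixed area pointing into that tail. A rough layout sketch under that assumption:

	/*
	 * ucb->buf: [ fixed args: tu->tp.size bytes ][ dynamic data: dsize bytes ]
	 *                    |                                      ^
	 *                    +--- data_loc = (len << 16) | offset --+
	 */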
1005 struct trace_event_call *call = trace_probe_event_call(&tu->tp); in __uprobe_trace_func()
1007 WARN_ON(call != trace_file->event_call); in __uprobe_trace_func()
1013 size = esize + ucb->dsize; in __uprobe_trace_func()
1019 entry->vaddr[0] = func; in __uprobe_trace_func()
1020 entry->vaddr[1] = instruction_pointer(regs); in __uprobe_trace_func()
1023 entry->vaddr[0] = instruction_pointer(regs); in __uprobe_trace_func()
1027 memcpy(data, ucb->buf, ucb->dsize); in __uprobe_trace_func()
1036 struct event_file_link *link; in uprobe_trace_func() local
1045 trace_probe_for_each_link_rcu(link, &tu->tp) in uprobe_trace_func()
1046 __uprobe_trace_func(tu, 0, regs, ucb, link->file); in uprobe_trace_func()
1056 struct event_file_link *link; in uretprobe_trace_func() local
1062 trace_probe_for_each_link_rcu(link, &tu->tp) in uretprobe_trace_func()
1063 __uprobe_trace_func(tu, func, regs, ucb, link->file); in uretprobe_trace_func()
1072 struct trace_seq *s = &iter->seq; in print_uprobe_event()
1076 entry = (struct uprobe_trace_entry_head *)iter->ent; in print_uprobe_event()
1083 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", in print_uprobe_event()
1084 trace_probe_name(&tu->tp), in print_uprobe_event()
1085 entry->vaddr[1], entry->vaddr[0]); in print_uprobe_event()
1089 trace_probe_name(&tu->tp), in print_uprobe_event()
1090 entry->vaddr[0]); in print_uprobe_event()
1094 if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0) in print_uprobe_event()
1107 struct inode *inode = d_real_inode(tu->path.dentry); in trace_uprobe_enable()
1110 tu->consumer.filter = filter; in trace_uprobe_enable()
1111 uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer); in trace_uprobe_enable()
1115 tu->uprobe = uprobe; in trace_uprobe_enable()
1119 static void __probe_event_disable(struct trace_probe *tp) in __probe_event_disable() argument
1124 tu = container_of(tp, struct trace_uprobe, tp); in __probe_event_disable()
1125 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); in __probe_event_disable()
1127 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { in __probe_event_disable()
1128 if (!tu->uprobe) in __probe_event_disable()
1131 uprobe_unregister_nosync(tu->uprobe, &tu->consumer); in __probe_event_disable()
1133 tu->uprobe = NULL; in __probe_event_disable()
1142 struct trace_probe *tp; in probe_event_enable() local
1147 tp = trace_probe_primary_from_call(call); in probe_event_enable()
1148 if (WARN_ON_ONCE(!tp)) in probe_event_enable()
1149 return -ENODEV; in probe_event_enable()
1150 enabled = trace_probe_is_enabled(tp); in probe_event_enable()
1154 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE)) in probe_event_enable()
1155 return -EINTR; in probe_event_enable()
1157 ret = trace_probe_add_file(tp, file); in probe_event_enable()
1161 if (trace_probe_test_flag(tp, TP_FLAG_TRACE)) in probe_event_enable()
1162 return -EINTR; in probe_event_enable()
1164 trace_probe_set_flag(tp, TP_FLAG_PROFILE); in probe_event_enable()
1167 tu = container_of(tp, struct trace_uprobe, tp); in probe_event_enable()
1168 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); in probe_event_enable()
1177 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { in probe_event_enable()
1180 __probe_event_disable(tp); in probe_event_enable()
1192 trace_probe_remove_file(tp, file); in probe_event_enable()
1194 trace_probe_clear_flag(tp, TP_FLAG_PROFILE); in probe_event_enable()
1202 struct trace_probe *tp; in probe_event_disable() local
1204 tp = trace_probe_primary_from_call(call); in probe_event_disable()
1205 if (WARN_ON_ONCE(!tp)) in probe_event_disable()
1208 if (!trace_probe_is_enabled(tp)) in probe_event_disable()
1212 if (trace_probe_remove_file(tp, file) < 0) in probe_event_disable()
1215 if (trace_probe_is_enabled(tp)) in probe_event_disable()
1218 trace_probe_clear_flag(tp, TP_FLAG_PROFILE); in probe_event_disable()
1220 __probe_event_disable(tp); in probe_event_disable()
1232 return -ENODEV; in uprobe_event_define_fields()
1243 return traceprobe_define_arg_fields(event_call, size, &tu->tp); in uprobe_event_define_fields()
1252 list_for_each_entry(event, &filter->perf_events, hw.tp_list) { in __uprobe_perf_filter()
1253 if (event->hw.target->mm == mm) in __uprobe_perf_filter()
1264 return __uprobe_perf_filter(filter, event->hw.target->mm); in trace_uprobe_filter_event()
1272 write_lock(&filter->rwlock); in trace_uprobe_filter_remove()
1273 if (event->hw.target) { in trace_uprobe_filter_remove()
1274 list_del(&event->hw.tp_list); in trace_uprobe_filter_remove()
1275 done = filter->nr_systemwide || in trace_uprobe_filter_remove()
1276 (event->hw.target->flags & PF_EXITING) || in trace_uprobe_filter_remove()
1279 filter->nr_systemwide--; in trace_uprobe_filter_remove()
1280 done = filter->nr_systemwide; in trace_uprobe_filter_remove()
1282 write_unlock(&filter->rwlock); in trace_uprobe_filter_remove()
1293 write_lock(&filter->rwlock); in trace_uprobe_filter_add()
1294 if (event->hw.target) { in trace_uprobe_filter_add()
1296 * event->parent != NULL means copy_process(), we can avoid in trace_uprobe_filter_add()
1297 * uprobe_apply(). current->mm must be probed and we can rely in trace_uprobe_filter_add()
1303 done = filter->nr_systemwide || in trace_uprobe_filter_add()
1304 event->parent || event->attr.enable_on_exec || in trace_uprobe_filter_add()
1306 list_add(&event->hw.tp_list, &filter->perf_events); in trace_uprobe_filter_add()
1308 done = filter->nr_systemwide; in trace_uprobe_filter_add()
1309 filter->nr_systemwide++; in trace_uprobe_filter_add()
1311 write_unlock(&filter->rwlock); in trace_uprobe_filter_add()
1319 struct trace_probe *tp; in uprobe_perf_close() local
1323 tp = trace_probe_primary_from_call(call); in uprobe_perf_close()
1324 if (WARN_ON_ONCE(!tp)) in uprobe_perf_close()
1325 return -ENODEV; in uprobe_perf_close()
1327 tu = container_of(tp, struct trace_uprobe, tp); in uprobe_perf_close()
1328 if (trace_uprobe_filter_remove(tu->tp.event->filter, event)) in uprobe_perf_close()
1331 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { in uprobe_perf_close()
1332 ret = uprobe_apply(tu->uprobe, &tu->consumer, false); in uprobe_perf_close()
1343 struct trace_probe *tp; in uprobe_perf_open() local
1347 tp = trace_probe_primary_from_call(call); in uprobe_perf_open()
1348 if (WARN_ON_ONCE(!tp)) in uprobe_perf_open()
1349 return -ENODEV; in uprobe_perf_open()
1351 tu = container_of(tp, struct trace_uprobe, tp); in uprobe_perf_open()
1352 if (trace_uprobe_filter_add(tu->tp.event->filter, event)) in uprobe_perf_open()
1355 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { in uprobe_perf_open()
1356 err = uprobe_apply(tu->uprobe, &tu->consumer, true); in uprobe_perf_open()
1373 filter = tu->tp.event->filter; in uprobe_perf_filter()
1376 * speculative short-circuiting check to avoid unnecessarily taking in uprobe_perf_filter()
1377 * filter->rwlock below, if the uprobe has system-wide consumer in uprobe_perf_filter()
1379 if (READ_ONCE(filter->nr_systemwide)) in uprobe_perf_filter()
1382 read_lock(&filter->rwlock); in uprobe_perf_filter()
1384 read_unlock(&filter->rwlock); in uprobe_perf_filter()
1393 struct trace_event_call *call = trace_probe_event_call(&tu->tp); in __uprobe_perf_func()
1405 ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run); in __uprobe_perf_func()
1414 size = esize + ucb->dsize; in __uprobe_perf_func()
1415 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); in __uprobe_perf_func()
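The ALIGN() arithmetic above pads the record so that, once the perf raw-sample u32 size word is placed in front of it, the whole thing stays u64-aligned (the later memset() zeroes that padding). A worked example, assuming esize + ucb->dsize came to 30 bytes:

	size = ALIGN(30 + sizeof(u32), sizeof(u64)) - sizeof(u32);	/* ALIGN(34, 8) = 40; 40 - 4 = 36 */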
1420 head = this_cpu_ptr(call->perf_events); in __uprobe_perf_func()
1429 entry->vaddr[0] = func; in __uprobe_perf_func()
1430 entry->vaddr[1] = instruction_pointer(regs); in __uprobe_perf_func()
1433 entry->vaddr[0] = instruction_pointer(regs); in __uprobe_perf_func()
1437 memcpy(data, ucb->buf, ucb->dsize); in __uprobe_perf_func()
1439 if (size - esize > ucb->dsize) in __uprobe_perf_func()
1440 memset(data + ucb->dsize, 0, size - esize - ucb->dsize); in __uprobe_perf_func()
1442 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, in __uprobe_perf_func()
1452 if (!uprobe_perf_filter(&tu->consumer, current->mm)) in uprobe_perf_func()
1471 const char *pevent = trace_event_name(event->tp_event); in bpf_get_uprobe_info()
1472 const char *group = event->tp_event->class->system; in bpf_get_uprobe_info()
1478 tu = trace_uprobe_primary_from_call(event->tp_event); in bpf_get_uprobe_info()
1480 return -EINVAL; in bpf_get_uprobe_info()
1484 *filename = tu->filename; in bpf_get_uprobe_info()
1485 *probe_offset = tu->offset; in bpf_get_uprobe_info()
1534 this_cpu_inc(*tu->nhits); in uprobe_dispatcher()
1539 current->utask->vaddr = (unsigned long) &udd; in uprobe_dispatcher()
1544 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) in uprobe_dispatcher()
1548 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) in uprobe_dispatcher()
1567 current->utask->vaddr = (unsigned long) &udd; in uretprobe_dispatcher()
1572 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) in uretprobe_dispatcher()
1576 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) in uretprobe_dispatcher()
1595 struct trace_event_call *call = trace_probe_event_call(&tu->tp); in init_trace_event_call()
1596 call->event.funcs = &uprobe_funcs; in init_trace_event_call()
1597 call->class->fields_array = uprobe_fields_array; in init_trace_event_call()
1599 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY; in init_trace_event_call()
1600 call->class->reg = trace_uprobe_register; in init_trace_event_call()
1607 return trace_probe_register_event_call(&tu->tp); in register_uprobe_event()
1612 return trace_probe_unregister_event_call(&tu->tp); in unregister_uprobe_event()
1631 return ERR_PTR(-EINVAL); in create_local_trace_uprobe()
1649 tu->offset = offs; in create_local_trace_uprobe()
1650 tu->path = path; in create_local_trace_uprobe()
1651 tu->ref_ctr_offset = ref_ctr_offset; in create_local_trace_uprobe()
1652 tu->filename = kstrdup(name, GFP_KERNEL); in create_local_trace_uprobe()
1653 if (!tu->filename) { in create_local_trace_uprobe()
1654 ret = -ENOMEM; in create_local_trace_uprobe()
1661 if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) { in create_local_trace_uprobe()
1662 ret = -ENOMEM; in create_local_trace_uprobe()
1666 return trace_probe_event_call(&tu->tp); in create_local_trace_uprobe()