Lines matching refs: tr (a short, hedged usage sketch follows the listing)
105 struct trace_array *tr;
110 tr = ops->private;
112 return tr->function_pids != NULL || tr->function_no_pids != NULL;
176 struct trace_array *tr = op->private;
179 if (tr) {
180 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
537 struct trace_array *tr = trace_get_global_array();
559 if (tr->trace_flags & TRACE_ITER(PROF_TEXT_OFFSET)) {
1074 struct trace_array *tr;
1350 static int ftrace_add_mod(struct trace_array *tr,
1355 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
3945 struct trace_array *tr;
3956 struct trace_array *tr = iter->ops->private;
3967 if (!tr)
3970 func_probes = &tr->func_probes;
4087 struct trace_array *tr = iter->tr;
4094 if (iter->mod_list == &tr->mod_trace ||
4095 iter->mod_list == &tr->mod_notrace) {
4116 /* probes are only available if tr is set */
4117 if (!iter->tr)
4140 struct trace_array *tr = iter->tr;
4143 iter->mod_list == &tr->mod_trace ||
4144 iter->mod_list == &tr->mod_notrace)
4663 struct trace_array *tr = ops->private;
4671 if (tracing_check_open_get_tr(tr))
4683 iter->tr = tr;
4689 mod_head = tr ? &tr->mod_notrace : NULL;
4692 mod_head = tr ? &tr->mod_trace : NULL;
4741 if (tr)
4742 trace_array_put(tr);
5051 static int cache_mod(struct trace_array *tr,
5055 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
5087 return ftrace_add_mod(tr, func, module, enable);
5161 struct trace_array *tr;
5169 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5170 if (!list_empty(&tr->mod_trace))
5171 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
5172 if (!list_empty(&tr->mod_notrace))
5173 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
5187 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
5193 if (!tr)
5212 return cache_mod(tr, func_orig, module, enable);
5244 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
5415 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
5431 register_ftrace_function_probe(char *glob, struct trace_array *tr,
5445 if (WARN_ON(!tr))
5455 list_for_each_entry(iter, &tr->func_probes, list) {
5469 probe->tr = tr;
5471 list_add(&probe->list, &tr->func_probes);
5513 ret = probe_ops->init(probe_ops, tr,
5518 probe_ops->free(probe_ops, tr,
5569 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5576 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
5609 list_for_each_entry(iter, &tr->func_probes, list) {
5688 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5706 void clear_ftrace_function_probes(struct trace_array *tr)
5710 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
5711 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
5759 struct trace_array *tr = iter->ops->private;
5783 return p->func(tr, hash, func, command, next, enable);
6359 struct trace_array *tr = ops->private;
6366 if (!tr)
6381 if (tr && mod && ret < 0) {
6385 ret = cache_mod(tr, tmp, mod, enable);
6554 struct trace_array *tr = trace_get_global_array();
6556 ops->private = tr;
6557 ftrace_init_trace_array(tr);
6611 if (iter->tr) {
6612 if (list_empty(&iter->tr->mod_trace))
6628 if (iter->tr)
6629 trace_array_put(iter->tr);
7471 struct trace_array *tr;
7474 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7475 if (!tr->ops || !tr->ops->func_hash)
7477 mutex_lock(&tr->ops->func_hash->regex_lock);
7478 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
7479 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
7480 mutex_unlock(&tr->ops->func_hash->regex_lock);
7861 struct trace_array *tr;
7864 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7865 if (!tr->ops || !tr->ops->func_hash)
7867 mutex_lock(&tr->ops->func_hash->regex_lock);
7868 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7869 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7870 mutex_unlock(&tr->ops->func_hash->regex_lock);
8043 void ftrace_init_trace_array(struct trace_array *tr)
8045 if (tr->flags & TRACE_ARRAY_FL_MOD_INIT)
8048 INIT_LIST_HEAD(&tr->func_probes);
8049 INIT_LIST_HEAD(&tr->mod_trace);
8050 INIT_LIST_HEAD(&tr->mod_notrace);
8052 tr->flags |= TRACE_ARRAY_FL_MOD_INIT;
8078 __init void ftrace_init_global_array_ops(struct trace_array *tr)
8080 tr->ops = &global_ops;
8082 global_ops.private = tr;
8083 ftrace_init_trace_array(tr);
8084 init_array_fgraph_ops(tr, tr->ops);
8087 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
8090 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
8091 if (WARN_ON(tr->ops->func != ftrace_stub))
8093 tr->ops->func);
8095 tr->ops->func = func;
8096 tr->ops->private = tr;
8099 void ftrace_reset_array_ops(struct trace_array *tr)
8101 tr->ops->func = ftrace_stub;
8227 struct trace_array *tr = data;
8231 pid_list = rcu_dereference_sched(tr->function_pids);
8232 no_pid_list = rcu_dereference_sched(tr->function_no_pids);
8235 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8238 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8248 struct trace_array *tr = data;
8250 pid_list = rcu_dereference_sched(tr->function_pids);
8253 pid_list = rcu_dereference_sched(tr->function_no_pids);
8261 struct trace_array *tr = data;
8263 pid_list = rcu_dereference_sched(tr->function_pids);
8266 pid_list = rcu_dereference_sched(tr->function_no_pids);
8270 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
8274 tr);
8276 tr);
8279 tr);
8281 tr);
8285 static void clear_ftrace_pids(struct trace_array *tr, int type)
8291 pid_list = rcu_dereference_protected(tr->function_pids,
8293 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8302 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8304 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
8308 rcu_assign_pointer(tr->function_pids, NULL);
8311 rcu_assign_pointer(tr->function_no_pids, NULL);
8323 void ftrace_clear_pids(struct trace_array *tr)
8327 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
8332 static void ftrace_pid_reset(struct trace_array *tr, int type)
8335 clear_ftrace_pids(tr, type);
8350 struct trace_array *tr = m->private;
8355 pid_list = rcu_dereference_sched(tr->function_pids);
8365 struct trace_array *tr = m->private;
8366 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
8403 struct trace_array *tr = m->private;
8408 pid_list = rcu_dereference_sched(tr->function_no_pids);
8418 struct trace_array *tr = m->private;
8419 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
8438 struct trace_array *tr = inode->i_private;
8442 ret = tracing_check_open_get_tr(tr);
8448 ftrace_pid_reset(tr, type);
8458 trace_array_put(tr);
8465 trace_array_put(tr);
8468 /* copy tr over to seq ops */
8469 m->private = tr;
8489 struct trace_array *tr = data;
8497 pid_list = rcu_dereference_protected(tr->function_pids,
8499 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8503 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8506 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8515 struct trace_array *tr = m->private;
8528 filtered_pids = rcu_dereference_protected(tr->function_pids,
8530 other_pids = rcu_dereference_protected(tr->function_no_pids,
8534 filtered_pids = rcu_dereference_protected(tr->function_no_pids,
8536 other_pids = rcu_dereference_protected(tr->function_pids,
8550 rcu_assign_pointer(tr->function_pids, pid_list);
8553 rcu_assign_pointer(tr->function_no_pids, pid_list);
8563 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8571 on_each_cpu(ignore_task_cpu, tr, 1);
8598 struct trace_array *tr = inode->i_private;
8600 trace_array_put(tr);
8621 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8624 tr, &ftrace_pid_fops);
8626 d_tracer, tr, &ftrace_no_pid_fops);
8629 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8633 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
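
Taken together, the hits above show one recurring pattern: a struct trace_array *tr is saved when an ftrace_ops or function probe is set up (lines 5469, 6556, 8082) and handed back to the callbacks later. The sketch below is not part of the listing; it is a minimal, hypothetical probe meant only to illustrate how the register_ftrace_function_probe() signature quoted at line 5431 and the probe_ops->func() invocation at line 5244 fit together. The probe name and the "schedule*" glob are invented for illustration, and the struct definitions are assumed to come from the in-tree kernel/trace/trace.h.

        /*
         * Hedged sketch (not from the listing): a minimal function probe
         * built on the API shown above.  Names are illustrative only.
         */
        #include <linux/ftrace.h>
        #include "trace.h"      /* struct trace_array, struct ftrace_probe_ops */

        /* Matches the call at line 5244: func(ip, parent_ip, probe->tr, ops, data). */
        static void my_probe_func(unsigned long ip, unsigned long parent_ip,
                                  struct trace_array *tr,
                                  struct ftrace_probe_ops *ops, void *data)
        {
                /* tr is the trace_array saved into probe->tr at registration (line 5469). */
                if (!tr)
                        return;
                trace_printk("probe hit at %pS\n", (void *)ip);
        }

        static struct ftrace_probe_ops my_probe_ops = {
                .func = my_probe_func,
                /* .init and .free also receive tr, as lines 5513-5569 quote. */
        };

        /* Registration mirrors the signature quoted at line 5431. */
        static int my_probe_register(struct trace_array *tr)
        {
                if (WARN_ON(!tr))
                        return -ENODEV;
                return register_ftrace_function_probe("schedule*", tr,
                                                      &my_probe_ops, NULL);
        }

In-tree users register probes the same way, which is presumably why every probe path in the listing threads tr through from registration (line 5471 adds the probe to tr->func_probes) down to the free callbacks at lines 5415, 5569 and 5688.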